diff --git a/CI/script/gtest_collect_output.py b/CI/script/gtest_collect_output.py
index 80028bb0485c00725eb8c5ff68b2f45a0dcbc37e..9f378da12535130985836f6820273067cccf94a6 100644
--- a/CI/script/gtest_collect_output.py
+++ b/CI/script/gtest_collect_output.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-#-*- coding: UTF-8 -*-
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
import os
import sys
@@ -15,62 +15,68 @@ G_ERRORS = 0
G_TIME = 0
G_NAME = "AllTests"
-if len(sys.argv) > 1 :
+if len(sys.argv) > 1:
G_OUTPUT_DIR = sys.argv[1]
-
-if len(sys.argv) > 2 :
+
+if len(sys.argv) > 2:
G_OUTPUT_FILE = sys.argv[2]
-if not os.path.isdir(G_OUTPUT_DIR) :
- print("ERROR:%s not exist." % (G_OUTPUT_DIR))
+if not os.path.isdir(G_OUTPUT_DIR):
+ print("ERROR:%s not exist." % (G_OUTPUT_DIR))
exit(1)
-#Read each xml file in the gtest_output directory and merge them into a single file
-#Numeric attributes on the testsuites element must be summed; the other fields are simply concatenated
+# Read each xml file in the gtest_output directory and merge them into a single file
+# Numeric attributes on the testsuites element must be summed; the other fields are simply concatenated
impl = xml.dom.getDOMImplementation()
-output_dom = impl.createDocument(None, None, None)
+output_dom = impl.createDocument(None, None, None)
output_root = output_dom.createElement("testsuites")
-for FILE_IDX in os.listdir(G_OUTPUT_DIR) :
+for FILE_IDX in os.listdir(G_OUTPUT_DIR):
print("analyse %s ......" % FILE_IDX)
dom = xml.dom.minidom.parse("%s/%s" % (G_OUTPUT_DIR, FILE_IDX))
root = dom.documentElement
- #L_TESTSUITES = root.getElementsByTagName("testsuites")[0]
-
- L_TEST = root.getAttribute('tests')
+ # L_TESTSUITES = root.getElementsByTagName("testsuites")[0]
+
+ L_TEST = root.getAttribute("tests")
G_TEST += int(L_TEST)
-
- L_FAILURES = root.getAttribute('failures')
+
+ L_FAILURES = root.getAttribute("failures")
G_FAILURES += int(L_FAILURES)
-
- L_DISABLED = root.getAttribute('disabled')
+
+ L_DISABLED = root.getAttribute("disabled")
G_DISABLED += int(L_DISABLED)
-
- L_ERRORS = root.getAttribute('errors')
+
+ L_ERRORS = root.getAttribute("errors")
G_ERRORS += int(L_ERRORS)
-
- L_TIME = root.getAttribute('time')
+
+ L_TIME = root.getAttribute("time")
G_TIME += float(L_TIME)
-
+
L_TESTSUITE = root.getElementsByTagName("testsuite")
- for TESTSUITE_IDX in L_TESTSUITE :
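+    # collect every <testsuite> element into the merged output root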
+ for TESTSUITE_IDX in L_TESTSUITE:
output_root.appendChild(TESTSUITE_IDX)
-
- print(" tests=%s, failures=%s, disabled=%s, errors=%s, time=%s" % (L_TEST, L_FAILURES, L_DISABLED, L_ERRORS, L_TIME))
- print("all tests=%d, failures=%d, disabled=%d, errors=%d, time=%0.3f" % (G_TEST, G_FAILURES, G_DISABLED, G_ERRORS, G_TIME))
+
+ print(
+ " tests=%s, failures=%s, disabled=%s, errors=%s, time=%s"
+ % (L_TEST, L_FAILURES, L_DISABLED, L_ERRORS, L_TIME)
+ )
+ print(
+ "all tests=%d, failures=%d, disabled=%d, errors=%d, time=%0.3f"
+ % (G_TEST, G_FAILURES, G_DISABLED, G_ERRORS, G_TIME)
+ )
print()
-#Write the merged result to the xml file
+# Write the merged result to the xml file
output_dom.appendChild(output_root)
-output_root.setAttribute('tests', str(G_TEST))
-output_root.setAttribute('failures', str(G_FAILURES))
-output_root.setAttribute('disabled', str(G_DISABLED))
-output_root.setAttribute('errors', str(G_ERRORS))
-output_root.setAttribute('time', str(G_TIME))
-output_root.setAttribute('name', str(G_NAME))
+output_root.setAttribute("tests", str(G_TEST))
+output_root.setAttribute("failures", str(G_FAILURES))
+output_root.setAttribute("disabled", str(G_DISABLED))
+output_root.setAttribute("errors", str(G_ERRORS))
+output_root.setAttribute("time", str(G_TIME))
+output_root.setAttribute("name", str(G_NAME))
-output_file = open(G_OUTPUT_FILE, mode = 'w')
-output_dom.writexml(output_file, '', ' ', '\n', 'utf-8')
-output_file.close()
\ No newline at end of file
+output_file = open(G_OUTPUT_FILE, mode="w")
+output_dom.writexml(output_file, "", " ", "\n", "utf-8")
+output_file.close()
diff --git a/common/script/logs_handler/do_compress_and_archive.py b/common/script/logs_handler/do_compress_and_archive.py
index 0c4fb688f7ff9666be8cbb024fe89883227989f7..dba3a996352ae6526fd5173f1bf85ba7b43a3347 100644
--- a/common/script/logs_handler/do_compress_and_archive.py
+++ b/common/script/logs_handler/do_compress_and_archive.py
@@ -9,7 +9,7 @@ CUR_PATH, _ = os.path.split(os.path.abspath(__file__))
def file_reader(data_path):
- with open(data_path, 'r', encoding='utf-8') as file:
+ with open(data_path, "r", encoding="utf-8") as file:
info = file.read()
return json.loads(info)
@@ -25,7 +25,7 @@ def compress_bak_files(main_path, file_names):
         # Change the working directory so that only the log file itself is compressed
cwd = os.getcwd()
os.chdir(main_path)
- with tarfile.open(f'{ori_log_file}.tar.gz', 'w:gz') as tar:
+ with tarfile.open(f"{ori_log_file}.tar.gz", "w:gz") as tar:
tar.add(file_name)
os.chdir(cwd)
os.remove(ori_log_file)
@@ -33,10 +33,12 @@ def compress_bak_files(main_path, file_names):
def reg_handler(log_name_prefix, log_name_tail, log_name, match_string):
"""匹配出待压缩的文件"""
- match_condition = 'tar.gz' in match_string or \
- len(match_string) <= len(log_name) \
- or match_string.endswith("swp") or \
- match_string.endswith("swo")
+ match_condition = (
+ "tar.gz" in match_string
+ or len(match_string) <= len(log_name)
+ or match_string.endswith("swp")
+ or match_string.endswith("swo")
+ )
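+    # skip existing archives, vim swap files, and names no longer than the base log name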
if match_condition:
return False
if log_name_prefix in match_string and log_name_tail in match_string:
@@ -48,7 +50,7 @@ def change_file_mod(bak_path, file_names, user_name):
"""改变日志定时清理进程创建的压缩文件权限"""
uid, gid = getpwnam(user_name)[2:4]
for file_name, _ in file_names:
- tar_file_name = f'{str(Path(bak_path, file_name))}.tar.gz'
+ tar_file_name = f"{str(Path(bak_path, file_name))}.tar.gz"
os.chmod(tar_file_name, 0o660)
os.chown(tar_file_name, uid=uid, gid=gid)
@@ -69,9 +71,14 @@ def delete_old_bak_logs(main_path, log_name, max_log_val):
return
     # Collect every archive under main_path whose name starts with log_name
- tar_files = [(str(Path(main_path, file_item)), get_file_creation_time(str(Path(main_path, file_item))))
- for file_item in os.listdir(main_path)
- if file_item.startswith(log_name) and file_item.endswith('tar.gz')]
+ tar_files = [
+ (
+ str(Path(main_path, file_item)),
+ get_file_creation_time(str(Path(main_path, file_item))),
+ )
+ for file_item in os.listdir(main_path)
+ if file_item.startswith(log_name) and file_item.endswith("tar.gz")
+ ]
tar_files.sort(key=lambda x: x[1], reverse=False)
for tar_file, _ in tar_files:
@@ -88,18 +95,24 @@ def bak_logs_handler(log_content, log_name, ori_bak_files, max_log_vol, user_nam
def main(log_content, log_name, max_log_vol, user_name):
- log_name_prefix, log_name_tail = log_name.split('.')[0], log_name.split('.')[1]
+ log_name_prefix, log_name_tail = log_name.split(".")[0], log_name.split(".")[1]
ori_bak_files = []
for name in os.listdir(log_content):
if reg_handler(log_name_prefix, log_name_tail, log_name, name):
- ori_bak_files.append((str(Path(log_content, f'{name}')),
- get_file_creation_time(str(Path(log_content, f'{name}')))))
+ ori_bak_files.append(
+ (
+ str(Path(log_content, f"{name}")),
+ get_file_creation_time(str(Path(log_content, f"{name}"))),
+ )
+ )
if not ori_bak_files:
return
ori_bak_files.sort(key=lambda x: x[1], reverse=False)
- bak_logs_handler(log_content, log_name_prefix, ori_bak_files, max_log_vol, user_name)
+ bak_logs_handler(
+ log_content, log_name_prefix, ori_bak_files, max_log_vol, user_name
+ )
if __name__ == "__main__":
diff --git a/common/script/logs_handler/logs_handler.py b/common/script/logs_handler/logs_handler.py
index d3deec9ccda1ec300d0ab4fab148b75c04c480d0..ea43fada53b54b596aab3a10537eb06c63145aae 100644
--- a/common/script/logs_handler/logs_handler.py
+++ b/common/script/logs_handler/logs_handler.py
@@ -12,13 +12,13 @@ ENV_FILE = "/opt/cantian/action/env.sh"
def file_reader(data_path):
- with open(data_path, 'r', encoding='utf-8') as file:
+ with open(data_path, "r", encoding="utf-8") as file:
info = file.read()
return json.loads(info)
def get_param_value(param):
- with open(ENV_FILE, 'r', encoding='utf-8') as file:
+ with open(ENV_FILE, "r", encoding="utf-8") as file:
env_config = file.readlines()
if param == "deploy_user":
for line in env_config:
@@ -41,11 +41,11 @@ def close_child_process(proc):
os.killpg(proc.pid, signal.SIGTERM)
except ProcessLookupError as err:
_ = err
- return 'success'
+ return "success"
except Exception as err:
return str(err)
- return 'success'
+ return "success"
def shell_task(exec_cmd):
@@ -55,8 +55,14 @@ def shell_task(exec_cmd):
return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
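+    # bash runs in its own session (preexec_fn=os.setsid) so the whole process group can be killed on timeout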
pobj.stdin.write(exec_cmd.encode())
pobj.stdin.write(os.linesep.encode())
try:
@@ -78,16 +84,21 @@ def shell_task(exec_cmd):
class LogsHandler:
def __init__(self):
- self.config_params = file_reader(str(Path(CUR_PATH, 'config.json')))
+ self.config_params = file_reader(str(Path(CUR_PATH, "config.json")))
self.deploy_user = get_param_value("deploy_user")
self.user_name = None
def execute(self):
for item in self.config_params:
- user = item.get('userandgroup') if \
- item.get('userandgroup') != "deploy_user" else self.deploy_user
- self.user_name = user.split(':')[0]
- log_file_dir, max_log_vol = item.get('log_file_dir'), int(item.get('max_log_vol'))
+ user = (
+ item.get("userandgroup")
+ if item.get("userandgroup") != "deploy_user"
+ else self.deploy_user
+ )
+ self.user_name = user.split(":")[0]
+ log_file_dir, max_log_vol = item.get("log_file_dir"), int(
+ item.get("max_log_vol")
+ )
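+            # halve the log-volume quota when running inside a container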
if os.path.exists("/.dockerenv"):
max_log_vol //= 2
             # Split into the log directory and the log name
@@ -97,18 +108,26 @@ class LogsHandler:
files_names = os.listdir(log_content)
log_name_pre = log_name.split(".")[0]
for file_name in files_names:
- conditions = log_name_pre in file_name and not file_name.endswith("tar.gz") \
- and not file_name.endswith("swp") and not file_name.endswith("swo") \
- and len(file_name) > len(log_name)
+ conditions = (
+ log_name_pre in file_name
+ and not file_name.endswith("tar.gz")
+ and not file_name.endswith("swp")
+ and not file_name.endswith("swo")
+ and len(file_name) > len(log_name)
+ )
if conditions:
                     # If new archived logs have been produced, break out of the loop to pack them
break
else:
continue
- exec_cmd = f"su - {self.user_name} -s /bin/bash -c 'python3 {CUR_PATH}/do_compress_and_archive.py " \
- f"{log_content} {log_name} {max_log_vol} {self.user_name}' "
+ exec_cmd = (
+ f"su - {self.user_name} -s /bin/bash -c 'python3 {CUR_PATH}/do_compress_and_archive.py "
+ f"{log_content} {log_name} {max_log_vol} {self.user_name}' "
+ )
return_code, stdout, stderr = shell_task(exec_cmd)
if return_code or stderr:
- LOG.error(f'failed to execute log cleanup of {log_content}, '
- f'return_code: {return_code}, stderr: {stderr}')
+ LOG.error(
+ f"failed to execute log cleanup of {log_content}, "
+ f"return_code: {return_code}, stderr: {stderr}"
+ )
diff --git a/common/script/logs_handler/logs_tool/log.py b/common/script/logs_handler/logs_tool/log.py
index c1d5ae7552824f68b5d02741d3ba72aab878e7b4..e5709ec04b8814c2f33ca2fc0437c2c56bd7f061 100644
--- a/common/script/logs_handler/logs_tool/log.py
+++ b/common/script/logs_handler/logs_tool/log.py
@@ -42,15 +42,19 @@ def setup(project_name):
log_path = _get_log_file_path(project_name)
if log_path:
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
log_root.addHandler(file_log)
for handler in log_root.handlers:
handler.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")))
+ datefmt=log_config.get("log_date_format"),
+ )
+ )
if log_config.get("debug"):
log_root.setLevel(logging.DEBUG)
diff --git a/common/script/logs_handler/logs_tool/log_config.py b/common/script/logs_handler/logs_tool/log_config.py
index 887d4d3e8bd3029ccfff319e101e6fed4667b75d..7b19f7dc781c0bd1a555276436c65223bddf8845 100644
--- a/common/script/logs_handler/logs_tool/log_config.py
+++ b/common/script/logs_handler/logs_tool/log_config.py
@@ -7,8 +7,8 @@ CONSOLE_CONF = {
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "%(asctime)s console %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
"logging_context_format_string": "%(asctime)s console %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s"
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
}
}
diff --git a/ct_om/service/cantian_exporter/config/config.py b/ct_om/service/cantian_exporter/config/config.py
index 75cc174fa6f054454b782ce01b31375eb8e0dbd5..ec75fc5e399fd80bf35ce7f9796f63bd2672e079 100644
--- a/ct_om/service/cantian_exporter/config/config.py
+++ b/ct_om/service/cantian_exporter/config/config.py
@@ -7,8 +7,8 @@ CONSOLE_CONF = {
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "%(asctime)s console %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
"logging_context_format_string": "%(asctime)s console %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s"
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
}
}
diff --git a/ct_om/service/cantian_exporter/exporter/execute.py b/ct_om/service/cantian_exporter/exporter/execute.py
index 39bd4d0cd74423b689ab36d1b1e490bddc28c49d..94257238f91027aa402993686a6c52f9bd285775 100644
--- a/ct_om/service/cantian_exporter/exporter/execute.py
+++ b/ct_om/service/cantian_exporter/exporter/execute.py
@@ -18,7 +18,10 @@ def main():
save_file = SaveFile()
while True:
- cms_nodes_info, dbstor_info = get_node_info.execute(), get_dbstor_info.get_dbstor_info()
+ cms_nodes_info, dbstor_info = (
+ get_node_info.execute(),
+ get_dbstor_info.get_dbstor_info(),
+ )
cms_nodes_info.update(dbstor_info)
try:
dr_status_info = get_dr_info.execute()
@@ -29,7 +32,11 @@ def main():
try:
save_file.create_files(cms_nodes_info)
except Exception as err:
- LOG.error("[result] Fail to record report data in json file, [err_msg] {}".format(str(err)))
+ LOG.error(
+ "[result] Fail to record report data in json file, [err_msg] {}".format(
+ str(err)
+ )
+ )
time.sleep(20)
diff --git a/ct_om/service/cantian_exporter/exporter/get_certificate_status.py b/ct_om/service/cantian_exporter/exporter/get_certificate_status.py
index 54bc6d8cbda9c91e063a219559174eea02e1515a..28f8a603138e832306d6e8d89ba43266537740e3 100644
--- a/ct_om/service/cantian_exporter/exporter/get_certificate_status.py
+++ b/ct_om/service/cantian_exporter/exporter/get_certificate_status.py
@@ -6,7 +6,7 @@ from cryptography.hazmat.backends import default_backend
def file_reader(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.read()
@@ -28,7 +28,11 @@ def get_certificate_status():
cert_status = "revoked"
not_before = cert.not_valid_before
not_after = cert.not_valid_after
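+    # the certificate is valid only while current_time lies within [not_before, not_after]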
- if not not_before.replace(tzinfo=timezone.utc) <= current_time <= not_after.replace(tzinfo=timezone.utc):
+ if (
+ not not_before.replace(tzinfo=timezone.utc)
+ <= current_time
+ <= not_after.replace(tzinfo=timezone.utc)
+ ):
cert_status = "expired"
return crl_status, cert_status
diff --git a/ct_om/service/cantian_exporter/exporter/get_info.py b/ct_om/service/cantian_exporter/exporter/get_info.py
index a66526204f99a06283a60271d165699605fdcc9f..c840dbb60a2b8a8af37992883616ac58b7daec0b 100644
--- a/ct_om/service/cantian_exporter/exporter/get_info.py
+++ b/ct_om/service/cantian_exporter/exporter/get_info.py
@@ -15,16 +15,17 @@ from datetime import datetime
from exporter.log import EXPORTER_LOG as LOG
from exporter.tool import SimpleSql
from exporter.tool import _exec_popen
-sys.path.append('/opt/cantian/action/dbstor')
+
+sys.path.append("/opt/cantian/action/dbstor")
from kmc_adapter import CApiWrapper
cur_abs_path, _ = os.path.split(os.path.abspath(__file__))
-OLD_CANTIAND_DATA_SAVE_PATH = Path(cur_abs_path, 'cantiand_report_data_saves.json')
-DEPLOY_PARAM_PATH = '/opt/cantian/config/deploy_param.json'
-INSTALL_CONFIG_PATH = '/opt/cantian/action/cantian/install_config.json'
-CANTIAND_INI_PATH = '/mnt/dbdata/local/cantian/tmp/data/cfg/cantiand.ini'
-CANTIAND_LOG_PATH = '/opt/cantian/log/cantian/run/cantiand.rlog'
-CTSQL_INI_PATH = '/mnt/dbdata/local/cantian/tmp/data/cfg/*sql.ini'
+OLD_CANTIAND_DATA_SAVE_PATH = Path(cur_abs_path, "cantiand_report_data_saves.json")
+DEPLOY_PARAM_PATH = "/opt/cantian/config/deploy_param.json"
+INSTALL_CONFIG_PATH = "/opt/cantian/action/cantian/install_config.json"
+CANTIAND_INI_PATH = "/mnt/dbdata/local/cantian/tmp/data/cfg/cantiand.ini"
+CANTIAND_LOG_PATH = "/opt/cantian/log/cantian/run/cantiand.rlog"
+CTSQL_INI_PATH = "/mnt/dbdata/local/cantian/tmp/data/cfg/*sql.ini"
PRIMARY_KEYSTORE = "/opt/cantian/common/config/primary_keystore_bak.ks"
STANDBY_KEYSTORE = "/opt/cantian/common/config/standby_keystore_bak.ks"
LOGICREP_START_TIME_PATH = "/opt/software/tools/logicrep/log/start_time"
@@ -34,12 +35,12 @@ CONVERT_DICT = {
"M": 1024 * 1024,
"G": 1024 * 1024 * 1024,
"T": 1000 * 1024 * 1024 * 1024,
- "P": 1000 * 1000 * 1024 * 1024 * 1024
+ "P": 1000 * 1000 * 1024 * 1024 * 1024,
}
def file_reader(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.read()
@@ -47,28 +48,56 @@ def file_writer(file_path, data):
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
if data:
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.write(json.dumps(data))
else:
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.truncate()
class GetNodesInfo:
def __init__(self):
- self.std_output = {'node_id': '', 'stat': '', 'work_stat': 0, 'cluster_name': '', 'cms_ip': '',
- 'cantian_vlan_ip': '', 'storage_vlan_ip': '', 'share_logic_ip': '',
- 'storage_share_fs': '', 'storage_archive_fs': '', 'storage_metadata_fs': '',
- 'data_buffer_size': '', 'log_buffer_size': '', 'log_buffer_count': '',
- 'cluster_stat': '', 'cms_port': '', 'cms_connected_domain': '', 'disk_iostat': '',
- 'mem_total': '', 'mem_free': '', 'mem_used': '', 'cpu_us': '', 'cpu_sy': '', 'cpu_id': '',
- 'sys_backup_sets': {}, 'checkpoint_pages': {}, 'checkpoint_period': {}, 'global_lock': {},
- 'local_lock': {}, 'local_txn': {}, 'global_txn': {}, "dv_lrpl_detail": {},
- 'pitr_warning': '', 'logicrep': ''
- }
+ self.std_output = {
+ "node_id": "",
+ "stat": "",
+ "work_stat": 0,
+ "cluster_name": "",
+ "cms_ip": "",
+ "cantian_vlan_ip": "",
+ "storage_vlan_ip": "",
+ "share_logic_ip": "",
+ "storage_share_fs": "",
+ "storage_archive_fs": "",
+ "storage_metadata_fs": "",
+ "data_buffer_size": "",
+ "log_buffer_size": "",
+ "log_buffer_count": "",
+ "cluster_stat": "",
+ "cms_port": "",
+ "cms_connected_domain": "",
+ "disk_iostat": "",
+ "mem_total": "",
+ "mem_free": "",
+ "mem_used": "",
+ "cpu_us": "",
+ "cpu_sy": "",
+ "cpu_id": "",
+ "sys_backup_sets": {},
+ "checkpoint_pages": {},
+ "checkpoint_period": {},
+ "global_lock": {},
+ "local_lock": {},
+ "local_txn": {},
+ "global_txn": {},
+ "dv_lrpl_detail": {},
+ "pitr_warning": "",
+ "logicrep": "",
+ }
self.sql = SimpleSql()
- self.kmc_decrypt = CApiWrapper(primary_keystore=PRIMARY_KEYSTORE, standby_keystore=STANDBY_KEYSTORE)
+ self.kmc_decrypt = CApiWrapper(
+ primary_keystore=PRIMARY_KEYSTORE, standby_keystore=STANDBY_KEYSTORE
+ )
self.kmc_decrypt.initialize()
self.deploy_param = None
self.mes_ssl_switch = False
@@ -78,15 +107,18 @@ class GetNodesInfo:
self.storage_archive_fs = None
self.dm_pwd = None
- self.sh_cmd = {'top -bn 1 -i': self.update_cpu_mem_info,
- 'source ~/.bashrc&&cms stat': self.update_cms_status_info,
- 'source ~/.bashrc&&cms node -list': self.update_cms_port_info,
- 'source ~/.bashrc&&cms node -connected': self.update_cms_node_connected,
- 'source ~/.bashrc&&cms diskiostat': self.update_cms_diskiostat
- }
+ self.sh_cmd = {
+ "top -bn 1 -i": self.update_cpu_mem_info,
+ "source ~/.bashrc&&cms stat": self.update_cms_status_info,
+ "source ~/.bashrc&&cms node -list": self.update_cms_port_info,
+ "source ~/.bashrc&&cms node -connected": self.update_cms_node_connected,
+ "source ~/.bashrc&&cms diskiostat": self.update_cms_diskiostat,
+ }
self.sql_file = os.path.join(cur_abs_path, "../config/get_ctsql_info.sql")
- self.logicrep_sql_file = os.path.join(cur_abs_path, "../config/get_logicrep_info.sql")
- self.reg_string = r'invalid argument'
+ self.logicrep_sql_file = os.path.join(
+ cur_abs_path, "../config/get_logicrep_info.sql"
+ )
+ self.reg_string = r"invalid argument"
@staticmethod
def ctsql_result_parse(result: str) -> zip:
@@ -133,8 +165,10 @@ class GetNodesInfo:
Return:
             Returns a dict with the same keys as dict_data, whose values are the ones currently in effect in the cantian process
"""
- cmd = "ps -ef | grep -v grep | grep cantiand | grep -w '\-D " \
- "/mnt/dbdata/local/cantian/tmp/data' | awk '{print $2}'"
+ cmd = (
+ "ps -ef | grep -v grep | grep cantiand | grep -w '\-D "
+ "/mnt/dbdata/local/cantian/tmp/data' | awk '{print $2}'"
+ )
install_config = json.loads(file_reader(INSTALL_CONFIG_PATH))
is_single = install_config.get("M_RUNING_MODE")
if is_single == "cantiand_with_mysql_in_cluster":
@@ -145,16 +179,18 @@ class GetNodesInfo:
return {}
if not os.path.exists(OLD_CANTIAND_DATA_SAVE_PATH):
- record_data = {'report_data': dict_data, 'cantian_pid': pidof_cantiand}
+ record_data = {"report_data": dict_data, "cantian_pid": pidof_cantiand}
file_writer(OLD_CANTIAND_DATA_SAVE_PATH, record_data)
return dict_data
old_report_data = json.loads(file_reader(OLD_CANTIAND_DATA_SAVE_PATH))
- old_data, old_pidof_cantiand = old_report_data.get('report_data'), old_report_data.get('cantian_pid')
- init_record_data = {'report_data': old_data, 'cantian_pid': pidof_cantiand}
+ old_data, old_pidof_cantiand = old_report_data.get(
+ "report_data"
+ ), old_report_data.get("cantian_pid")
+ init_record_data = {"report_data": old_data, "cantian_pid": pidof_cantiand}
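+        # rewrite the cached report only when cantiand has restarted (pid changed)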
if old_pidof_cantiand != pidof_cantiand:
if old_data != dict_data:
- init_record_data['report_data'] = dict_data
+ init_record_data["report_data"] = dict_data
file_writer(OLD_CANTIAND_DATA_SAVE_PATH, init_record_data)
return dict_data
@@ -168,27 +204,31 @@ class GetNodesInfo:
             res: dict passed in by the caller, used to record the metrics collected by this function
"""
         # Find the line number of the most recent NTP time-drift entry
- exist_cmd = f"grep -onE '\[NTP_TIME_WARN\] .* us.*' {CANTIAND_LOG_PATH} " \
- "| grep -v ignored" \
- f"| tail -n 1 | awk -F: '{{print $1}}'"
- ignored_exist_cmd = f"grep -onE '\[NTP_TIME_WARN\] .+ ignored.' {CANTIAND_LOG_PATH}" \
- f" | tail -n 1 | awk -F: '{{print $1}}'"
+ exist_cmd = (
+ f"grep -onE '\[NTP_TIME_WARN\] .* us.*' {CANTIAND_LOG_PATH} "
+ "| grep -v ignored"
+ f"| tail -n 1 | awk -F: '{{print $1}}'"
+ )
+ ignored_exist_cmd = (
+ f"grep -onE '\[NTP_TIME_WARN\] .+ ignored.' {CANTIAND_LOG_PATH}"
+ f" | tail -n 1 | awk -F: '{{print $1}}'"
+ )
_, exist_res, _ = _exec_popen(exist_cmd)
         # No NTP time drift found
if not exist_res:
- res.update({'pitr_warning': 'False'})
+ res.update({"pitr_warning": "False"})
return
_, ignored_res, _ = _exec_popen(ignored_exist_cmd)
         # NTP time drift found
if not ignored_res:
- res.update({'pitr_warning': 'True'})
+ res.update({"pitr_warning": "True"})
return
ignored_res, exist_res = int(ignored_res), int(exist_res)
- pitr_flag = 'False' if ignored_res > exist_res else 'True'
- res.update({'pitr_warning': pitr_flag})
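+        # warn only when the latest drift entry appears on a later line than the latest "ignored" entry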
+ pitr_flag = "False" if ignored_res > exist_res else "True"
+ res.update({"pitr_warning": pitr_flag})
@staticmethod
def get_cms_lock_failed_info(res):
@@ -201,20 +241,23 @@ class GetNodesInfo:
"read failed"
"write failed"
"""
- check_cmd = "zgrep -E \"(cms_disk_lock timeout.|read failed|write failed)\" " \
- "/opt/cantian/log/cms/run/* | grep -oE \"[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9:]+\" | sort"
+ check_cmd = (
+ 'zgrep -E "(cms_disk_lock timeout.|read failed|write failed)" '
+ '/opt/cantian/log/cms/run/* | grep -oE "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9:]+" | sort'
+ )
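+        # timestamps of every lock-failure entry in the cms run logs, sorted ascending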
_, output, _ = _exec_popen(check_cmd)
         # A lock/unlock failure was found
if output:
lock_failed_happen_time = output.split("\n")[-1]
- datetime_object = datetime.strptime(lock_failed_happen_time, '%Y-%m-%d %H:%M:%S')
+ datetime_object = datetime.strptime(
+ lock_failed_happen_time, "%Y-%m-%d %H:%M:%S"
+ )
happen_times = datetime_object.timestamp()
current_time = time.time()
if int(current_time) - int(happen_times) < 10:
- res.update({'cms_lock_status': 'abnormal'})
+ res.update({"cms_lock_status": "abnormal"})
return
- res.update({'cms_lock_status': 'normal'})
-
+ res.update({"cms_lock_status": "normal"})
@staticmethod
def close_child_process(proc):
@@ -230,7 +273,7 @@ class GetNodesInfo:
except Exception as err:
return str(err), ABNORMAL_STATE
- return 'success', NORMAL_STATE
+ return "success", NORMAL_STATE
@staticmethod
def get_logicrep_running_info(max_archive_size, sql_info):
@@ -240,18 +283,30 @@ class GetNodesInfo:
"""
process_speed = sql_info.get("logicrep_progress", {}).get("PROCESS_SPEED", "0")
process_speed = float(process_speed) * CONVERT_DICT.get("M")
- redo_gen_speed = sql_info.get("logicrep_progress", {}).get("REDO_GEN_SPEED", "0")
+ redo_gen_speed = sql_info.get("logicrep_progress", {}).get(
+ "REDO_GEN_SPEED", "0"
+ )
redo_gen_speed = float(redo_gen_speed) * CONVERT_DICT.get("M")
- speed_update_time = sql_info.get("logicrep_progress", {}).get("SPEED_UPDATE_TIME",
- "1970-01-01 00:00:00").split(".")[0]
- speed_update_time = int(datetime.strptime(speed_update_time, '%Y-%m-%d %H:%M:%S').timestamp())
+ speed_update_time = (
+ sql_info.get("logicrep_progress", {})
+ .get("SPEED_UPDATE_TIME", "1970-01-01 00:00:00")
+ .split(".")[0]
+ )
+ speed_update_time = int(
+ datetime.strptime(speed_update_time, "%Y-%m-%d %H:%M:%S").timestamp()
+ )
current_time = int(time.time())
         # If the gap between the ingestion refresh time (speed_update_time) and the current time (current_time) is at most 30s,
         # the effective ingestion speed is the redo flush speed (redo_gen_speed) minus the tool's processing speed (process_speed);
         # otherwise the effective speed is redo_gen_speed
- real_process_speed = redo_gen_speed - process_speed if \
- current_time - speed_update_time < 30 else redo_gen_speed
- arch_clean_upper_limit = sql_info.get("arch_clean_upper_limit", {}).get("RUNTIME_VALUE", 85)
+ real_process_speed = (
+ redo_gen_speed - process_speed
+ if current_time - speed_update_time < 30
+ else redo_gen_speed
+ )
+ arch_clean_upper_limit = sql_info.get("arch_clean_upper_limit", {}).get(
+ "RUNTIME_VALUE", 85
+ )
         # Archive cleanup upper limit (arch_clean_upper_size)
arch_clean_upper_size = max_archive_size * int(arch_clean_upper_limit) / 100
return arch_clean_upper_size, real_process_speed
@@ -263,7 +318,7 @@ class GetNodesInfo:
logicrep_cmd = "ps -ef | grep ZLogCatcherMain | grep -v grep"
if os.path.exists(LOGICREP_START_TIME_PATH):
- with open(LOGICREP_START_TIME_PATH, 'r') as f:
+ with open(LOGICREP_START_TIME_PATH, "r") as f:
start_time = f.readline().strip()
if not start_time:
start_time = "null"
@@ -272,15 +327,15 @@ class GetNodesInfo:
_, process_info, _ = _exec_popen(logicrep_cmd)
if process_info:
- res.update({'logicrep': 'Online', 'logicrep_start_time': start_time})
+ res.update({"logicrep": "Online", "logicrep_start_time": start_time})
res.update(self.get_logicrep_info_from_sql(res))
return
logicrep_path = "/opt/software/tools/logicrep/"
if os.path.exists(logicrep_path):
- res.update({'logicrep': 'Offline', 'logicrep_start_time': start_time})
+ res.update({"logicrep": "Offline", "logicrep_start_time": start_time})
res.update(self.get_logicrep_info_from_sql(res))
return
- res.update({'logicrep': 'None'})
+ res.update({"logicrep": "None"})
def get_certificate_status(self, res):
"""
@@ -292,15 +347,9 @@ class GetNodesInfo:
output, err_state = self.shell_task(cmd)
if not err_state and output:
crl_status, crt_status = re.findall(r"'([^']*)'", output)
- res.update({
- "crl_status": crl_status,
- "crt_status": crt_status
- })
+ res.update({"crl_status": crl_status, "crt_status": crt_status})
else:
- res.update({
- "crl_status": None,
- "crt_status": None
- })
+ res.update({"crl_status": None, "crt_status": None})
def shell_task(self, exec_cmd):
"""公共方法,用于执行shell命令
@@ -309,37 +358,57 @@ class GetNodesInfo:
             exec_cmd: the shell command to execute
"""
try:
- proc = subprocess.Popen(exec_cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
+ proc = subprocess.Popen(
+ exec_cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid
+ )
except Exception as err:
- LOG.error("[shell task] node {} execute '{}' failed, err: {}".format(self.node_id, exec_cmd, str(err)))
+ LOG.error(
+ "[shell task] node {} execute '{}' failed, err: {}".format(
+ self.node_id, exec_cmd, str(err)
+ )
+ )
_, close_state = self.close_child_process(proc)
if close_state:
- LOG.error("[shell task] after node {} executes cmd '{}', "
- "it fails to kill the forked process ".format(self.node_id, exec_cmd))
+ LOG.error(
+ "[shell task] after node {} executes cmd '{}', "
+ "it fails to kill the forked process ".format(
+ self.node_id, exec_cmd
+ )
+ )
return str(err), ABNORMAL_STATE
try:
output, err_state = proc.communicate(timeout=TIME_OUT)
except Exception as err:
- LOG.error("[shell task] node {} execute cmd '{}' failed, err: {}".format(self.node_id, exec_cmd, str(err)))
+ LOG.error(
+ "[shell task] node {} execute cmd '{}' failed, err: {}".format(
+ self.node_id, exec_cmd, str(err)
+ )
+ )
return str(err), ABNORMAL_STATE
finally:
close_res, close_state = self.close_child_process(proc)
if close_state:
- LOG.error("[shell task] after node {} executes cmd '{}', "
- "it fails to kill the forked process ".format(self.node_id, exec_cmd))
+ LOG.error(
+ "[shell task] after node {} executes cmd '{}', "
+ "it fails to kill the forked process ".format(self.node_id, exec_cmd)
+ )
return close_res, close_state
if err_state or not output:
- LOG.error("[shell task] node {} execute cmd '{}' failed, output: {}, "
- "err_state: {}".format(self.node_id, exec_cmd, str(output), err_state))
+ LOG.error(
+ "[shell task] node {} execute cmd '{}' failed, output: {}, "
+ "err_state: {}".format(self.node_id, exec_cmd, str(output), err_state)
+ )
return output, ABNORMAL_STATE
- output = output.decode('utf-8')
+ output = output.decode("utf-8")
if re.findall(self.reg_string, output):
- LOG.error("the execution result of command '{}' matched the regular pattern '{}', "
- "and the execution failed".format(exec_cmd, self.reg_string))
+ LOG.error(
+ "the execution result of command '{}' matched the regular pattern '{}', "
+ "and the execution failed".format(exec_cmd, self.reg_string)
+ )
return output, ABNORMAL_STATE
return output, err_state
@@ -350,21 +419,37 @@ class GetNodesInfo:
Args:
             res: dict passed in by the caller, used to record the metrics collected by this function
"""
- deploy_key_list = ['cluster_name', 'cantian_vlan_ip', 'storage_vlan_ip', 'cms_ip', 'share_logic_ip',
- 'storage_archive_fs', 'storage_share_fs', 'storage_metadata_fs']
- res.update({name: self.deploy_param.get(name, '') for name in deploy_key_list})
+ deploy_key_list = [
+ "cluster_name",
+ "cantian_vlan_ip",
+ "storage_vlan_ip",
+ "cms_ip",
+ "share_logic_ip",
+ "storage_archive_fs",
+ "storage_share_fs",
+ "storage_metadata_fs",
+ ]
+ res.update({name: self.deploy_param.get(name, "") for name in deploy_key_list})
- cantiand_key_list = ['data_buffer_size', 'log_buffer_size', 'log_buffer_count']
+ cantiand_key_list = ["data_buffer_size", "log_buffer_size", "log_buffer_count"]
try:
cantiand_data = file_reader(CANTIAND_INI_PATH)
except Exception as err:
- LOG.error("[file read task] node {} read '{}' from {} failed, "
- "err_details: {}".format(self.node_id, cantiand_key_list, CANTIAND_INI_PATH, str(err)))
+ LOG.error(
+ "[file read task] node {} read '{}' from {} failed, "
+ "err_details: {}".format(
+ self.node_id, cantiand_key_list, CANTIAND_INI_PATH, str(err)
+ )
+ )
else:
- processed_data = [data for data in cantiand_data.split('\n') if data]
- reg_string = r'DATA_BUFFER_SIZE|LOG_BUFFER_SIZE|LOG_BUFFER_COUNT'
- report_data = [item for item in processed_data if re.findall(reg_string, item)]
- cantian_report_data = {item.split(' ')[0].lower(): item.split(' ')[-1] for item in report_data}
+ processed_data = [data for data in cantiand_data.split("\n") if data]
+ reg_string = r"DATA_BUFFER_SIZE|LOG_BUFFER_SIZE|LOG_BUFFER_COUNT"
+ report_data = [
+ item for item in processed_data if re.findall(reg_string, item)
+ ]
+ cantian_report_data = {
+ item.split(" ")[0].lower(): item.split(" ")[-1] for item in report_data
+ }
res.update(self.cantiand_report_handler(cantian_report_data))
def get_info_from_sql(self, res):
@@ -374,7 +459,7 @@ class GetNodesInfo:
             res: dict passed in by the caller, used to record the metrics collected by this function
"""
         # If the cantian process is abnormal, skip collecting ctsql metrics so that reporting does not compete with cantiand for ctsql
- if res.get('stat') != 'ONLINE' or str(res.get('work_stat')) != '1':
+ if res.get("stat") != "ONLINE" or str(res.get("work_stat")) != "1":
return
res.update(self.sql_info_query())
@@ -395,21 +480,27 @@ class GetNodesInfo:
def modify_logicrep_sql_file(self):
logicrep_sql = file_reader(self.logicrep_sql_file)
- logicrep_sql = logicrep_sql.replace('LOGICREP0', 'LOGICREP1')
+ logicrep_sql = logicrep_sql.replace("LOGICREP0", "LOGICREP1")
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(self.logicrep_sql_file, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(
+ os.open(self.logicrep_sql_file, flags, modes), "w", encoding="utf-8"
+ ) as file:
file.write(logicrep_sql)
def get_logicrep_info_from_sql(self, res):
logicrep_process_info = {
"unrep_archive_count": "null",
"unrep_archive_percent": "null",
- "estimate_full_time": "null"
+ "estimate_full_time": "null",
}
- if res.get('stat') != 'ONLINE' or str(res.get('work_stat')) != '1' or not self.decrypt_pwd \
- or not self.storage_archive_fs:
+ if (
+ res.get("stat") != "ONLINE"
+ or str(res.get("work_stat")) != "1"
+ or not self.decrypt_pwd
+ or not self.storage_archive_fs
+ ):
return logicrep_process_info
if self.node_id == 1:
self.modify_logicrep_sql_file()
@@ -419,18 +510,29 @@ class GetNodesInfo:
tmp_archive_count = self.get_tmp_archive_count(res)
max_sequence = sql_info.get("max(sequence)", {}).get("MAX(SEQUENCE#)")
- max_archive_count = tmp_archive_count if not max_sequence else tmp_archive_count + int(max_sequence)
+ max_archive_count = (
+ tmp_archive_count
+ if not max_sequence
+ else tmp_archive_count + int(max_sequence)
+ )
if not max_archive_count:
return logicrep_process_info
- undo_archive_size, max_archive_size, sub_logicrep_process_info = self.get_logicrep_undo_count_and_percent(
- sql_info, res, max_archive_count)
+ undo_archive_size, max_archive_size, sub_logicrep_process_info = (
+ self.get_logicrep_undo_count_and_percent(sql_info, res, max_archive_count)
+ )
arch_clean_upper_size, real_process_speed = self.get_logicrep_running_info(
- max_archive_size, sql_info)
+ max_archive_size, sql_info
+ )
         # Estimated time until the logs are full = (archive cleanup limit (arch_clean_upper_size) - unprocessed log size (undo_archive_size)) / redo flush speed (real_process_speed)
- full_remaining_time = (arch_clean_upper_size - undo_archive_size) / real_process_speed \
- if real_process_speed != 0 else "null"
- logicrep_process_info.update({"estimate_full_time": "{:.2f}s".format(full_remaining_time)})
+ full_remaining_time = (
+ (arch_clean_upper_size - undo_archive_size) / real_process_speed
+ if real_process_speed != 0
+ else "null"
+ )
+ logicrep_process_info.update(
+ {"estimate_full_time": "{:.2f}s".format(full_remaining_time)}
+ )
logicrep_process_info.update(sub_logicrep_process_info)
return logicrep_process_info
@@ -438,33 +540,63 @@ class GetNodesInfo:
"""
         Get the number of archive files not yet replicated (unrep_archive_count) and the unreplicated percentage (unrep_archive_percent)
"""
- max_arch_files_size = sql_info.get("max_arch_files_size", {}).get("RUNTIME_VALUE")
+ max_arch_files_size = sql_info.get("max_arch_files_size", {}).get(
+ "RUNTIME_VALUE"
+ )
units = max_arch_files_size[-1]
max_archive_size = int(max_arch_files_size[:-1]) * CONVERT_DICT.get(units)
arch_file_size = sql_info.get("arch_file_size", {}).get("RUNTIME_VALUE")
- arch_clean_upper_limit = int(sql_info.get("arch_clean_upper_limit", {}).get("RUNTIME_VALUE", "85"))
+ arch_clean_upper_limit = int(
+ sql_info.get("arch_clean_upper_limit", {}).get("RUNTIME_VALUE", "85")
+ )
units = arch_file_size[-1]
single_archive_size = int(arch_file_size[:-1]) * CONVERT_DICT.get(units)
logic_point = sql_info.get("logicrep_progress", {}).get("LOGPOINT")
temp_archive_size = self.get_tmp_archive_size(res)
if not logic_point:
undo_archive_count = max_archive_count
- undo_archive_size = undo_archive_count * single_archive_size if temp_archive_size == 0 else (
- (undo_archive_count - 1) * single_archive_size + temp_archive_size)
- undo_archive_percent = 0 if max_archive_size == 0 else \
- (undo_archive_size / (max_archive_size * arch_clean_upper_limit / 100) * 100)
+ undo_archive_size = (
+ undo_archive_count * single_archive_size
+ if temp_archive_size == 0
+ else (
+ (undo_archive_count - 1) * single_archive_size + temp_archive_size
+ )
+ )
+ undo_archive_percent = (
+ 0
+ if max_archive_size == 0
+ else (
+ undo_archive_size
+ / (max_archive_size * arch_clean_upper_limit / 100)
+ * 100
+ )
+ )
else:
point_info = logic_point.split("-")
asn = int(point_info[1], 16)
offset = int(point_info[2], 16)
undo_archive_count = max_archive_count - asn + 1
- undo_archive_size = temp_archive_size - offset if undo_archive_count == 0 \
- else (undo_archive_count - 1) * single_archive_size + temp_archive_size - offset
- undo_archive_percent = 0 if max_archive_size == 0 else \
- (undo_archive_size / (max_archive_size * arch_clean_upper_limit / 100) * 100)
+ undo_archive_size = (
+ temp_archive_size - offset
+ if undo_archive_count == 0
+ else (undo_archive_count - 1) * single_archive_size
+ + temp_archive_size
+ - offset
+ )
+ undo_archive_percent = (
+ 0
+ if max_archive_size == 0
+ else (
+ undo_archive_size
+ / (max_archive_size * arch_clean_upper_limit / 100)
+ * 100
+ )
+ )
undo_archive_percent = "{:.2f}%".format(undo_archive_percent)
- logicrep_process_info = {"unrep_archive_count": str(undo_archive_count),
- "unrep_archive_percent": str(undo_archive_percent)}
+ logicrep_process_info = {
+ "unrep_archive_count": str(undo_archive_count),
+ "unrep_archive_percent": str(undo_archive_percent),
+ }
return undo_archive_size, max_archive_size, logicrep_process_info
def sql_logicrep_info_query(self):
@@ -474,8 +606,12 @@ class GetNodesInfo:
"""
res = {}
report_key = [
- "MAX(SEQUENCE)", "LOGICREP_PROGRESS", "LREP_MODE", "MAX_ARCH_FILES_SIZE",
- "ARCH_FILE_SIZE", "ARCH_CLEAN_UPPER_LIMIT"
+ "MAX(SEQUENCE)",
+ "LOGICREP_PROGRESS",
+ "LREP_MODE",
+ "MAX_ARCH_FILES_SIZE",
+ "ARCH_FILE_SIZE",
+ "ARCH_CLEAN_UPPER_LIMIT",
]
return_code, sql_res = self.sql.query(self.logicrep_sql_file)
if not return_code and sql_res:
@@ -489,10 +625,14 @@ class GetNodesInfo:
"""
res = {}
report_key = [
- "SYS_BACKUP_SETS", "CHECKPOINT_PAGES",
- "CHECKPOINT_PERIOD", "GLOBAL_LOCK",
- "LOCAL_LOCK", "LOCAL_TXN", "GLOBAL_TXN",
- "DV_LRPL_DETAIL"
+ "SYS_BACKUP_SETS",
+ "CHECKPOINT_PAGES",
+ "CHECKPOINT_PERIOD",
+ "GLOBAL_LOCK",
+ "LOCAL_LOCK",
+ "LOCAL_TXN",
+ "GLOBAL_TXN",
+ "DV_LRPL_DETAIL",
]
return_code, sql_res = self.sql.query(self.sql_file)
if not return_code and sql_res:
@@ -520,11 +660,9 @@ class GetNodesInfo:
"""
res = {}
sql_res_list = sql_res.split("SQL>")
- for index, sql_res in enumerate(sql_res_list[1:len(report_key) + 1]):
+ for index, sql_res in enumerate(sql_res_list[1 : len(report_key) + 1]):
res.update(
- {
- report_key[index].lower(): dict(self.ctsql_result_parse(sql_res))
- }
+ {report_key[index].lower(): dict(self.ctsql_result_parse(sql_res))}
)
return res
@@ -549,10 +687,12 @@ class GetNodesInfo:
"""
cms_port_info, cms_port_err = self.shell_task(cms_port_cmd)
if not cms_port_err and cms_port_info:
- tmp_port_info = [re.split(r'\s+', val.strip(' '))
- for _, val in enumerate(cms_port_info.split('\n'))
- if val][1:]
- return {'cms_port': str(tmp_port_info[self.node_id][-1])}
+ tmp_port_info = [
+ re.split(r"\s+", val.strip(" "))
+ for _, val in enumerate(cms_port_info.split("\n"))
+ if val
+ ][1:]
+ return {"cms_port": str(tmp_port_info[self.node_id][-1])}
return {}
@@ -564,13 +704,28 @@ class GetNodesInfo:
"""
node_info, err_code = self.shell_task(cms_node_connected_cmd)
if not err_code and node_info:
- processed_info = [re.split(r'\s+', item.strip(' ')) for item in node_info.split('\n') if item]
+ processed_info = [
+ re.split(r"\s+", item.strip(" "))
+ for item in node_info.split("\n")
+ if item
+ ]
remain_nums = len(processed_info[1:])
node_id_idx, ip_idx, voting_idx = 0, 2, 4
- node_data = [{'NODE_ID': item[node_id_idx], 'IP': item[ip_idx], 'VOTING': item[voting_idx]}
- for item in processed_info[1:]]
+ node_data = [
+ {
+ "NODE_ID": item[node_id_idx],
+ "IP": item[ip_idx],
+ "VOTING": item[voting_idx],
+ }
+ for item in processed_info[1:]
+ ]
- res = {'cms_connected_domain': {'remaining_nodes_nums': remain_nums, 'remaining_nodes': node_data}}
+ res = {
+ "cms_connected_domain": {
+ "remaining_nodes_nums": remain_nums,
+ "remaining_nodes": node_data,
+ }
+ }
return res
return {}
@@ -583,21 +738,28 @@ class GetNodesInfo:
"""
res = {}
- id_to_key = {'0': 'node_id', '2': 'stat', '5': 'work_stat'}
+ id_to_key = {"0": "node_id", "2": "stat", "5": "work_stat"}
cms_output, cms_err = self.shell_task(cms_stats_cmd)
if not cms_err and cms_output:
- tmp_info = [re.split(r'\s+', val.strip(' '))
- for _, val in enumerate(cms_output.split('\n'))
- if val]
- cms_stat = [{val: item[int(key)] for key, val in id_to_key.items()} for item in tmp_info[1:]]
- cluster_stat = 0 if {'ONLINE'} == set([item.get('stat') for item in cms_stat]) else 1
+ tmp_info = [
+ re.split(r"\s+", val.strip(" "))
+ for _, val in enumerate(cms_output.split("\n"))
+ if val
+ ]
+ cms_stat = [
+ {val: item[int(key)] for key, val in id_to_key.items()}
+ for item in tmp_info[1:]
+ ]
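+            # cluster_stat is 0 (healthy) only when every node reports ONLINE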
+ cluster_stat = (
+ 0 if {"ONLINE"} == set([item.get("stat") for item in cms_stat]) else 1
+ )
stat_data = cms_stat[self.node_id]
- work_stat = stat_data.get('work_stat')
- stat_data['work_stat'] = int(work_stat)
+ work_stat = stat_data.get("work_stat")
+ stat_data["work_stat"] = int(work_stat)
res.update(stat_data)
- res.update({'cluster_stat': cluster_stat})
+ res.update({"cluster_stat": cluster_stat})
return res
@@ -611,7 +773,7 @@ class GetNodesInfo:
"""
cms_output, cms_err = self.shell_task(cms_disk_iostat_cmd)
if not cms_err and cms_output:
- return {'disk_iostat': cms_output.split('\n')[0]}
+ return {"disk_iostat": cms_output.split("\n")[0]}
return {}
@@ -624,15 +786,19 @@ class GetNodesInfo:
output, err = self.shell_task(exec_cmd)
if not err and output:
- output = output.split('\n')
- cpu_info, physical_mem = [item.strip() for item in re.split(r'[,:]', output[2].strip())], \
- [item.strip() for item in re.split(r'[,:]', output[3].strip())]
- mem_unit = physical_mem[0].split(' ')[0]
- cpu_res, mem_res = {('cpu_' + item.split(' ')[1]): item.split(' ')[0] + '%'
- for item in cpu_info[1:5]}, \
- {('mem_' + item.split(' ')[1]): item.split(' ')[0] + mem_unit
- for item in physical_mem[1:4]}
- cpu_res.pop('cpu_ni')
+ output = output.split("\n")
+ cpu_info, physical_mem = [
+ item.strip() for item in re.split(r"[,:]", output[2].strip())
+ ], [item.strip() for item in re.split(r"[,:]", output[3].strip())]
+ mem_unit = physical_mem[0].split(" ")[0]
+ cpu_res, mem_res = {
+ ("cpu_" + item.split(" ")[1]): item.split(" ")[0] + "%"
+ for item in cpu_info[1:5]
+ }, {
+ ("mem_" + item.split(" ")[1]): item.split(" ")[0] + mem_unit
+ for item in physical_mem[1:4]
+ }
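+            # drop the "nice" CPU share; the report keeps only cpu_us, cpu_sy and cpu_id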
+ cpu_res.pop("cpu_ni")
mem_res.update(cpu_res)
return mem_res
@@ -663,10 +829,14 @@ class GetNodesInfo:
try:
self.deploy_param = json.loads(file_reader(DEPLOY_PARAM_PATH))
except Exception as err:
- LOG.error('[result] execution failed when read deploy_param.json, [err_msg] {}'.format(str(err)))
+ LOG.error(
+ "[result] execution failed when read deploy_param.json, [err_msg] {}".format(
+ str(err)
+ )
+ )
return res
- self.node_id = int(self.deploy_param.get('node_id'))
+ self.node_id = int(self.deploy_param.get("node_id"))
self.mes_ssl_switch = self.deploy_param.get("mes_ssl_switch")
self.storage_archive_fs = self.deploy_param.get("storage_archive_fs")
@@ -675,15 +845,23 @@ class GetNodesInfo:
self._init_ctsql_vars()
         # Restore the environment variables so that the cms commands do not fail
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
try:
self.get_export_data(res)
except Exception as err:
- LOG.error('[result] execution failed when get specific export data. '
- '[err_msg] {}, [err_traceback] {}'.format(str(err), traceback.format_exc(limit=-1)))
+ LOG.error(
+ "[result] execution failed when get specific export data. "
+ "[err_msg] {}, [err_traceback] {}".format(
+ str(err), traceback.format_exc(limit=-1)
+ )
+ )
return res
return res
@@ -691,13 +869,17 @@ class GetNodesInfo:
def _init_ctsql_vars(self):
ctsql_ini_path = glob.glob(CTSQL_INI_PATH)[0]
ctsql_ini_data = file_reader(ctsql_ini_path)
- encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find('=') + 1:].strip()
+ encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find("=") + 1 :].strip()
try:
self.decrypt_pwd = self.kmc_decrypt.decrypt(encrypt_pwd)
except Exception as err:
             # Rate-limit the log output
if not self.ctsql_decrypt_error_flag:
- LOG.error('[result] decrypt ctsql passwd failed, [err_msg] {}'.format(str(err)))
+ LOG.error(
+ "[result] decrypt ctsql passwd failed, [err_msg] {}".format(
+ str(err)
+ )
+ )
self.ctsql_decrypt_error_flag = True
self.ctsql_decrypt_error_flag = False
self.sql.update_sys_data(self.node_id, self.decrypt_pwd)
@@ -707,20 +889,26 @@ class GetDbstorInfo:
def __init__(self):
self.deploy_config = self.get_deploy_info()
self.std_output = {
- self.deploy_config.get("storage_dbstor_fs"):
- {
- 'limit': 0, 'used': 0, 'free': 0,
- 'snapshotLimit': 0, 'snapshotUsed': 0,
- 'fsId': '', 'linkState': ''
- },
- self.deploy_config.get("storage_dbstor_page_fs"):
- {
- 'limit': 0, 'used': 0, 'free': 0,
- 'snapshotLimit': 0, 'snapshotUsed': 0,
- 'fsId': '', 'linkState': ''
- }
+ self.deploy_config.get("storage_dbstor_fs"): {
+ "limit": 0,
+ "used": 0,
+ "free": 0,
+ "snapshotLimit": 0,
+ "snapshotUsed": 0,
+ "fsId": "",
+ "linkState": "",
+ },
+ self.deploy_config.get("storage_dbstor_page_fs"): {
+ "limit": 0,
+ "used": 0,
+ "free": 0,
+ "snapshotLimit": 0,
+ "snapshotUsed": 0,
+ "fsId": "",
+ "linkState": "",
+ },
}
- self.info_file_path = '/opt/cantian/common/data/dbstor_info.json'
+ self.info_file_path = "/opt/cantian/common/data/dbstor_info.json"
self.index = 0
self.max_index = 10
self.last_time_stamp = None
@@ -739,16 +927,20 @@ class GetDbstorInfo:
break
except Exception as err:
try_times -= 1
- LOG.error("[dbstor info reader] fail to read dbstor info from '{}', "
- "err_msg: {}, remaining attempts: {}".format(self.info_file_path, str(err), try_times))
+ LOG.error(
+ "[dbstor info reader] fail to read dbstor info from '{}', "
+ "err_msg: {}, remaining attempts: {}".format(
+ self.info_file_path, str(err), try_times
+ )
+ )
time.sleep(1)
continue
if not dbstor_info:
- raise Exception('dbstor_info is empty.')
+ raise Exception("dbstor_info is empty.")
dbstor_log_fs, dbstor_page_fs = dbstor_info
- time_stamp, _ = dbstor_log_fs.pop('timestamp'), dbstor_page_fs.pop('timestamp')
+ time_stamp, _ = dbstor_log_fs.pop("timestamp"), dbstor_page_fs.pop("timestamp")
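+        # an unchanged timestamp means the dbstor info file has not been refreshed since the last read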
if time_stamp != self.last_time_stamp:
self.index, self.last_time_stamp = 0, time_stamp
else:
@@ -766,10 +958,12 @@ class GetDbstorInfo:
if dbstor_info:
dbstor_log_fs, dbstor_page_fs = dbstor_info
- log_fs_name, page_fs_name = dbstor_log_fs.pop('fsName'), dbstor_page_fs.pop('fsName')
+ log_fs_name, page_fs_name = dbstor_log_fs.pop("fsName"), dbstor_page_fs.pop(
+ "fsName"
+ )
cur_res = {log_fs_name: dbstor_log_fs, page_fs_name: dbstor_page_fs}
if self.index >= self.max_index:
- cur_res.update({'work_stat': 6})
+ cur_res.update({"work_stat": 6})
res.update(cur_res)
return res
diff --git a/ct_om/service/cantian_exporter/exporter/log.py b/ct_om/service/cantian_exporter/exporter/log.py
index 88eaaba88b07ba6b822fae905f6c45ceb961dd84..5b5a262b647adcc7b61e9c8249abe60ef5ab908e 100644
--- a/ct_om/service/cantian_exporter/exporter/log.py
+++ b/ct_om/service/cantian_exporter/exporter/log.py
@@ -44,15 +44,19 @@ def setup(project_name):
log_path = _get_log_file_path(project_name)
if log_path:
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
log_root.addHandler(file_log)
for handler in log_root.handlers:
handler.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")))
+ datefmt=log_config.get("log_date_format"),
+ )
+ )
if log_config.get("debug"):
log_root.setLevel(logging.DEBUG)
diff --git a/ct_om/service/cantian_exporter/exporter/save_file.py b/ct_om/service/cantian_exporter/exporter/save_file.py
index ccc5633fc826b05121af6da9408238c4ba74c616..0193aad96d89664b904c7ca292239db9b0a05832 100644
--- a/ct_om/service/cantian_exporter/exporter/save_file.py
+++ b/ct_om/service/cantian_exporter/exporter/save_file.py
@@ -11,17 +11,21 @@ upper_path = os.path.abspath(os.path.join(dir_name, ".."))
class SaveFile:
def __init__(self):
- self.file_save_path = os.path.join(upper_path, 'exporter_data')
+ self.file_save_path = os.path.join(upper_path, "exporter_data")
self.uid = os.getuid()
self._init_config()
- self.save = OrderedDict({name: os.path.join(self.file_save_path, name)
- for name in os.listdir(self.file_save_path)})
+ self.save = OrderedDict(
+ {
+ name: os.path.join(self.file_save_path, name)
+ for name in os.listdir(self.file_save_path)
+ }
+ )
@staticmethod
def gen_file_name():
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
- return "%s.json" % str(cur_time.strftime('%Y%m%d%H%M%S'))
+ return "%s.json" % str(cur_time.strftime("%Y%m%d%H%M%S"))
def _init_config(self):
if not os.path.exists(self.file_save_path):
@@ -44,7 +48,9 @@ class SaveFile:
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(cur_file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(
+ os.open(cur_file_path, flags, modes), "w", encoding="utf-8"
+ ) as file:
file.write(json.dumps(data_to_write))
os.chmod(cur_file_path, 0o640)
os.chown(cur_file_path, self.uid, 1100)
diff --git a/ct_om/service/cantian_exporter/exporter/tool.py b/ct_om/service/cantian_exporter/exporter/tool.py
index e159a84bf0694d77606d77c077c157faa03c9828..81b3f8945da5c429919343a409c5f6ba22a9a098 100644
--- a/ct_om/service/cantian_exporter/exporter/tool.py
+++ b/ct_om/service/cantian_exporter/exporter/tool.py
@@ -13,11 +13,11 @@ def close_child_process(proc):
os.killpg(proc.pid, signal.SIGKILL)
except ProcessLookupError as err:
_ = err
- return 'success'
+ return "success"
except Exception as err:
return str(err)
- return 'success'
+ return "success"
def _exec_popen(cmd):
@@ -27,8 +27,14 @@ def _exec_popen(cmd):
return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
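+    # run in a separate session so the forked bash can be killed as a group if it hangs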
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
try:
@@ -40,7 +46,11 @@ def _exec_popen(cmd):
kill_fork_process_res = close_child_process(pobj)
if kill_fork_process_res != "success":
- return FAIL, "", "kill fork process failed, err_details: {}".format(kill_fork_process_res)
+ return (
+ FAIL,
+ "",
+ "kill fork process failed, err_details: {}".format(kill_fork_process_res),
+ )
stdout, stderr = stdout.decode(), stderr.decode()
if stdout[-1:] == os.linesep:
@@ -57,8 +67,8 @@ class SimpleSql:
self.node_id = None
self.sql_sh_path = None
self.time_out = 5
- self.ctsql_ip_addr = '127.0.0.1'
- self.ctsql_port = '1611'
+ self.ctsql_ip_addr = "127.0.0.1"
+ self.ctsql_port = "1611"
self.__decrypt_pwd = None
def update_sys_data(self, cur_node_id, decrypt_pwd):
@@ -66,14 +76,19 @@ class SimpleSql:
self.__decrypt_pwd = decrypt_pwd
def query(self, sql_file):
- exec_cmd = "echo '{}' | ctsql sys@{}:{} -q -f \"{}\"".format(self.__decrypt_pwd, self.ctsql_ip_addr,
- self.ctsql_port, sql_file)
- return_code, stdout, stderr = _exec_popen('source ~/.bashrc&&{}'.format(exec_cmd))
+ exec_cmd = "echo '{}' | ctsql sys@{}:{} -q -f \"{}\"".format(
+ self.__decrypt_pwd, self.ctsql_ip_addr, self.ctsql_port, sql_file
+ )
+ return_code, stdout, stderr = _exec_popen(
+ "source ~/.bashrc&&{}".format(exec_cmd)
+ )
if return_code:
stderr = str(stderr)
             stderr = stderr.replace(self.__decrypt_pwd, "*****")
- LOG.error("[sql shell task] node {} execute cmd '{}' "
- "failed, err: {}".format(self.node_id, self.sql_statement, str(stderr)))
+ LOG.error(
+ "[sql shell task] node {} execute cmd '{}' "
+ "failed, err: {}".format(self.node_id, self.sql_statement, str(stderr))
+ )
return return_code, stdout
diff --git a/ct_om/service/cantian_exporter/query_storage_info/get_dr_info.py b/ct_om/service/cantian_exporter/query_storage_info/get_dr_info.py
index ebe22672d693352b16e6c45630401c7bbda5aff7..c78722476d3e0bd11877f9a75d8ed9f8afaa3ecd 100644
--- a/ct_om/service/cantian_exporter/query_storage_info/get_dr_info.py
+++ b/ct_om/service/cantian_exporter/query_storage_info/get_dr_info.py
@@ -2,17 +2,22 @@
import json
import os
import sys
+
try:
from query_storage_info.rest_client import RestClient, read_helper
except ImportError:
     # Non-DR deployments do not depend on the requests library
pass
from query_storage_info.response_parse import ResponseParse
-from query_storage_info.rest_constant import Constant, \
- MetroDomainRunningStatus, VstorePairRunningStatus, HealthStatus
+from query_storage_info.rest_constant import (
+ Constant,
+ MetroDomainRunningStatus,
+ VstorePairRunningStatus,
+ HealthStatus,
+)
from exporter.log import EXPORTER_LOG as LOG
-sys.path.append('/opt/cantian/action/dbstor')
+sys.path.append("/opt/cantian/action/dbstor")
from kmc_adapter import CApiWrapper
@@ -49,10 +54,10 @@ class DRStatusCheck(object):
err_msg = "Login failed"
raise Exception(err_msg)
rsp_code, rsp_result, rsp_data = result.get_rsp_data()
- error_code = rsp_result.get('code')
+ error_code = rsp_result.get("code")
if rsp_code != 0 or error_code != 0:
- error_des = rsp_result.get('description')
- error_sgt = rsp_result.get('suggestion')
+ error_des = rsp_result.get("description")
+ error_sgt = rsp_result.get("suggestion")
err_msg = err_msg % (error_code, error_des, error_sgt)
raise Exception(err_msg)
return rsp_data
@@ -62,10 +67,10 @@ class DRStatusCheck(object):
err_msg = err_msg + ", Detail:[%s]%s.Suggestion:%s"
result = ResponseParse(res)
rsp_code, rsp_result, rsp_data = result.get_omtask_rsp_data()
- if rsp_code != 0 or (rsp_result.get('code') and rsp_result.get('code') != 0):
- error_des = rsp_result.get('description')
- error_sgt = rsp_result.get('suggestion')
- err_msg = err_msg % (rsp_result.get('code'), error_des, error_sgt)
+ if rsp_code != 0 or (rsp_result.get("code") and rsp_result.get("code") != 0):
+ error_des = rsp_result.get("description")
+ error_sgt = rsp_result.get("suggestion")
+ err_msg = err_msg % (rsp_result.get("code"), error_des, error_sgt)
raise Exception(err_msg)
return rsp_data
@@ -77,7 +82,9 @@ class DRStatusCheck(object):
self.dm_user = self.dr_deploy_params.get("dm_user")
dm_pwd = self.dr_deploy_params.get("dm_pwd")
if os.path.exists(DR_DEPLOY_PARAM):
- self.kmc_decrypt = CApiWrapper(primary_keystore=PRIMARY_KEYSTORE, standby_keystore=STANDBY_KEYSTORE)
+ self.kmc_decrypt = CApiWrapper(
+ primary_keystore=PRIMARY_KEYSTORE, standby_keystore=STANDBY_KEYSTORE
+ )
self.kmc_decrypt.initialize()
try:
self.dm_pwd = self.kmc_decrypt.decrypt(dm_pwd)
@@ -86,9 +93,13 @@ class DRStatusCheck(object):
return
self.kmc_decrypt.finalize()
         # Restore the environment variables so that the cms commands do not fail
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
self.rest_client = RestClient((self.dm_ip, self.dm_user, self.dm_pwd))
self.rest_client.login()
@@ -100,7 +111,7 @@ class DRStatusCheck(object):
"device_id": self.remote_device_id,
"url": Constant.QUERY_SYSTEM_INFO.replace("{deviceId}", "xxx"),
"method": "GET",
- "body": {}
+ "body": {},
}
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Failed to query remote storage system info"
@@ -126,7 +137,10 @@ class DRStatusCheck(object):
        :param remote_device_id: remote device ID
        :return: list
"""
- url = Constant.QUERY_REMOTE_DEVICE_INFO.format(deviceId=self.device_id) + f"/{remote_device_id}"
+ url = (
+ Constant.QUERY_REMOTE_DEVICE_INFO.format(deviceId=self.device_id)
+ + f"/{remote_device_id}"
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = "Failed to query remote device info"
remote_device_info = self.result_parse(err_msg, res)
@@ -160,13 +174,17 @@ class DRStatusCheck(object):
return get_status(running_status, VstorePairRunningStatus)
def query_rep_link_status(self):
- url = Constant.IP_LINK.format(deviceId=self.device_id) + \
- "?DEVICEID=%s&LINKUSAGE=true&range=[0-10]" % self.remote_device_id
+ url = (
+ Constant.IP_LINK.format(deviceId=self.device_id)
+ + "?DEVICEID=%s&LINKUSAGE=true&range=[0-10]" % self.remote_device_id
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = "Failed to rep link info"
ip_links = self.result_parse(err_msg, res)
- url = Constant.FC_LINK.format(deviceId=self.device_id) + \
- "?DEVICEID=%s&LINKUSAGE=true&range=[0-10]" % self.remote_device_id
+ url = (
+ Constant.FC_LINK.format(deviceId=self.device_id)
+ + "?DEVICEID=%s&LINKUSAGE=true&range=[0-10]" % self.remote_device_id
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = "Failed to rep link info"
fc_links = self.result_parse(err_msg, res)
@@ -216,7 +234,7 @@ class DRStatusCheck(object):
"remote_device_status": "Abnormal",
"rep_link_status": "Abnormal",
"metro_domain_status": "Abnormal",
- "metro_vstore_status": "Abnormal"
+ "metro_vstore_status": "Abnormal",
}
self.remote_device_id = self.dr_deploy_params.get("remote_device_id")
hyper_domain_id = self.dr_deploy_params.get("hyper_domain_id")
@@ -248,7 +266,9 @@ class DRStatusCheck(object):
except Exception as err:
metro_domain_status = "Abnormal"
try:
- metro_vstore_status = self.query_hyper_metro_vstore_pair_info(vstore_pair_id)
+ metro_vstore_status = self.query_hyper_metro_vstore_pair_info(
+ vstore_pair_id
+ )
except Exception as err:
metro_vstore_status = "Abnormal"
try:
@@ -265,6 +285,6 @@ class DRStatusCheck(object):
"remote_device_status": remote_device_status,
"rep_link_status": rep_link_status,
"metro_domain_status": metro_domain_status,
- "metro_vstore_status": metro_vstore_status
+ "metro_vstore_status": metro_vstore_status,
}
return data
diff --git a/ct_om/service/cantian_exporter/query_storage_info/response_parse.py b/ct_om/service/cantian_exporter/query_storage_info/response_parse.py
index d803468ff12f91aaa1bcd073281e1cec9dffdbf9..404cbb147b2e06fb46e20554b3af5ee740b3f8f9 100644
--- a/ct_om/service/cantian_exporter/query_storage_info/response_parse.py
+++ b/ct_om/service/cantian_exporter/query_storage_info/response_parse.py
@@ -1,5 +1,6 @@
# coding=utf-8
+
class ResponseParse(object):
def __init__(self, res):
"""
@@ -14,11 +15,11 @@ class ResponseParse(object):
if status_code == 200:
res = self.res.json()
if "error" in res:
- ret_result = res.get('error')
+ ret_result = res.get("error")
else:
- ret_result = res.get('result')
- error_code = ret_result['code']
- error_des = ret_result['description']
+ ret_result = res.get("result")
+ error_code = ret_result["code"]
+ error_des = ret_result["description"]
if error_des is None or error_code == 0:
error_des = "success"
return status_code, int(error_code), error_des
@@ -31,10 +32,10 @@ class ResponseParse(object):
if status_code == 200:
rsp_code = 0
if "error" in self.res.json():
- ret_result = self.res.json().get('error')
+ ret_result = self.res.json().get("error")
if "result" in self.res.json():
- ret_result = self.res.json().get('result')
- ret_data = self.res.json().get('data')
+ ret_result = self.res.json().get("result")
+ ret_data = self.res.json().get("data")
return rsp_code, ret_result, ret_data
def get_omtask_rsp_data(self):
@@ -45,8 +46,8 @@ class ResponseParse(object):
if status_code == 200:
rsp_code = 0
if "error" in self.res.json():
- ret_result = self.res.json().get('error')
+ ret_result = self.res.json().get("error")
if "result" in self.res.json():
- ret_result = self.res.json().get('result')
- ret_data = self.res.json().get('data')
+ ret_result = self.res.json().get("result")
+ ret_data = self.res.json().get("data")
return rsp_code, ret_result, ret_data
diff --git a/ct_om/service/cantian_exporter/query_storage_info/rest_client.py b/ct_om/service/cantian_exporter/query_storage_info/rest_client.py
index c7c9aa6a509b42e02b3cdc759b16e709a1c554fc..4f806973490182f165b166b859f77aea9406c1df 100644
--- a/ct_om/service/cantian_exporter/query_storage_info/rest_client.py
+++ b/ct_om/service/cantian_exporter/query_storage_info/rest_client.py
@@ -26,66 +26,80 @@ class RestClient:
def gen_timestamp():
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
- return str(cur_time.strftime('%Y%m%d%H%M%S'))
+ return str(cur_time.strftime("%Y%m%d%H%M%S"))
@staticmethod
def exception_handler(err_msg=None, cur_mode=None):
- err_info = '[current_mode] {}, [err_info] {}'.format(cur_mode, err_msg)
+ err_info = "[current_mode] {}, [err_info] {}".format(cur_mode, err_msg)
raise ExecutionError(err_info)
@staticmethod
def response_parse(res_data):
status_code = res_data.status_code
- err_code, err_details = -1, 'failed'
+ err_code, err_details = -1, "failed"
if status_code == 200:
exec_res = res_data.json()
- err_code, err_details = \
- exec_res.get('error').get('code'), exec_res.get('error').get('description')
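+            # Even with HTTP 200, the body carries an "error" object whose code/description signal failure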
+ err_code, err_details = exec_res.get("error").get("code"), exec_res.get(
+ "error"
+ ).get("description")
return status_code, int(err_code), err_details
def update_cookies(self, res):
- res_body, set_cookie = res.json().get('data'), res.headers.get('Set-Cookie')
+ res_body, set_cookie = res.json().get("data"), res.headers.get("Set-Cookie")
- self.token, self.device_id = res_body.get('iBaseToken'), res_body.get('deviceid')
+ self.token, self.device_id = res_body.get("iBaseToken"), res_body.get(
+ "deviceid"
+ )
- match_res = re.findall(r'session=ismsession=\w+;', set_cookie)
+ match_res = re.findall(r"session=ismsession=\w+;", set_cookie)
if match_res:
self.ism_session = match_res[0][:-1]
- def make_header(self, content_type='application/json'):
- header = {'Content-type': content_type}
+ def make_header(self, content_type="application/json"):
+ header = {"Content-type": content_type}
if self.token:
- header['iBaseToken'] = self.token
+ header["iBaseToken"] = self.token
if self.ism_session:
- header['Cookie'] = self.ism_session
+ header["Cookie"] = self.ism_session
return header
def login(self, keep_session=False):
- url = '{}{}:{}{}'.format(RestElemConstant.HTTPS, self.ip_addr, RestElemConstant.PORT, RestElemConstant.LOGIN)
+ url = "{}{}:{}{}".format(
+ RestElemConstant.HTTPS,
+ self.ip_addr,
+ RestElemConstant.PORT,
+ RestElemConstant.LOGIN,
+ )
user_info = {
- 'username': self.user_name,
- 'password': self.passwd,
- 'scope': 0,
- 'loginMode': 3,
- 'timeConversion': 0,
- 'isEncrypt': 'false'
+ "username": self.user_name,
+ "password": self.passwd,
+ "scope": 0,
+ "loginMode": 3,
+ "timeConversion": 0,
+ "isEncrypt": "false",
}
login_header = {
- 'Content-type': 'application/json',
- 'Cookie': '__LANGUAGE_KEY__=zh-CN; __IBASE_LANGUAGE_KEY__=zh-CN'
+ "Content-type": "application/json",
+ "Cookie": "__LANGUAGE_KEY__=zh-CN; __IBASE_LANGUAGE_KEY__=zh-CN",
}
requests.packages.urllib3.disable_warnings()
with requests.session() as session:
- res = session.post(url, data=json.dumps(user_info), headers=login_header, verify=False)
+ res = session.post(
+ url, data=json.dumps(user_info), headers=login_header, verify=False
+ )
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = ('Login DM failed {}, status_code: {}, err_code: {}, '
- 'err_details: {}'.format(self.ip_addr, status_code, err_code, err_details))
+ err_msg = (
+ "Login DM failed {}, status_code: {}, err_code: {}, "
+ "err_details: {}".format(
+ self.ip_addr, status_code, err_code, err_details
+ )
+ )
raise Exception(err_msg)
self.update_cookies(res)
@@ -98,7 +112,7 @@ class RestClient:
def logout(self):
url = RestElemConstant.LOGOUT.format(deviceId=self.device_id)
- res = self.normal_request(url, 'delete')
+ res = self.normal_request(url, "delete")
result = ResponseParse(res)
status_code, error_code, error_des = result.get_res_code()
if status_code != 200 or error_code != 0:
@@ -117,26 +131,34 @@ class RestClient:
url = Constant.HTTPS + self.ip_addr + ":" + Constant.PORT + url
if keep_session:
req = self.session
- self.token = self.res_login.get('data').get('ibasetoken')
+ self.token = self.res_login.get("data").get("ibasetoken")
else:
req = requests.session()
headers = self.make_header()
with req as session:
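+            # Dispatch on the HTTP verb; every branch shares the same headers, verify=False and timeout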
- if method == 'put':
- res = session.put(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'post':
- res = session.post(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'get':
- res = session.get(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'delete':
- res = session.delete(url, data=data, headers=headers, verify=False, timeout=timeout)
+ if method == "put":
+ res = session.put(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "post":
+ res = session.post(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "get":
+ res = session.get(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "delete":
+ res = session.delete(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
res.close()
return res
def read_helper(file_path):
- with open(file_path, 'r', encoding='utf-8') as f_handler:
+ with open(file_path, "r", encoding="utf-8") as f_handler:
deploy_data = f_handler.read()
return deploy_data
diff --git a/ct_om/service/cantian_exporter/query_storage_info/rest_constant.py b/ct_om/service/cantian_exporter/query_storage_info/rest_constant.py
index 42c617d79d243929abb9260282ef564fb8668840..78a23f77b966048c52cb0eadec7846322c18d350 100644
--- a/ct_om/service/cantian_exporter/query_storage_info/rest_constant.py
+++ b/ct_om/service/cantian_exporter/query_storage_info/rest_constant.py
@@ -1,55 +1,81 @@
class Constant:
- PORT = '8088'
- HTTPS = 'https://'
- LOGIN = '/deviceManager/rest/xxxxx/login'
- LOGOUT = '/deviceManager/rest/{deviceId}/sessions'
- QUERY_POOL = '/deviceManager/rest/{deviceId}/storagepool'
- CREATE_FS = '/deviceManager/rest/{deviceId}/filesystem'
- QUERY_FILE_SYSTEM_NUM = '/deviceManager/rest/{deviceId}/filesystem/count'
- DELETE_FS = '/deviceManager/rest/{deviceId}/filesystem/{id}'
- NFS_SERVICE = '/deviceManager/rest/{deviceId}/nfsservice'
- NFS_SHARE_ADD = '/deviceManager/rest/{deviceId}/NFSSHARE'
- NFS_SHARE_ADD_CLIENT = '/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT'
- NFS_SHARE_DELETE = '/deviceManager/rest/{deviceId}/NFSSHARE/{id}'
- NFS_SHARE_DEL_CLIENT = '/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT/{id}'
- NFS_SHARE_QUERY = '/deviceManager/rest/{deviceId}/NFSSHARE'
- QUERY_VSTORE = '/deviceManager/rest/{deviceId}/vstore/count'
- CREATE_VSTORE = '/deviceManager/rest/{deviceId}/vstore'
- DELETE_VSTORE = '/deviceManager/rest/{deviceId}/vstore/{id}'
+ PORT = "8088"
+ HTTPS = "https://"
+ LOGIN = "/deviceManager/rest/xxxxx/login"
+ LOGOUT = "/deviceManager/rest/{deviceId}/sessions"
+ QUERY_POOL = "/deviceManager/rest/{deviceId}/storagepool"
+ CREATE_FS = "/deviceManager/rest/{deviceId}/filesystem"
+ QUERY_FILE_SYSTEM_NUM = "/deviceManager/rest/{deviceId}/filesystem/count"
+ DELETE_FS = "/deviceManager/rest/{deviceId}/filesystem/{id}"
+ NFS_SERVICE = "/deviceManager/rest/{deviceId}/nfsservice"
+ NFS_SHARE_ADD = "/deviceManager/rest/{deviceId}/NFSSHARE"
+ NFS_SHARE_ADD_CLIENT = "/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT"
+ NFS_SHARE_DELETE = "/deviceManager/rest/{deviceId}/NFSSHARE/{id}"
+ NFS_SHARE_DEL_CLIENT = "/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT/{id}"
+ NFS_SHARE_QUERY = "/deviceManager/rest/{deviceId}/NFSSHARE"
+ QUERY_VSTORE = "/deviceManager/rest/{deviceId}/vstore/count"
+ CREATE_VSTORE = "/deviceManager/rest/{deviceId}/vstore"
+ DELETE_VSTORE = "/deviceManager/rest/{deviceId}/vstore/{id}"
CREATE_LIF = "/deviceManager/rest/{deviceId}/lif"
DELETE_LIF = "/deviceManager/rest/{deviceId}/lif?NAME={name}"
CREATE_CLONE_FS = "/deviceManager/rest/{deviceId}/filesystem"
SPLIT_CLONE_FS = "/deviceManager/rest/{deviceId}/clone_fs_split"
CREATE_FSSNAPSHOT = "/deviceManager/rest/{deviceId}/fssnapshot"
ROLLBACK_SNAPSHOT = "/deviceManager/rest/{deviceId}/fssnapshot/rollback_fssnapshot"
- QUERY_ROLLBACK_SNAPSHOT_PROCESS = "/deviceManager/rest/{deviceId}/FSSNAPSHOT/" \
- "query_fs_snapshot_rollback?PARENTNAME={fs_name}"
+ QUERY_ROLLBACK_SNAPSHOT_PROCESS = (
+ "/deviceManager/rest/{deviceId}/FSSNAPSHOT/"
+ "query_fs_snapshot_rollback?PARENTNAME={fs_name}"
+ )
QUERY_LOGIC_PORT_INFO = "/deviceManager/rest/{deviceId}/lif"
    # DR (disaster recovery) query operations
QUERY_SYSTEM_INFO = "/deviceManager/rest/{deviceId}/system/"
QUERY_REMOTE_DEVICE_INFO = "/deviceManager/rest/{deviceId}/remote_device"
QUERY_LICENSE_FEATURE = "/deviceManager/rest/{deviceId}/license/feature"
- QUERY_HYPER_METRO_FILE_SYSTEM_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair/associate"
- QUERY_REPLICATION_FILE_SYSTEM_PAIR = "/deviceManager/rest/{deviceId}/replicationpair/associate"
- QUERY_FILESYSTEM_FOR_REPLICATION = "/deviceManager/rest/{deviceId}/filesystem_for_replication"
+ QUERY_HYPER_METRO_FILE_SYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/HyperMetroPair/associate"
+ )
+ QUERY_REPLICATION_FILE_SYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/replicationpair/associate"
+ )
+ QUERY_FILESYSTEM_FOR_REPLICATION = (
+ "/deviceManager/rest/{deviceId}/filesystem_for_replication"
+ )
    # DR setup operations
HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/FsHyperMetroDomain"
HYPER_METRO_VSTORE_PAIR = "/deviceManager/rest/{deviceId}/vstore_pair"
HYPER_METRO_FILESYSTEM_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair"
- SPLIT_REMOTE_REPLICATION_FILESYSTEM_PAIR = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/split"
- SYNC_REMOTE_REPLICATION_FILESYSTEM_PAIR = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/sync"
- REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/{id}"
+ SPLIT_REMOTE_REPLICATION_FILESYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/split"
+ )
+ SYNC_REMOTE_REPLICATION_FILESYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/sync"
+ )
+ REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/{id}"
+ )
DELETE_HYPER_METRO_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair/{id}"
DELETE_HYPER_METRO_VSTORE_PAIR = "/deviceManager/rest/{deviceId}/vstore_pair/{id}"
- SPLIT_FILESYSTEM_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/SplitFsHyperMetroDomain"
- DELETE_FILESYSTEM_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/FsHyperMetroDomain/{id}"
- CANCEL_SECONDARY_WRITE_LOCK = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/CANCEL_SECONDARY_WRITE_LOCK"
- SET_SECONDARY_WRITE_LOCK = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/SET_SECONDARY_WRITE_LOCK"
- SWAP_ROLE_FS_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/SwapRoleFsHyperMetroDomain"
+ SPLIT_FILESYSTEM_HYPER_METRO_DOMAIN = (
+ "/deviceManager/rest/{deviceId}/SplitFsHyperMetroDomain"
+ )
+ DELETE_FILESYSTEM_HYPER_METRO_DOMAIN = (
+ "/deviceManager/rest/{deviceId}/FsHyperMetroDomain/{id}"
+ )
+ CANCEL_SECONDARY_WRITE_LOCK = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/CANCEL_SECONDARY_WRITE_LOCK"
+ )
+ SET_SECONDARY_WRITE_LOCK = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/SET_SECONDARY_WRITE_LOCK"
+ )
+ SWAP_ROLE_FS_HYPER_METRO_DOMAIN = (
+ "/deviceManager/rest/{deviceId}/SwapRoleFsHyperMetroDomain"
+ )
SWAP_ROLE_REPLICATION_PAIR = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/switch"
- CHANGE_FS_HYPER_METRO_DOMAIN_SECOND_ACCESS = "/deviceManager/rest/{deviceId}/ChangeFsHyperMetroDomainSecondAccess"
+ CHANGE_FS_HYPER_METRO_DOMAIN_SECOND_ACCESS = (
+ "/deviceManager/rest/{deviceId}/ChangeFsHyperMetroDomainSecondAccess"
+ )
JOIN_FS_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/JoinFsHyperMetroDomain"
QUERY_HYPER_METRO_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair/{id}"
IP_LINK = "/deviceManager/rest/{deviceId}/iplink"
@@ -66,9 +92,9 @@ CANTIAN_DOMAIN_PREFIX = "CantianDomain_%s"
class HealthStatus:
- Normal = "1" # 正常
- Faulty = "2" # 故障
- Invalid = "14" # 失效
+ Normal = "1" # 正常
+ Faulty = "2" # 故障
+ Invalid = "14" # 失效
class SystemRunningStatus:
@@ -80,21 +106,21 @@ class SystemRunningStatus:
class RemoteDeviceStatus:
- LinkUp = "10" # 已连接
- LinkDown = "11" # 未连接
- Disabled = "31" # 已禁用
- Connecting = "101" # 正在连接
- AirGapLinkDown = "118" # Air Gap断开
+ LinkUp = "10" # 已连接
+ LinkDown = "11" # 未连接
+ Disabled = "31" # 已禁用
+ Connecting = "101" # 正在连接
+ AirGapLinkDown = "118" # Air Gap断开
class ReplicationRunningStatus:
- Normal = "1" # 正常
- Synchronizing = "23" # 正在同步
- TobeRecovered = "33" # 待恢复
- Interrupted = "34" # 异常断开
- Split = "26" # 已分裂
- Invalid = "35" # 失效
- Standby = "110" # 备用
+ Normal = "1" # 正常
+ Synchronizing = "23" # 正在同步
+ TobeRecovered = "33" # 待恢复
+ Interrupted = "34" # 异常断开
+ Split = "26" # 已分裂
+ Invalid = "35" # 失效
+ Standby = "110" # 备用
class FilesystemRunningStatus:
@@ -105,54 +131,54 @@ class FilesystemRunningStatus:
class MetroDomainRunningStatus:
- Normal = "0" # 正常
- Recovering = "1" # 恢复中
- Faulty = "2" # 故障
- Split = "3" # 分裂
- ForceStarted = "4" # 强制拉起
- Invalid = "5" # 失效
+ Normal = "0" # 正常
+ Recovering = "1" # 恢复中
+ Faulty = "2" # 故障
+ Split = "3" # 分裂
+ ForceStarted = "4" # 强制拉起
+ Invalid = "5" # 失效
class VstorePairRunningStatus:
- Normal = "1" # 正常
- Unsynchronized = "25" # 未同步
- Split = "26" # 分裂
- Invalid = "35" # 失效
- ForceStarted = "93" # 强制启动
+ Normal = "1" # 正常
+ Unsynchronized = "25" # 未同步
+ Split = "26" # 分裂
+ Invalid = "35" # 失效
+ ForceStarted = "93" # 强制启动
class FilesystemPairRunningStatus:
- Normal = "1" # 正常
- Synchronizing = "23" # 同步中
- Invalid = "35" # 失效
- Paused = "41" # 暂停
- ForceStarted = "93" # 强制启动
+ Normal = "1" # 正常
+ Synchronizing = "23" # 同步中
+ Invalid = "35" # 失效
+ Paused = "41" # 暂停
+ ForceStarted = "93" # 强制启动
ToBeSynchronized = "100" # 待同步
- Creating = "119" # 创建中
+ Creating = "119" # 创建中
class SecresAccess:
- AccessDenied = "1" # 禁止访问
- ReadOnly = "2" # 只读
- ReadAndWrite = "3" # 读写
+ AccessDenied = "1" # 禁止访问
+ ReadOnly = "2" # 只读
+ ReadAndWrite = "3" # 读写
class PoolStatus:
- PreCopy = "14" # 预拷贝
- Rebuilt = "16" # 重构
- Online = "27" # 在线
- Offline = "28" # 离线
- Balancing = "32" # 正在均衡
- Initializing = "53" # 初始化中
- Deleting = "106" # 删除中
+ PreCopy = "14" # 预拷贝
+ Rebuilt = "16" # 重构
+ Online = "27" # 在线
+ Offline = "28" # 离线
+ Balancing = "32" # 正在均衡
+ Initializing = "53" # 初始化中
+ Deleting = "106" # 删除中
class PoolHealth:
- Normal = "1" # 正常
- Faulty = "2" # 故障
- Degraded = "5" # 降级
+ Normal = "1" # 正常
+ Faulty = "2" # 故障
+ Degraded = "5" # 降级
class DomainAccess:
ReadAndWrite = "3" # 读写
- ReadOnly = "1" # 只读
\ No newline at end of file
+ ReadOnly = "1" # 只读
diff --git a/ct_om/service/ctcli/display_as_table.py b/ct_om/service/ctcli/display_as_table.py
index 2ab9b3213f50ec27a993f222c7ad240583aab1f5..a7c74e7c1443692f3317421e3cf6be96bd93d428 100644
--- a/ct_om/service/ctcli/display_as_table.py
+++ b/ct_om/service/ctcli/display_as_table.py
@@ -4,8 +4,12 @@ class DisplayAsTable:
@staticmethod
def print_exception_info(err_type, err_detail=""):
- print(("[result] command execution failed, [err_type] {}, [err_detail] {}, "
- "use [ctctl help] to get the commands we support.").format(err_type, err_detail))
+ print(
+ (
+ "[result] command execution failed, [err_type] {}, [err_detail] {}, "
+ "use [ctctl help] to get the commands we support."
+ ).format(err_type, err_detail)
+ )
@staticmethod
def print_info(dict_info, key_header, key_max_len):
@@ -13,11 +17,15 @@ class DisplayAsTable:
print("\r")
for idx, val in enumerate(key_header):
dict_item_size = key_max_len[val] + 4
- data_str = str(dict_item[val]).center(dict_item_size, '-' if dict_item[val] == '-' else ' ')
+ data_str = str(dict_item[val]).center(
+ dict_item_size, "-" if dict_item[val] == "-" else " "
+ )
icon = "|"
if dict_item[val] == "-":
icon = "+"
- data_str = (icon if idx == 0 else '') + data_str[1: len(data_str)] + icon
+ data_str = (
+ (icon if idx == 0 else "") + data_str[1 : len(data_str)] + icon
+ )
print(data_str, end="")
print("\r")
@@ -57,7 +65,7 @@ class DisplayAsTable:
for idx, val in enumerate(key_header):
item_size = key_max_len_dict.get(val) + 4
data_str = val.center(item_size)
- data_str = ('|' if idx == 0 else '') + data_str[1:len(data_str)] + '|'
+ data_str = ("|" if idx == 0 else "") + data_str[1 : len(data_str)] + "|"
print(data_str, end="")
self.print_info(list_info, list(key_header), key_max_len_dict)
diff --git a/ct_om/service/ctcli/handle_info.py b/ct_om/service/ctcli/handle_info.py
index 3fc3be29d75de93d463d530a9c9c2fd400f19d2d..1340506b2676f668ed2fee092d92918c50c12acb 100644
--- a/ct_om/service/ctcli/handle_info.py
+++ b/ct_om/service/ctcli/handle_info.py
@@ -27,16 +27,16 @@ class HandleInfo:
self.print_table = DisplayAsTable()
self.commands_info = json_data_reader(str(Path(cur_abs_path, "commands.json")))
self.input_process_method = {
- 'common': self.common_params_handler,
- 'logs_collection': self.logs_collection_handler
+ "common": self.common_params_handler,
+ "logs_collection": self.logs_collection_handler,
}
@staticmethod
def _reg_match_dir(log_dir):
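+        # Accept only absolute paths made of "/segment" pieces; a single trailing "/" is tolerated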
- reg_string = r'(/[-\w~]+)'
+ reg_string = r"(/[-\w~]+)"
reg_res = re.findall(reg_string, log_dir)
- connector = ''.join(reg_res)
- return connector == log_dir or '{}/'.format(connector) == log_dir
+ connector = "".join(reg_res)
+ return connector == log_dir or "{}/".format(connector) == log_dir
@staticmethod
def basic_params_check(right_params, input_params):
@@ -56,7 +56,9 @@ class HandleInfo:
def format_param_check(self, input_params):
format_key = input_params.get("format")
if not format_key or format_key not in ("json", "table"):
- err_msg = "format param type must be one of ('json', table), not {}".format(format_key)
+ err_msg = "format param type must be one of ('json', table), not {}".format(
+ format_key
+ )
return err_msg, ABNORMAL_STATE
if format_key == "table":
self.has_format = True
@@ -67,18 +69,25 @@ class HandleInfo:
self.data_to_uds.update({"command": str(self.cmd)})
str_params = [param for param in params if "=" in param]
- params_to_uds = {param.split("=")[0]: param.split("=")[1] for param in str_params}
+ params_to_uds = {
+ param.split("=")[0]: param.split("=")[1] for param in str_params
+ }
- if self.commands_info.get(self.cmd).get("params check") == 'True':
- check_res, err_code = self.basic_params_check(self.commands_info.get(self.cmd).get("check value"),
- params_to_uds)
+ if self.commands_info.get(self.cmd).get("params check") == "True":
+ check_res, err_code = self.basic_params_check(
+ self.commands_info.get(self.cmd).get("check value"), params_to_uds
+ )
if err_code:
- return self._params_exception_handler(err_type="params name check error", err_detail=check_res)
+ return self._params_exception_handler(
+ err_type="params name check error", err_detail=check_res
+ )
if "format" in params_to_uds:
check_res, err_code = self.format_param_check(params_to_uds)
if err_code:
- return self._params_exception_handler(err_type="format param error", err_detail=check_res)
+ return self._params_exception_handler(
+ err_type="format param error", err_detail=check_res
+ )
params_to_uds.pop("format")
@@ -86,41 +95,66 @@ class HandleInfo:
return self.data_to_uds, NORMAL_STATE
def logs_collection_handler(self, input_params):
- params_item = [(item_idx, item_val) for item_idx, item_val in enumerate(input_params) if '=' in item_val]
+ params_item = [
+ (item_idx, item_val)
+ for item_idx, item_val in enumerate(input_params)
+ if "=" in item_val
+ ]
if len(params_item) > 2 or len(params_item) <= 1:
- return self._params_exception_handler(err_type="wrong logs collection params number",
- err_detail="The input parameter names "
- "can only be 'log_dir' and 'type'")
+ return self._params_exception_handler(
+ err_type="wrong logs collection params number",
+ err_detail="The input parameter names "
+ "can only be 'log_dir' and 'type'",
+ )
head_idx, tail_idx = params_item[0][0], params_item[-1][0]
if (tail_idx - head_idx) > 1:
- err_msg = "input log_dir '{}' is invalid".format(" ".join(input_params[head_idx:tail_idx]))
- return self._params_exception_handler(err_type="invalid input log dir", err_detail=err_msg)
+ err_msg = "input log_dir '{}' is invalid".format(
+ " ".join(input_params[head_idx:tail_idx])
+ )
+ return self._params_exception_handler(
+ err_type="invalid input log dir", err_detail=err_msg
+ )
log_dir_param = input_params[head_idx]
- type_param = ''.join(input_params[tail_idx:])
- params_to_uds = {param.split("=")[0]: param.split("=")[1] for param in [log_dir_param, type_param]}
+ type_param = "".join(input_params[tail_idx:])
+ params_to_uds = {
+ param.split("=")[0]: param.split("=")[1]
+ for param in [log_dir_param, type_param]
+ }
- check_res, err_code = self.basic_params_check(self.commands_info.get(self.cmd).get("check value"),
- params_to_uds.keys())
+ check_res, err_code = self.basic_params_check(
+ self.commands_info.get(self.cmd).get("check value"), params_to_uds.keys()
+ )
if err_code:
- return self._params_exception_handler(err_type="params name check error", err_detail=check_res)
+ return self._params_exception_handler(
+ err_type="params name check error", err_detail=check_res
+ )
- log_dir, log_collection_type = params_to_uds.get('log_dir'), params_to_uds.get('type')
+ log_dir, log_collection_type = params_to_uds.get("log_dir"), params_to_uds.get(
+ "type"
+ )
if not self._reg_match_dir(log_dir):
- return self._params_exception_handler(err_type="input log dir invalid",
- err_detail="input log dir '{}' invalid".format(log_dir))
- if log_collection_type not in ('all', 'recent'):
- return self._params_exception_handler(err_type="logs collection type error",
- err_detail="logs collection type must be one of [all, recent]"
- " not '{}'".format(log_collection_type))
+ return self._params_exception_handler(
+ err_type="input log dir invalid",
+ err_detail="input log dir '{}' invalid".format(log_dir),
+ )
+ if log_collection_type not in ("all", "recent"):
+ return self._params_exception_handler(
+ err_type="logs collection type error",
+ err_detail="logs collection type must be one of [all, recent]"
+ " not '{}'".format(log_collection_type),
+ )
self.data_to_uds = {"command": self.cmd, "param": params_to_uds}
return self.data_to_uds, NORMAL_STATE
def is_unilateral_execution(self):
"""commands processed only on the ctcli side"""
- info_dict = {cmd: cmd_detail.get("description") for cmd, cmd_detail in self.commands_info.items()}
+ info_dict = {
+ cmd: cmd_detail.get("description")
+ for cmd, cmd_detail in self.commands_info.items()
+ }
self.print_table.display_single_table(info_dict, mode="help")
def input_params_handler(self, params):
@@ -132,11 +166,13 @@ class HandleInfo:
if not params_item:
self.cmd = " ".join(params)
else:
- self.cmd = " ".join(params[:params_item[0][0]])
+ self.cmd = " ".join(params[: params_item[0][0]])
if self.cmd not in self.commands_info:
- return self._params_exception_handler(err_type="input commands error",
- err_detail="'ctctl {}' not supported".format(self.cmd))
+ return self._params_exception_handler(
+ err_type="input commands error",
+ err_detail="'ctctl {}' not supported".format(self.cmd),
+ )
handler_method = self.commands_info.get(self.cmd).get("handler")
return self.input_process_method.get(handler_method)(params)
@@ -155,9 +191,11 @@ class HandleInfo:
else:
self.receipt_data_processing(receipt_info.get("data"))
else:
- err_msg = "[ctctl {}] may fail, the receipt info is {}, which is an empty string, " \
- "does not conform to the format returned by ctmgr, " \
- "check logs for more details.".format(self.cmd, ctmgr_info)
+ err_msg = (
+ "[ctctl {}] may fail, the receipt info is {}, which is an empty string, "
+ "does not conform to the format returned by ctmgr, "
+ "check logs for more details.".format(self.cmd, ctmgr_info)
+ )
self.print_table.display_table(err_msg)
def receipt_data_processing(self, data_info):
diff --git a/ct_om/service/ctcli/main.py b/ct_om/service/ctcli/main.py
index 2e7471c4ee18f7a37f30fadbb0f16c365469099a..6ffdbfa95fda8e0ac31262eda29f836235936f72 100644
--- a/ct_om/service/ctcli/main.py
+++ b/ct_om/service/ctcli/main.py
@@ -11,6 +11,6 @@ def main(params):
handle_info.receipt_info_handler(receive_data)
-if __name__ == '__main__':
+if __name__ == "__main__":
input_params = sys.argv[1:]
main(input_params)
diff --git a/ct_om/service/ctcli/params_factory/param_config.py b/ct_om/service/ctcli/params_factory/param_config.py
index b45c4cc7c1cb6bff462e02032e91faea83151b64..10896d33a90ecd1779d76de163dfda8090618e7d 100644
--- a/ct_om/service/ctcli/params_factory/param_config.py
+++ b/ct_om/service/ctcli/params_factory/param_config.py
@@ -9,18 +9,22 @@ def log_query(log_path, *args):
if args[0]:
return info
tmp = json.dumps(info)
- tmp = "{" + tmp[1: len(tmp) - 1] + "}"
+ tmp = "{" + tmp[1 : len(tmp) - 1] + "}"
return tmp
def collection_logs(receipt_info, *args):
if not receipt_info:
- return "[ctctl collection logs] may fail, the receipt info is {}, " \
- "which is an empty string".format(receipt_info)
+ return (
+ "[ctctl collection logs] may fail, the receipt info is {}, "
+ "which is an empty string".format(receipt_info)
+ )
if "timed out" in receipt_info:
- return "start log collection successful, " \
- "use [ctctl logs progress query] to get current collection progress"
+ return (
+ "start log collection successful, "
+ "use [ctctl logs progress query] to get current collection progress"
+ )
return receipt_info
@@ -29,9 +33,11 @@ def logs_progress_query(log_progress_path, *args):
if log_progress_path:
load_info = json_file_reader(log_progress_path)
if not load_info:
- return "no log collection information, " \
- "may be the log is still being generated or " \
- "no log collection has been performed"
+ return (
+ "no log collection information, "
+ "may be the log is still being generated or "
+ "no log collection has been performed"
+ )
return [json.loads(item) for item in load_info.split("\n") if item]
@@ -42,5 +48,5 @@ PARAM_PREPARE = {
"help": "direct execution",
"log query": log_query,
"collection logs": collection_logs,
- "logs progress query": logs_progress_query
+ "logs progress query": logs_progress_query,
}
diff --git a/ct_om/service/ctcli/params_factory/tools.py b/ct_om/service/ctcli/params_factory/tools.py
index 17e0b7ea8b0eb7bf95075dc4040c7e7da4c35a08..d2511a7f4acf0afaac560afdbc349258bc8b01f7 100644
--- a/ct_om/service/ctcli/params_factory/tools.py
+++ b/ct_om/service/ctcli/params_factory/tools.py
@@ -14,13 +14,17 @@ def table_log_reader(log_path, size=0):
tmp_list_info = [item.split(",") for item in ori_log_info.split("\n") if item]
key_set = ("client_pid", "client_uid", "command", "running_status", "begin_time")
- info_dict = [{item.split("=")[0].strip(): item.split("=")[1][1:-1]
- for item in log
- if item.split("=")[0].strip() in key_set}
- for log in tmp_list_info]
+ info_dict = [
+ {
+ item.split("=")[0].strip(): item.split("=")[1][1:-1]
+ for item in log
+ if item.split("=")[0].strip() in key_set
+ }
+ for log in tmp_list_info
+ ]
if size:
res_size = min(size, len(info_dict))
- return info_dict[len(info_dict) - res_size:]
+ return info_dict[len(info_dict) - res_size :]
return info_dict
diff --git a/ct_om/service/ctcli/uds_client.py b/ct_om/service/ctcli/uds_client.py
index 892eebbf1f3faa22eb9776887445064c786d8c03..02f785a7f15eff23c6f6ca173e4da8531d10b2a5 100644
--- a/ct_om/service/ctcli/uds_client.py
+++ b/ct_om/service/ctcli/uds_client.py
@@ -1,7 +1,7 @@
import json
import socket
-SERVER_ADDRESS = '/opt/cantian/ct_om/service/ct_om.sock'
+SERVER_ADDRESS = "/opt/cantian/ct_om/service/ct_om.sock"
SOCKET_TYPE = socket.SOCK_STREAM
SOCKET_FAMILY = socket.AF_UNIX
RECEIVE_DATA_SIZE = 1024
diff --git a/ct_om/service/ctmgr/checker/collection_logs_checker.py b/ct_om/service/ctmgr/checker/collection_logs_checker.py
index 8fdcf8a13d68e24412f3aa5ad01f12e301407315..57441bed9a8e389b09da873734c2c3817f2b8e86 100644
--- a/ct_om/service/ctmgr/checker/collection_logs_checker.py
+++ b/ct_om/service/ctmgr/checker/collection_logs_checker.py
@@ -5,9 +5,13 @@ class LogEnumCheck:
@staticmethod
def check(input_params_dict):
- _type = input_params_dict.get('type')
- if _type not in ('recent', 'all'):
- TASK_LOG.error('collection logs fail, type must be recent or all, type is {}'.format(_type))
+ _type = input_params_dict.get("type")
+ if _type not in ("recent", "all"):
+ TASK_LOG.error(
+ "collection logs fail, type must be recent or all, type is {}".format(
+ _type
+ )
+ )
return False
return True
diff --git a/ct_om/service/ctmgr/checkers.py b/ct_om/service/ctmgr/checkers.py
index cd99783100944bca26ca803e9e936380f382f401..031add631db74903c962b3098a244ed04aeb9a5d 100644
--- a/ct_om/service/ctmgr/checkers.py
+++ b/ct_om/service/ctmgr/checkers.py
@@ -7,7 +7,7 @@ def check_none(value):
def check_required(input_dict, check_item, check_rule):
- check_value = input_dict.get(check_item, '')
+ check_value = input_dict.get(check_item, "")
if check_rule:
return len(str(check_value)) > 0
@@ -16,13 +16,13 @@ def check_required(input_dict, check_item, check_rule):
def check_type(input_dict, check_item, check_rule):
type_list = {
- 'int': int,
- 'string': str,
- 'list': list,
- 'tuple': tuple,
- 'dict': dict,
- 'float': float,
- 'bool': bool,
+ "int": int,
+ "string": str,
+ "list": list,
+ "tuple": tuple,
+ "dict": dict,
+ "float": float,
+ "bool": bool,
}
check_value = input_dict.get(check_item, None)
if check_none(check_value):
@@ -58,7 +58,7 @@ def check_enum(input_dict, check_item, check_rule):
if check_none(check_value):
return True
- enum_list = check_rule.split('|')
+ enum_list = check_rule.split("|")
if isinstance(check_value, bool):
check_value = str(check_value).upper()
@@ -72,7 +72,7 @@ def check_int_range(input_dict, check_item, check_rule):
if check_none(check_value):
return True
- min_value, max_value = [int(x) for x in check_rule.split('~')]
+ min_value, max_value = [int(x) for x in check_rule.split("~")]
return min_value <= int(check_value) <= max_value
@@ -82,7 +82,7 @@ def check_str_length_range(input_dict, check_item, check_rule):
if check_none(check_value):
return True
- min_value, max_value = [int(x) for x in check_rule.split('~')]
+ min_value, max_value = [int(x) for x in check_rule.split("~")]
return min_value <= len(str(check_value)) <= max_value
@@ -127,10 +127,12 @@ def check_str_list_range(input_dict, check_item, check_rule):
if check_none(check_value):
return True
- if not check_str_list(input_dict=input_dict, check_item=check_item, check_rule=check_rule):
+ if not check_str_list(
+ input_dict=input_dict, check_item=check_item, check_rule=check_rule
+ ):
return False
- min_value, max_value = [int(x) for x in check_rule.split('~')]
+ min_value, max_value = [int(x) for x in check_rule.split("~")]
return min_value <= len(check_value) <= max_value
@@ -164,10 +166,12 @@ def check_int_list_range(input_dict, check_item, check_rule):
if check_none(check_value):
return True
- if not check_int_list(input_dict=input_dict, check_item=check_item, check_rule=check_rule):
+ if not check_int_list(
+ input_dict=input_dict, check_item=check_item, check_rule=check_rule
+ ):
return False
- min_value, max_value = [int(x) for x in check_rule.split('~')]
+ min_value, max_value = [int(x) for x in check_rule.split("~")]
return min_value <= len(check_value) <= max_value
@@ -191,7 +195,7 @@ def check_array_length(input_dict, check_item, check_rule):
if not isinstance(check_value, list):
return False
- min_value, max_value = [int(x) for x in check_rule.split('~')]
+ min_value, max_value = [int(x) for x in check_rule.split("~")]
return min_value <= len(check_value) <= max_value
@@ -210,5 +214,5 @@ CHECKER = {
"intList": check_int_list,
"intListRange": check_int_list_range,
"nonRepeatList": check_non_repeat_list,
- "arrayLength": check_array_length
-}
\ No newline at end of file
+ "arrayLength": check_array_length,
+}
diff --git a/ct_om/service/ctmgr/common/common_tool.py b/ct_om/service/ctmgr/common/common_tool.py
index 8c7b4be5bb9e12c71c904a3b936af5d4add9fd9e..1544884b6972b683cef68f234fbfd7f891568acc 100644
--- a/ct_om/service/ctmgr/common/common_tool.py
+++ b/ct_om/service/ctmgr/common/common_tool.py
@@ -5,4 +5,4 @@ class TimeTool:
@staticmethod
def get_current_time():
- return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
diff --git a/ct_om/service/ctmgr/common/output_tool.py b/ct_om/service/ctmgr/common/output_tool.py
index 25b663c3d3b6e2ef4ce7d8c1fa7886efd7da3df9..acc162b376ab39fb0c5991dc9ccf20772b30c385 100644
--- a/ct_om/service/ctmgr/common/output_tool.py
+++ b/ct_om/service/ctmgr/common/output_tool.py
@@ -22,13 +22,8 @@ class CommonResult:
def __str__(self):
result = {
- "data": {
- "ctmgr_common_output": self.output_data
- },
- "error": {
- "code": int(self.error_code),
- "description": self.description
- }
+ "data": {"ctmgr_common_output": self.output_data},
+ "error": {"code": int(self.error_code), "description": self.description},
}
return json.dumps(result)
@@ -43,7 +38,7 @@ class CommonResult:
self.description = str(description)
-if __name__ == '__main__':
+if __name__ == "__main__":
res = CommonResult()
res.set_output_data("task_obj.task_execute(input_params_dict)")
res.set_output_data("doing XXX success")
diff --git a/ct_om/service/ctmgr/log_tool/om_log.py b/ct_om/service/ctmgr/log_tool/om_log.py
index 079a14df05b04a39acf0ff970ca94fb8e676be13..52752857f25c7acf4005ef5c49ec155ad7b3d944 100644
--- a/ct_om/service/ctmgr/log_tool/om_log.py
+++ b/ct_om/service/ctmgr/log_tool/om_log.py
@@ -10,10 +10,26 @@ LOG_DIR_MODE_OCT = 0o700
LOG_FILE_MODE = 640
LOG_FILE_MODE_OCT = 0o640
SENSITIVE_STR = [
- 'Password', 'passWord', 'PASSWORD', 'password', 'Pswd',
- 'PSWD', 'pwd', 'signature', 'HmacSHA256', 'newPasswd',
- 'private', 'certfile', 'secret', 'token', 'Token', 'pswd',
- 'passwd', 'mysql -u', 'session', 'cookie'
+ "Password",
+ "passWord",
+ "PASSWORD",
+ "password",
+ "Pswd",
+ "PSWD",
+ "pwd",
+ "signature",
+ "HmacSHA256",
+ "newPasswd",
+ "private",
+ "certfile",
+ "secret",
+ "token",
+ "Token",
+ "pswd",
+ "passwd",
+ "mysql -u",
+ "session",
+ "cookie",
]
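+# Keywords that mark a log line as sensitive; presumably used by the log filter below to scrub credentials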
@@ -29,7 +45,7 @@ def _get_log_file_path(project):
return os.path.join(logger_dir, "{}.log".format(project))
- return ''
+ return ""
class DefaultLogFilter(logging.Filter):
@@ -53,8 +69,10 @@ def setup(project_name):
log_path = _get_log_file_path(project_name)
if log_path:
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
log_root.addHandler(file_log)
log_root.addFilter(DefaultLogFilter())
@@ -65,7 +83,9 @@ def setup(project_name):
handler.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")))
+ datefmt=log_config.get("log_date_format"),
+ )
+ )
if log_config.get("debug"):
log_root.setLevel(logging.DEBUG)
@@ -84,7 +104,10 @@ class LOGGER:
"""
    Audit logs require the parameters: client_pid, client_uid, cmd, result, begin_time, finish_time
"""
- def __init__(self, client_pid, client_uid, begin_time, finish_time='', request_time=''):
+
+ def __init__(
+ self, client_pid, client_uid, begin_time, finish_time="", request_time=""
+ ):
self.begin_time = begin_time
self.request_time = request_time
self.finish_time = finish_time
@@ -98,9 +121,18 @@ class LOGGER:
self.finish_time = finish_time
def format_log_message(self, cmd, result):
- return 'client_pid=[{}],client_uid=[{}],command=[{}],running_status=[{}],begin_time=[{}],request_time=[{}],' \
- 'finish_time=[{}]'.format(
- self.client_pid, self.client_uid, cmd, result, self.begin_time, self.request_time, self.finish_time)
+ return (
+ "client_pid=[{}],client_uid=[{}],command=[{}],running_status=[{}],begin_time=[{}],request_time=[{}],"
+ "finish_time=[{}]".format(
+ self.client_pid,
+ self.client_uid,
+ cmd,
+ result,
+ self.begin_time,
+ self.request_time,
+ self.finish_time,
+ )
+ )
def info(self, cmd, result):
LOG.info(self.format_log_message(cmd, result))
diff --git a/ct_om/service/ctmgr/log_tool/om_log_config.py b/ct_om/service/ctmgr/log_tool/om_log_config.py
index d34ba51c34b6b302acab627344707b31db612f7b..fc493757696f6db6c90c06d59bc8720cd711d4c1 100644
--- a/ct_om/service/ctmgr/log_tool/om_log_config.py
+++ b/ct_om/service/ctmgr/log_tool/om_log_config.py
@@ -8,15 +8,15 @@ CONSOLE_CONF = {
"log": {
"use_syslog": False,
"debug": False,
- "log_dir": str(Path('{}/../ctmgr_log'.format(str(dir_name)))),
+ "log_dir": str(Path("{}/../ctmgr_log".format(str(dir_name)))),
"log_file_max_size": 1048576,
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "time=[%(asctime)s],level=[%(levelname)s],pid=[%(process)d],"
- "thread=[%(threadName)s],tid=[%(thread)d],"
- "file=[%(filename)s:%(lineno)d %(funcName)s],%(message)s",
+ "thread=[%(threadName)s],tid=[%(thread)d],"
+ "file=[%(filename)s:%(lineno)d %(funcName)s],%(message)s",
"logging_context_format_string": "time=[%(asctime)s],level=[%(levelname)s],pid=[%(process)d],"
- "thread=[%(threadName)s],tid=[%(thread)d],"
- "file=[%(filename)s:%(lineno)d %(funcName)s],%(message)s"
+ "thread=[%(threadName)s],tid=[%(thread)d],"
+ "file=[%(filename)s:%(lineno)d %(funcName)s],%(message)s",
}
}
diff --git a/ct_om/service/ctmgr/logs_collection/execute.py b/ct_om/service/ctmgr/logs_collection/execute.py
index 6c01b65ed0bd0b89732d04d13bc41020585ac7a5..779a8ad29eac3e3a1bdc8fc950c911722b3db6c7 100644
--- a/ct_om/service/ctmgr/logs_collection/execute.py
+++ b/ct_om/service/ctmgr/logs_collection/execute.py
@@ -5,16 +5,20 @@ import argparse
from functools import wraps
from pathlib import Path
-sys.path.append('/opt/cantian/ct_om/service')
-sys.path.append('/opt/cantian/ct_om/service/ctmgr')
+sys.path.append("/opt/cantian/ct_om/service")
+sys.path.append("/opt/cantian/ct_om/service/ctmgr")
from ctmgr.logs_collection.tools import LockFile
from ctmgr.log_tool.om_log import LOGS_COLLECTION as LOG
from ctmgr.logs_collection.logs_collection import LogsCollection
-parser = argparse.ArgumentParser(description='Required parameters for running this script')
-parser.add_argument('--path', '-p', help='path to save all tar.gz bags', required=True)
-parser.add_argument('--type', '-t', help='recent: recent logs, all: all logs', required=True)
+parser = argparse.ArgumentParser(
+ description="Required parameters for running this script"
+)
+parser.add_argument("--path", "-p", help="path to save all tar.gz bags", required=True)
+parser.add_argument(
+ "--type", "-t", help="recent: recent logs, all: all logs", required=True
+)
p_args = parser.parse_args()
cur_abs_path, _ = os.path.split(os.path.abspath(__file__))
@@ -24,6 +28,7 @@ def exter_attack(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
+
return wrapper
@@ -36,13 +41,17 @@ def main(target_path, mode):
try:
lock_file.lock(file_handler)
except IOError:
- LOG.info('there is already a log collection process, another process[pid:{}]'
- 'try to lock the file, but failed.'.format(os.getpid()))
- print('there is already a log collection process, please try again later...')
+ LOG.info(
+ "there is already a log collection process, another process[pid:{}]"
+ "try to lock the file, but failed.".format(os.getpid())
+ )
+ print("there is already a log collection process, please try again later...")
else:
- LOG.info('success to lock the file, current process id:{}'.format(os.getpid()))
+ LOG.info("success to lock the file, current process id:{}".format(os.getpid()))
execute(mode, target_path)
- print('log collection ends, use [ctctl logs progress query] to get log collection details')
+ print(
+ "log collection ends, use [ctctl logs progress query] to get log collection details"
+ )
finally:
lock_file.unlock(file_handler)
file_handler.close()
diff --git a/ct_om/service/ctmgr/logs_collection/logs_collection.py b/ct_om/service/ctmgr/logs_collection/logs_collection.py
index 295cb16df5fd98cee9baff154495abe28b1e7de5..c15ffd2e20d893735f914b559b090df61b029cab 100644
--- a/ct_om/service/ctmgr/logs_collection/logs_collection.py
+++ b/ct_om/service/ctmgr/logs_collection/logs_collection.py
@@ -16,7 +16,7 @@ cur_abs_path, _ = os.path.split(os.path.abspath(__file__))
def json_data_reader(data_path):
- with open(data_path, 'r', encoding='utf-8') as file:
+ with open(data_path, "r", encoding="utf-8") as file:
info = file.read()
return json.loads(info)
@@ -32,16 +32,24 @@ class LogsCollection:
self.max_gather_vol = 0
self.sh_task_time_out = 600
self.record_progress = RecordLogPackingProgress(cur_abs_path)
- self.config_info = json_data_reader(str(Path(cur_abs_path, 'config.json')))
+ self.config_info = json_data_reader(str(Path(cur_abs_path, "config.json")))
@staticmethod
def get_cur_timestamp(flag=None):
utc_now = datetime.utcnow()
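+        # Attach UTC tzinfo, then convert to the local timezone before formatting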
- if flag == 'name':
- return utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None).strftime('%Y%m%d%H%M%S')
+ if flag == "name":
+ return (
+ utc_now.replace(tzinfo=timezone.utc)
+ .astimezone(tz=None)
+ .strftime("%Y%m%d%H%M%S")
+ )
- return utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None).strftime('%Y-%m-%d %H:%M:%S')
+ return (
+ utc_now.replace(tzinfo=timezone.utc)
+ .astimezone(tz=None)
+ .strftime("%Y-%m-%d %H:%M:%S")
+ )
@staticmethod
def path_authority_judgment(file_path):
@@ -56,7 +64,7 @@ class LogsCollection:
@staticmethod
def reg_handler(log_name_prefix, match_string):
"""匹配出待采集的文件"""
- if match_string.startswith(log_name_prefix) and match_string.endswith('tar.gz'):
+ if match_string.startswith(log_name_prefix) and match_string.endswith("tar.gz"):
return True
return False
@@ -68,13 +76,13 @@ class LogsCollection:
LOG.info("Modify file mode before collection.")
cmd = "sudo /opt/cantian/action/change_log_priority.sh"
res = self.shell_task(cmd, "modify file mode")
- if res != 'success':
+ if res != "success":
err_msg = "Modify files mode failed."
LOG.error(err_msg)
LOG.info("Modify file success.")
def packing_files(self, log_file_list, tar_name, mode):
- """ 将当前模块日志归档为一个压缩文件
+ """将当前模块日志归档为一个压缩文件
:param log_file_list: 当前模块日志列表
:param tar_name: 压缩文件名
@@ -84,23 +92,29 @@ class LogsCollection:
for log_file in log_file_list:
log_names.append(log_file)
log_directory, log_name = os.path.split(log_file)
- log_name_prefix = log_name.split('.')[0]
- archive_logs = [(item, get_file_creation_time(str(Path(log_directory, item))))
- for item in os.listdir(log_directory)
- if self.reg_handler(log_name_prefix, item)]
+ log_name_prefix = log_name.split(".")[0]
+ archive_logs = [
+ (item, get_file_creation_time(str(Path(log_directory, item))))
+ for item in os.listdir(log_directory)
+ if self.reg_handler(log_name_prefix, item)
+ ]
            # Recent-mode collection gathers only the live log plus the newest archived logs
- if mode == 'recent':
+ if mode == "recent":
archive_logs.sort(key=lambda x: (x[1], x[0]), reverse=True)
cur_size = int(os.path.getsize(str(Path(log_directory, log_name))))
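+                # Keep adding the newest archives until the size budget (max_gather_vol) is used up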
for archive_log_name, _ in archive_logs:
if cur_size >= self.max_gather_vol:
break
- cur_size += int(os.path.getsize(str(Path(log_directory, archive_log_name))))
+ cur_size += int(
+ os.path.getsize(str(Path(log_directory, archive_log_name)))
+ )
log_names.append(os.path.join(log_directory, archive_log_name))
else:
- log_names.extend([os.path.join(log_directory, name) for name, _ in archive_logs])
+ log_names.extend(
+ [os.path.join(log_directory, name) for name, _ in archive_logs]
+ )
- with tarfile.open(f'{tar_name}', 'w:gz') as tar:
+ with tarfile.open(f"{tar_name}", "w:gz") as tar:
for pack_name in log_names:
tar.add(pack_name)
@@ -111,39 +125,61 @@ class LogsCollection:
        :param task_type: label describing what this invocation is for
        :return: string, 'success' or 'fail'
"""
- res_state = 'success'
+ res_state = "success"
try:
- proc = subprocess.Popen(shlex.split(spec_cmd), stdout=subprocess.PIPE, shell=False)
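+            # shlex.split with shell=False prevents the collected command from being shell-interpreted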
+ proc = subprocess.Popen(
+ shlex.split(spec_cmd), stdout=subprocess.PIPE, shell=False
+ )
except Exception as err:
- LOG.error('[{}] execute cmd: {} failed, err_msg: {}'.format(task_type, spec_cmd, err))
- return 'fail'
+ LOG.error(
+ "[{}] execute cmd: {} failed, err_msg: {}".format(
+ task_type, spec_cmd, err
+ )
+ )
+ return "fail"
try:
res, state = proc.communicate(timeout=self.sh_task_time_out)
except Exception as err:
- LOG.error('[{}] execute cmd: {} failed, err_msg: {}'.format(task_type, spec_cmd, err))
- return 'fail'
+ LOG.error(
+ "[{}] execute cmd: {} failed, err_msg: {}".format(
+ task_type, spec_cmd, err
+ )
+ )
+ return "fail"
if state:
- LOG.error('[{}] execute cmd: {} failed, res_err: {}'.format(task_type, spec_cmd, res.decode('utf-8')))
- res_state = 'fail'
+ LOG.error(
+ "[{}] execute cmd: {} failed, res_err: {}".format(
+ task_type, spec_cmd, res.decode("utf-8")
+ )
+ )
+ res_state = "fail"
else:
- LOG.info('[{}] execute cmd: {} succeed, res_msg: {}'.format(task_type, spec_cmd, res.decode('utf-8')))
+ LOG.info(
+ "[{}] execute cmd: {} succeed, res_msg: {}".format(
+ task_type, spec_cmd, res.decode("utf-8")
+ )
+ )
return res_state
def generate_logs(self, item, main_path, mode):
res = []
- prefix_to_type = {'py': 'python3', 'sh': 'sh'}
- script_type, script_path = item.get('script_type'), item.get('script_path')
- size = item.get('size')
- file_dir = item.get('dir')
- tar_name = item.get('tar_name')
+ prefix_to_type = {"py": "python3", "sh": "sh"}
+ script_type, script_path = item.get("script_type"), item.get("script_path")
+ size = item.get("size")
+ file_dir = item.get("dir")
+ tar_name = item.get("tar_name")
if mode == "recent":
file_dir = file_dir + "| head -n %s" % size
- cmd = (prefix_to_type.get(script_type) + ' ' + script_path) % (main_path, file_dir, tar_name)
- state = self.shell_task(cmd, 'generate logs')
+ cmd = (prefix_to_type.get(script_type) + " " + script_path) % (
+ main_path,
+ file_dir,
+ tar_name,
+ )
+ state = self.shell_task(cmd, "generate logs")
res.append(state)
return res
@@ -152,8 +188,12 @@ class LogsCollection:
if os.path.exists(target_path):
self.removing_dirs(target_path)
- self.record_progress.record_cur_progress((cur_time, cur_time), 'for details see logs',
- 'for details see logs', ('fail', 'time_out', 'None'))
+ self.record_progress.record_cur_progress(
+ (cur_time, cur_time),
+ "for details see logs",
+ "for details see logs",
+ ("fail", "time_out", "None"),
+ )
def removing_dirs(self, dir_to_remove):
"""递归删除包含输入目录在内的所有文件
@@ -171,44 +211,65 @@ class LogsCollection:
os.remove(dir_to_remove)
def sub_module_packing(self, item, main_path, idx, mode):
- start_time, collect_state = self.get_cur_timestamp(), 'done'
+ start_time, collect_state = self.get_cur_timestamp(), "done"
- self.max_gather_vol = int(item.get('size')) * self.packing_ratio * pow(1024, 2)
- tar_name, generate_type = item.get('tar_name'), item.get('generate_type')
+ self.max_gather_vol = int(item.get("size")) * self.packing_ratio * pow(1024, 2)
+ tar_name, generate_type = item.get("tar_name"), item.get("generate_type")
is_repeat_generate = item.get("is_repeat_generate", False)
log_name = None
if generate_type == "script generated":
- log_file_dir = item.get('dir')
+ log_file_dir = item.get("dir")
log_directory, log_name = os.path.split(log_file_dir)
gen_res = self.generate_logs(item, main_path, mode)
statistics_res = Counter(gen_res)
- LOG.info(f"[generate logs ends], end_time:{self.get_cur_timestamp()}, all:{len(gen_res)}, "
- f"success: {statistics_res.get('success', 0)}, fail: {statistics_res.get('fail', 0)}")
+ LOG.info(
+ f"[generate logs ends], end_time:{self.get_cur_timestamp()}, all:{len(gen_res)}, "
+ f"success: {statistics_res.get('success', 0)}, fail: {statistics_res.get('fail', 0)}"
+ )
else:
- log_file_list = [item.get('dir')] if not is_repeat_generate \
- else glob.glob(item.get('dir'), recursive=True)
+ log_file_list = (
+ [item.get("dir")]
+ if not is_repeat_generate
+ else glob.glob(item.get("dir"), recursive=True)
+ )
for log_file_dir in log_file_list:
            # Split the path into the log directory and the log file name
log_directory, log_name = os.path.split(log_file_dir)
- LOG.info('[submodule log collection starts] child_module: {}, '
- 'generate_type: {}'.format(log_name, generate_type))
+ LOG.info(
+ "[submodule log collection starts] child_module: {}, "
+ "generate_type: {}".format(log_name, generate_type)
+ )
if not os.path.exists(log_file_dir):
LOG.error(
- 'log_source_path: {} does not exist log collection failed and exited'.format(log_file_dir))
- self.record_cur_progress(("done", 'not exist'), (idx, generate_type, log_name), start_time)
+ "log_source_path: {} does not exist log collection failed and exited".format(
+ log_file_dir
+ )
+ )
+ self.record_cur_progress(
+ ("done", "not exist"),
+ (idx, generate_type, log_name),
+ start_time,
+ )
return False
- if not self.path_authority_judgment(log_file_dir) or not self.path_authority_judgment(log_directory):
+ if not self.path_authority_judgment(
+ log_file_dir
+ ) or not self.path_authority_judgment(log_directory):
LOG.error(
- "log_file_dir: '{}' or log_content: '{}' permission denied".format(log_file_dir, log_directory))
+ "log_file_dir: '{}' or log_content: '{}' permission denied".format(
+ log_file_dir, log_directory
+ )
+ )
self.pre_execute()
sub_module_tar_path = os.path.join(main_path, tar_name)
self.packing_files(log_file_list, sub_module_tar_path, mode)
- self.record_cur_progress((collect_state, 'None'), (idx, generate_type, log_name), start_time)
+ self.record_cur_progress(
+ (collect_state, "None"), (idx, generate_type, log_name), start_time
+ )
return True
def record_cur_progress(self, collect_state, submodule_info, start_time):
@@ -216,14 +277,20 @@ class LogsCollection:
idx, generate_type, name_pre = submodule_info
end_time = self.get_cur_timestamp()
try:
- cur_percent = str('%.2f' % ((idx + 1) * 100 / len(self.config_info))) + '%'
+ cur_percent = str("%.2f" % ((idx + 1) * 100 / len(self.config_info))) + "%"
except ZeroDivisionError:
- cur_percent = 'error'
- self.record_progress.record_cur_progress((start_time, end_time),
- name_pre, generate_type, (state, err_type, cur_percent))
-
- LOG.info('[submodule log collection ends] child_module: {}, pack_status: {}, '
- 'cur_progress: {}'.format(name_pre, state, cur_percent))
+ cur_percent = "error"
+ self.record_progress.record_cur_progress(
+ (start_time, end_time),
+ name_pre,
+ generate_type,
+ (state, err_type, cur_percent),
+ )
+
+ LOG.info(
+ "[submodule log collection ends] child_module: {}, pack_status: {}, "
+ "cur_progress: {}".format(name_pre, state, cur_percent)
+ )
@timeout(1800)
def execute(self, target_path, mode):
@@ -231,9 +298,11 @@ class LogsCollection:
        target: user-specified directory where the archive is stored
        mode: recent: pack recent logs, all: pack all logs
"""
- LOG.info('[logs collection] starts')
+ LOG.info("[logs collection] starts")
self.pre_execute()
- LOG.info(f'[generate logs starts], start_time:{self.get_cur_timestamp()}, mode: {mode}')
+ LOG.info(
+ f"[generate logs starts], start_time:{self.get_cur_timestamp()}, mode: {mode}"
+ )
if not os.path.exists(target_path):
try:
@@ -245,8 +314,8 @@ class LogsCollection:
finally:
os.chmod(target_path, 0o750)
- time_stamp = self.get_cur_timestamp(flag='name')
- final_tar_file_name = 'cantian_log_{}.tar.gz'.format(time_stamp)
+ time_stamp = self.get_cur_timestamp(flag="name")
+ final_tar_file_name = "cantian_log_{}.tar.gz".format(time_stamp)
main_path = os.path.join(target_path, time_stamp)
try:
os.mkdir(main_path)
@@ -255,17 +324,25 @@ class LogsCollection:
LOG.error(err_msg)
raise IOError(err_msg) from err
- res = Counter([str(self.sub_module_packing(item, main_path, idx, mode))
- for idx, item in enumerate(self.config_info)])
+ res = Counter(
+ [
+ str(self.sub_module_packing(item, main_path, idx, mode))
+ for idx, item in enumerate(self.config_info)
+ ]
+ )
    # Compress all per-module archives into a single tarball
final_tar_file_path = str(Path(target_path, final_tar_file_name))
os.chdir(main_path)
- with tarfile.open(f'{final_tar_file_path}', 'w:gz') as tar:
+ with tarfile.open(f"{final_tar_file_path}", "w:gz") as tar:
for file_name in os.listdir(main_path):
tar.add(file_name)
os.chdir(cur_abs_path)
# Restrict the permissions of the archived log
os.chmod(final_tar_file_path, 0o440)
self.removing_dirs(main_path)
- LOG.info('[logs collection ends], success: {}, fail: {}'.format(res.get('True', 0), res.get('False', 0)))
+ LOG.info(
+ "[logs collection ends], success: {}, fail: {}".format(
+ res.get("True", 0), res.get("False", 0)
+ )
+ )
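
A note on the tally above: sub_module_packing returns a bool per module, and the Counter over stringified results is what feeds the final success/fail log line. A minimal standalone sketch of the same pattern, with a hypothetical packer and module list:

    from collections import Counter

    def pack(module):
        # hypothetical stand-in for sub_module_packing
        return module != "bad"

    modules = ["cms", "cantiand", "bad"]
    res = Counter([str(pack(m)) for m in modules])
    print("success: {}, fail: {}".format(res.get("True", 0), res.get("False", 0)))
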
diff --git a/ct_om/service/ctmgr/logs_collection/tools.py b/ct_om/service/ctmgr/logs_collection/tools.py
index bd9da6af9f83c650abe91aa7bfd1acb93b35619e..53c9342b65912a8fe520547accfdade7a0293fda 100644
--- a/ct_om/service/ctmgr/logs_collection/tools.py
+++ b/ct_om/service/ctmgr/logs_collection/tools.py
@@ -12,9 +12,14 @@ def timeout(sec):
@functools.wraps(func)
def wrapped_func(*args):
def handle_timeout(signum, frame):
- err_msg = 'err_type: execution timeout, err_msg: ' \
- 'cmd "ctctl collection logs" timed out after {} minutes'.format(sec // 60)
+ err_msg = (
+ "err_type: execution timeout, err_msg: "
+ 'cmd "ctctl collection logs" timed out after {} minutes'.format(
+ sec // 60
+ )
+ )
raise TimeoutError(err_msg)
+
signal.signal(signal.SIGALRM, handle_timeout)
signal.alarm(sec)
try:
@@ -22,12 +27,15 @@ def timeout(sec):
finally:
signal.alarm(0)
return result
+
return wrapped_func
+
return decorator
class LockFile:
"""持锁状态下对文件标识符进行修改"""
+
@staticmethod
def lock(handle):
fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
@@ -45,7 +53,9 @@ class RecordLogPackingProgress:
def clear_original_data(self):
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(self.record_file, flags, modes), "w", encoding="utf-8") as file:
+ with os.fdopen(
+ os.open(self.record_file, flags, modes), "w", encoding="utf-8"
+ ) as file:
file.truncate()
def record_cur_progress(self, time_interval, module, generate_type, state):
@@ -55,12 +65,14 @@ class RecordLogPackingProgress:
info_dict = dict()
modes = stat.S_IWUSR | stat.S_IRUSR
flags = os.O_WRONLY | os.O_CREAT
- with os.fdopen(os.open(self.record_file, flags, modes), "a", encoding="utf-8") as file:
+ with os.fdopen(
+ os.open(self.record_file, flags, modes), "a", encoding="utf-8"
+ ) as file:
info_dict.update({"start_time": start_time})
info_dict.update({"end_time": end_time})
info_dict.update({"log_name": module})
info_dict.update({"generate_type": generate_type})
info_dict.update({"status": status})
- info_dict.update({"err_type": err_type})
+ info_dict.update({"err_type": err_type})
info_dict.update({"percentage": percentage})
file.write(json.dumps(info_dict) + "\n")
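
Worth calling out in tools.py: progress records are appended through os.fdopen(os.open(...)) rather than a bare open(), so a newly created file never starts with permissions broader than the mode passed to os.open. A minimal sketch of the pattern (the file name is hypothetical):

    import json
    import os
    import stat

    record_file = "log_packing_progress.json"  # hypothetical path
    flags = os.O_WRONLY | os.O_CREAT  # create if missing, write only
    modes = stat.S_IWUSR | stat.S_IRUSR  # 0o600: owner read/write only

    with os.fdopen(os.open(record_file, flags, modes), "a", encoding="utf-8") as file:
        file.write(json.dumps({"status": "success", "percentage": "100.00%"}) + "\n")
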
diff --git a/ct_om/service/ctmgr/task.py b/ct_om/service/ctmgr/task.py
index 668488f799953beb40ffc1f6dcccb2db74f05569..3c487dd8a6f9dacfc9a6e7306a954ca7be22f68d 100644
--- a/ct_om/service/ctmgr/task.py
+++ b/ct_om/service/ctmgr/task.py
@@ -15,14 +15,18 @@ task_dir = {} # 用于保存任务类,格式:{命令: 命令对应的类}
def build_uds_obj(recv_commend, commend_data):
- param_check = commend_data.get('paramCheck', {})
- rest_obj = UdsTask(task_name=recv_commend, handler='cantian', param_check_dict=param_check)
+ param_check = commend_data.get("paramCheck", {})
+ rest_obj = UdsTask(
+ task_name=recv_commend, handler="cantian", param_check_dict=param_check
+ )
- param_check = commend_data.get('checkFunc')
+ param_check = commend_data.get("checkFunc")
try:
check_func = get_check_func(param_check)
except Exception as error:
- DEPLOY_LOG.error('paramCheck format error in tasks.json element: {}'.format(recv_commend))
+ DEPLOY_LOG.error(
+ "paramCheck format error in tasks.json element: {}".format(recv_commend)
+ )
raise error
if check_func:
@@ -32,15 +36,22 @@ def build_uds_obj(recv_commend, commend_data):
def build_cmd_obj(recv_commend, commend_data):
- cmd_line = commend_data.get('cmd')
- param_check = commend_data.get('paramCheck', {})
- rest_obj = CmdTask(task_name=recv_commend, handler='cmd', cmd_line=cmd_line, param_check_dict=param_check)
-
- param_check = commend_data.get('checkFunc')
+ cmd_line = commend_data.get("cmd")
+ param_check = commend_data.get("paramCheck", {})
+ rest_obj = CmdTask(
+ task_name=recv_commend,
+ handler="cmd",
+ cmd_line=cmd_line,
+ param_check_dict=param_check,
+ )
+
+ param_check = commend_data.get("checkFunc")
try:
check_func = get_check_func(param_check)
except Exception as error:
- DEPLOY_LOG.error('paramCheck format error in tasks.json element: {}'.format(recv_commend))
+ DEPLOY_LOG.error(
+ "paramCheck format error in tasks.json element: {}".format(recv_commend)
+ )
raise error
if check_func:
@@ -50,17 +61,24 @@ def build_cmd_obj(recv_commend, commend_data):
def build_shell_obj(recv_commend, commend_data):
- file_path = commend_data.get('filePath')
- sh_input = commend_data.get('sh_input')
- param_check = commend_data.get('paramCheck', {})
- rest_obj = ShellTask(task_name=recv_commend, handler='shell', file_path=file_path, sh_input=sh_input,
- param_check_dict=param_check)
-
- param_check = commend_data.get('checkFunc')
+ file_path = commend_data.get("filePath")
+ sh_input = commend_data.get("sh_input")
+ param_check = commend_data.get("paramCheck", {})
+ rest_obj = ShellTask(
+ task_name=recv_commend,
+ handler="shell",
+ file_path=file_path,
+ sh_input=sh_input,
+ param_check_dict=param_check,
+ )
+
+ param_check = commend_data.get("checkFunc")
try:
check_func = get_check_func(param_check)
except Exception as error:
- DEPLOY_LOG.error('paramCheck format error in tasks.json element: {}'.format(recv_commend))
+ DEPLOY_LOG.error(
+ "paramCheck format error in tasks.json element: {}".format(recv_commend)
+ )
raise error
if check_func:
@@ -70,17 +88,24 @@ def build_shell_obj(recv_commend, commend_data):
def build_py_obj(recv_commend, commend_data):
- file_path = commend_data.get('filePath')
- py_input = commend_data.get('py_input')
- param_check = commend_data.get('paramCheck', {})
- rest_obj = PyTask(task_name=recv_commend, handler='py', file_path=file_path, py_input=py_input,
- param_check_dict=param_check)
-
- param_check = commend_data.get('checkFunc')
+ file_path = commend_data.get("filePath")
+ py_input = commend_data.get("py_input")
+ param_check = commend_data.get("paramCheck", {})
+ rest_obj = PyTask(
+ task_name=recv_commend,
+ handler="py",
+ file_path=file_path,
+ py_input=py_input,
+ param_check_dict=param_check,
+ )
+
+ param_check = commend_data.get("checkFunc")
try:
check_func = get_check_func(param_check)
except Exception as error:
- DEPLOY_LOG.error('paramCheck format error in tasks.json element: {}'.format(recv_commend))
+ DEPLOY_LOG.error(
+ "paramCheck format error in tasks.json element: {}".format(recv_commend)
+ )
raise error
if check_func:
@@ -90,46 +115,61 @@ def build_py_obj(recv_commend, commend_data):
def build_audit_obj(recv_commend, commend_data):
- file_path = commend_data.get('filePath')
- param_check = commend_data.get('paramCheck', {})
- rest_obj = AuditTask(task_name=recv_commend, handler='audit_py', file_path=file_path, py_input='',
- params_check_dict=param_check)
+ file_path = commend_data.get("filePath")
+ param_check = commend_data.get("paramCheck", {})
+ rest_obj = AuditTask(
+ task_name=recv_commend,
+ handler="audit_py",
+ file_path=file_path,
+ py_input="",
+ params_check_dict=param_check,
+ )
return rest_obj
def build_log_obj(recv_commend, commend_data):
- param_check = commend_data.get('paramCheck', {})
- rest_obj = QueryLogTask(task_name=recv_commend, handler='log', params_check_dict=param_check)
+ param_check = commend_data.get("paramCheck", {})
+ rest_obj = QueryLogTask(
+ task_name=recv_commend, handler="log", params_check_dict=param_check
+ )
return rest_obj
def build_inspection_obj(recv_commend, commend_data):
- param_check = commend_data.get('paramCheck', {})
- rest_odj = InspectionTask(task_name=recv_commend, handler="inspection_py", params_check_dict=param_check)
+ param_check = commend_data.get("paramCheck", {})
+ rest_odj = InspectionTask(
+ task_name=recv_commend, handler="inspection_py", params_check_dict=param_check
+ )
return rest_odj
def build_log_progress_query_obj(recv_commend, commend_data):
- param_check = commend_data.get('paramCheck', {})
- query_obj = QueryLogProgressTask(task_name=recv_commend, handler='log_progress', params_check_dict=param_check)
+ param_check = commend_data.get("paramCheck", {})
+ query_obj = QueryLogProgressTask(
+ task_name=recv_commend, handler="log_progress", params_check_dict=param_check
+ )
return query_obj
def build_logs_collection_obj(recv_commend, commend_data):
- log_path, input_param = commend_data.get('filePath'), commend_data.get('py_input')
- param_check = commend_data.get('paramCheck', {})
- logs_collect_obj = LogsCollectionTask(task_name=recv_commend,
- handler='log_py',
- file_path=log_path,
- py_input=input_param,
- params_check_dict=param_check)
-
- param_check = commend_data.get('checkFunc')
+ log_path, input_param = commend_data.get("filePath"), commend_data.get("py_input")
+ param_check = commend_data.get("paramCheck", {})
+ logs_collect_obj = LogsCollectionTask(
+ task_name=recv_commend,
+ handler="log_py",
+ file_path=log_path,
+ py_input=input_param,
+ params_check_dict=param_check,
+ )
+
+ param_check = commend_data.get("checkFunc")
try:
check_func = get_check_func(param_check)
except Exception as error:
- DEPLOY_LOG.error('paramCheck format error in tasks.json element: {}'.format(recv_commend))
+ DEPLOY_LOG.error(
+ "paramCheck format error in tasks.json element: {}".format(recv_commend)
+ )
raise error
if check_func:
@@ -139,48 +179,56 @@ def build_logs_collection_obj(recv_commend, commend_data):
def get_check_func(param_check):
- check_path = param_check.split('.')
+ check_path = param_check.split(".")
if len(check_path) == 2:
check_file_name, check_class_name = check_path
- check_func = getattr(import_module('checker.{}'.format(check_file_name)), check_class_name).check
+ check_func = getattr(
+ import_module("checker.{}".format(check_file_name)), check_class_name
+ ).check
return check_func
elif len(check_path) == 1 and not check_path[0]:
- return ''
+ return ""
else:
- DEPLOY_LOG.error('filePath: {} in tasks.json with error format'.format(param_check))
- raise Exception('filePath: {} in tasks.json with error format'.format(param_check))
+ DEPLOY_LOG.error(
+ "filePath: {} in tasks.json with error format".format(param_check)
+ )
+ raise Exception(
+ "filePath: {} in tasks.json with error format".format(param_check)
+ )
def load_tasks():
dir_name, _ = os.path.split(os.path.abspath(__file__))
- task_config = str(Path('{}/tasks.json'.format(dir_name)))
- with open(task_config, 'r', encoding='utf8') as file_path:
+ task_config = str(Path("{}/tasks.json".format(dir_name)))
+ with open(task_config, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
for recv_commend, commend_data in json_data.items():
- handler = commend_data.get('handler')
+ handler = commend_data.get("handler")
try:
- task_dir[recv_commend] = BUILD_OBJ_FUNCS.get(handler)(recv_commend, commend_data)
+ task_dir[recv_commend] = BUILD_OBJ_FUNCS.get(handler)(
+ recv_commend, commend_data
+ )
except Exception as error:
- DEPLOY_LOG.error('load task.json fail, error: {}'.format(error))
+ DEPLOY_LOG.error("load task.json fail, error: {}".format(error))
raise error
DEPLOY_LOG.info("load task.json success")
BUILD_OBJ_FUNCS = {
- 'cantian': build_uds_obj,
- 'cmd': build_cmd_obj,
- 'shell': build_shell_obj,
- 'py': build_py_obj,
- 'audit_py': build_audit_obj,
- 'log': build_log_obj,
- 'inspection_py': build_inspection_obj,
- 'log_progress': build_log_progress_query_obj,
- 'log_py': build_logs_collection_obj
+ "cantian": build_uds_obj,
+ "cmd": build_cmd_obj,
+ "shell": build_shell_obj,
+ "py": build_py_obj,
+ "audit_py": build_audit_obj,
+ "log": build_log_obj,
+ "inspection_py": build_inspection_obj,
+ "log_progress": build_log_progress_query_obj,
+ "log_py": build_logs_collection_obj,
}
-if __name__ == '__main__':
- input_params = {'key1': 'path/path/xxx.log', 'key2': 0}
+if __name__ == "__main__":
+ input_params = {"key1": "path/path/xxx.log", "key2": 0}
load_tasks()
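
The builders in this file are all registered in BUILD_OBJ_FUNCS and selected by the handler field of each tasks.json entry. A minimal sketch of that dispatch, with a stub builder and a hypothetical entry:

    def build_cmd_obj(recv_commend, commend_data):
        # stub builder; the real one constructs a CmdTask
        return (recv_commend, commend_data.get("cmd"))

    BUILD_OBJ_FUNCS = {"cmd": build_cmd_obj}

    commend_data = {"handler": "cmd", "cmd": "echo hi"}  # hypothetical entry
    builder = BUILD_OBJ_FUNCS.get(commend_data.get("handler"))
    if builder is None:
        raise Exception("unknown handler: {}".format(commend_data.get("handler")))
    task_obj = builder("demo_cmd", commend_data)
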
diff --git a/ct_om/service/ctmgr/task_obj.py b/ct_om/service/ctmgr/task_obj.py
index 6fc0b3440a0580d9c0ae70f7aea7578438e49b64..5cd5362d67792890c08562f7ee238698d93309a6 100644
--- a/ct_om/service/ctmgr/task_obj.py
+++ b/ct_om/service/ctmgr/task_obj.py
@@ -10,8 +10,8 @@ from checkers import CHECKER
from common.output_tool import CommonResult
from common.common_tool import TimeTool
-FAIL = 'fail'
-SUCCESS = 'success'
+FAIL = "fail"
+SUCCESS = "success"
class TASK(metaclass=ABCMeta):
@@ -37,7 +37,9 @@ class TASK(metaclass=ABCMeta):
TASK_LOG.error("check function %s not exist" % param_key)
continue
- if not CHECKER.get(param_key)(input_params_dict, check_key, param_value):
+ if not CHECKER.get(param_key)(
+ input_params_dict, check_key, param_value
+ ):
TASK_LOG.error("%s check %s not pass" % (check_key, param_key))
return False
@@ -70,16 +72,20 @@ class UdsTask(TASK):
try:
res = uds_client.client_socket(output_data)
except Exception as error:
- result = CommonResult(output_data='cmd {} send uds request failed'.format(self.task_name), error_code=1,
- description='cmd {} send uds request failed \n [ERROR] {}'.format(
- self.task_name, error))
+ result = CommonResult(
+ output_data="cmd {} send uds request failed".format(self.task_name),
+ error_code=1,
+ description="cmd {} send uds request failed \n [ERROR] {}".format(
+ self.task_name, error
+ ),
+ )
return result
result.set_output_data(res)
task_logger.set_finish_time(TimeTool.get_current_time())
- error_code = eval(result.__str__()).get('error', {}).get('code', '')
- if str(error_code) != '0':
+ error_code = eval(result.__str__()).get("error", {}).get("code", "")
+ if str(error_code) != "0":
task_logger.error(self.task_name, FAIL)
else:
task_logger.info(self.task_name, SUCCESS)
@@ -101,22 +107,31 @@ class CmdTask(TASK):
result = CommonResult()
for param_key in input_params.keys():
- self.cmd_line = self.cmd_line.replace('${%s}' % param_key, str(input_params[param_key]))
+ self.cmd_line = self.cmd_line.replace(
+ "${%s}" % param_key, str(input_params[param_key])
+ )
task_logger.set_request_time(TimeTool.get_current_time())
try:
- command_result = subprocess.Popen(self.cmd_line.split(' '), stdout=subprocess.PIPE, shell=False)
+ command_result = subprocess.Popen(
+ self.cmd_line.split(" "), stdout=subprocess.PIPE, shell=False
+ )
except Exception as error:
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
- result = CommonResult(output_data='{} execute cmd {} failed'.format(self.task_name, self.cmd_line),
- error_code=1,
- description='{} execute cmd {} failed \n [ERROR] {}'.format(
- self.task_name, self.cmd_line, error))
+ result = CommonResult(
+ output_data="{} execute cmd {} failed".format(
+ self.task_name, self.cmd_line
+ ),
+ error_code=1,
+ description="{} execute cmd {} failed \n [ERROR] {}".format(
+ self.task_name, self.cmd_line, error
+ ),
+ )
return result
- res = command_result.communicate(timeout=self.time_out)[0].decode('utf-8')
+ res = command_result.communicate(timeout=self.time_out)[0].decode("utf-8")
result.set_output_data(res)
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, SUCCESS)
@@ -139,8 +154,11 @@ class ShellTask(TASK):
self.time_out = 5
if not self.basic_check():
- DEPLOY_LOG.error('[error] {} not exist, init ShellTask failed, please check param in cmd {}'.format(
- self.file_path, self.task_name))
+ DEPLOY_LOG.error(
+ "[error] {} not exist, init ShellTask failed, please check param in cmd {}".format(
+ self.file_path, self.task_name
+ )
+ )
raise Exception
def basic_check(self):
@@ -148,28 +166,35 @@ class ShellTask(TASK):
def task_execute(self, input_params, task_logger):
params_format_save = self.sh_input
- cmd_line = 'sh {}'.format(self.file_path)
+ cmd_line = "sh {}".format(self.file_path)
for param_key in input_params.keys():
- self.sh_input = self.sh_input.replace('${%s}' % param_key, str(input_params[param_key]))
- cmd_line = cmd_line + ' ' + str(self.sh_input)
+ self.sh_input = self.sh_input.replace(
+ "${%s}" % param_key, str(input_params[param_key])
+ )
+ cmd_line = cmd_line + " " + str(self.sh_input)
result = CommonResult()
task_logger.set_request_time(TimeTool.get_current_time())
try:
- sh_result = subprocess.Popen(cmd_line.split(' '), stdout=subprocess.PIPE, shell=False)
+ sh_result = subprocess.Popen(
+ cmd_line.split(" "), stdout=subprocess.PIPE, shell=False
+ )
except Exception as error:
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
- result.set_output_data('call shell file: {} failed'.format(self.file_path))
+ result.set_output_data("call shell file: {} failed".format(self.file_path))
result.set_error_code(1)
- result.set_description('{} call shell file: {} with params: {} failed \n [ERROR] {}'.format(
- self.task_name, self.file_path, self.sh_input, error))
+ result.set_description(
+ "{} call shell file: {} with params: {} failed \n [ERROR] {}".format(
+ self.task_name, self.file_path, self.sh_input, error
+ )
+ )
self.sh_input = params_format_save
return result
- res = sh_result.communicate(timeout=self.time_out)[0].decode('utf-8')
+ res = sh_result.communicate(timeout=self.time_out)[0].decode("utf-8")
result.set_output_data(res)
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, SUCCESS)
@@ -192,8 +217,11 @@ class PyTask(TASK):
self.py_input = py_input
self.time_out = 5
if not self.basic_check():
- DEPLOY_LOG.error('[error] {} not exist, init ShellTask failed, please check param in cmd {}'.format(
- self.file_path, self.task_name))
+ DEPLOY_LOG.error(
+ "[error] {} not exist, init ShellTask failed, please check param in cmd {}".format(
+ self.file_path, self.task_name
+ )
+ )
raise Exception
def basic_check(self):
@@ -201,40 +229,62 @@ class PyTask(TASK):
def task_execute(self, input_params, task_logger):
params_format_save = self.py_input
- cmd_line = 'python3 {}'.format(self.file_path)
+ cmd_line = "python3 {}".format(self.file_path)
for param_key in input_params.keys():
- self.py_input = self.py_input.replace('${%s}' % param_key, str(input_params[param_key]))
- cmd_line = cmd_line + ' ' + str(self.py_input)
+ self.py_input = self.py_input.replace(
+ "${%s}" % param_key, str(input_params[param_key])
+ )
+ cmd_line = cmd_line + " " + str(self.py_input)
result = CommonResult()
- TASK_LOG.info('task: {} calling py file: {}, using cmd: {}'.format(self.task_name, self.file_path, cmd_line))
+ TASK_LOG.info(
+ "task: {} calling py file: {}, using cmd: {}".format(
+ self.task_name, self.file_path, cmd_line
+ )
+ )
task_logger.set_request_time(TimeTool.get_current_time())
try:
- py_result = subprocess.Popen(cmd_line.split(' '), stdout=subprocess.PIPE, shell=False)
+ py_result = subprocess.Popen(
+ cmd_line.split(" "), stdout=subprocess.PIPE, shell=False
+ )
except Exception as error:
- TASK_LOG.error('task: {} calling py file: {}, using cmd: {} fail, error: {}'.format(
- self.task_name, self.file_path, cmd_line, error))
+ TASK_LOG.error(
+ "task: {} calling py file: {}, using cmd: {} fail, error: {}".format(
+ self.task_name, self.file_path, cmd_line, error
+ )
+ )
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
- result = CommonResult(output_data='{} call py file: {} failed'.format(self.task_name, self.file_path),
- error_code=1,
- description='{} call py file: {} with params: {} failed \n [ERROR] {}'.format(
- self.task_name, self.file_path, self.py_input, error))
+ result = CommonResult(
+ output_data="{} call py file: {} failed".format(
+ self.task_name, self.file_path
+ ),
+ error_code=1,
+ description="{} call py file: {} with params: {} failed \n [ERROR] {}".format(
+ self.task_name, self.file_path, self.py_input, error
+ ),
+ )
self.py_input = params_format_save
return result
try:
- res = py_result.communicate(timeout=self.time_out)[0].decode('utf-8')
+ res = py_result.communicate(timeout=self.time_out)[0].decode("utf-8")
except Exception as error:
- TASK_LOG.error('task: {} calling py file: {} fail to obtain result, error: {}'.format(
- self.task_name, self.file_path, error))
+ TASK_LOG.error(
+ "task: {} calling py file: {} fail to obtain result, error: {}".format(
+ self.task_name, self.file_path, error
+ )
+ )
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
- result = CommonResult(output_data='execute file: {} failed'.format(self.file_path),
- error_code=1,
- description='{} call py file: {} with params: {} failed \n [ERROR] {}'.format(
- self.task_name, self.file_path, self.py_input, error))
+ result = CommonResult(
+ output_data="execute file: {} failed".format(self.file_path),
+ error_code=1,
+ description="{} call py file: {} with params: {} failed \n [ERROR] {}".format(
+ self.task_name, self.file_path, self.py_input, error
+ ),
+ )
self.py_input = params_format_save
return result
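
All three task classes in this file drive their child process through Popen with shell=False plus a communicate timeout. A minimal sketch of that call pattern; the command is hypothetical, and TimeoutExpired is caught explicitly so the child gets reaped:

    import subprocess

    cmd_line = "echo hello"  # hypothetical command
    proc = subprocess.Popen(cmd_line.split(" "), stdout=subprocess.PIPE, shell=False)
    try:
        res = proc.communicate(timeout=5)[0].decode("utf-8")
    except subprocess.TimeoutExpired:
        proc.kill()  # kill and reap instead of leaving the child running
        res = ""
    print(res)
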
diff --git a/ct_om/service/ctmgr/tasks/audit_task.py b/ct_om/service/ctmgr/tasks/audit_task.py
index a2ffdce0edf8eed82117e4ff21cf0eb0a6e0ad6a..241dcfe42a7f98edc11310c67bf529597d42022c 100644
--- a/ct_om/service/ctmgr/tasks/audit_task.py
+++ b/ct_om/service/ctmgr/tasks/audit_task.py
@@ -9,31 +9,35 @@ from log_tool.om_log import TASK_LOG
MAX_AUDIT_NUM = 10
DIR_NAME, _ = os.path.split(os.path.abspath(__file__))
-INSPECTION_PATH = str(Path('{}/../inspections'.format(DIR_NAME)))
+INSPECTION_PATH = str(Path("{}/../inspections".format(DIR_NAME)))
class AuditTask(PyTask):
def __init__(self, task_name, handler, file_path, py_input, params_check_dict):
super().__init__(task_name, handler, file_path, py_input, params_check_dict)
- self.output_data = ''
+ self.output_data = ""
self.audit_path = INSPECTION_PATH
def task_execute(self, input_params, task_logger):
- res = super(AuditTask, self).task_execute(input_params=input_params, task_logger=task_logger)
+ res = super(AuditTask, self).task_execute(
+ input_params=input_params, task_logger=task_logger
+ )
- audit_info = eval(eval(res.__str__()).get('data', {}).get('ctmgr_common_output', ''))
+ audit_info = eval(
+ eval(res.__str__()).get("data", {}).get("ctmgr_common_output", "")
+ )
- audit_result = audit_info.get('RESULT')
- if str(audit_result) == '0':
- self.output_data = str(audit_info).replace('\n', '')
+ audit_result = audit_info.get("RESULT")
+ if str(audit_result) == "0":
+ self.output_data = str(audit_info).replace("\n", "")
self.write_audit()
- TASK_LOG.info('show cantian status success')
- res.set_output_data(str(audit_info.get('CMS_STAT')))
+ TASK_LOG.info("show cantian status success")
+ res.set_output_data(str(audit_info.get("CMS_STAT")))
return res
else:
- TASK_LOG.error('show cantian status fail')
- res.set_output_data('')
+ TASK_LOG.error("show cantian status fail")
+ res.set_output_data("")
res.set_error_code(1)
return res
@@ -50,9 +54,9 @@ class AuditTask(PyTask):
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
- audit_file = 'inspection_{}'.format(str(cur_time.strftime("%Y%m%d%H%M%S")))
- audit_file_path = str(Path(self.audit_path + '/' + audit_file))
- with os.fdopen(os.open(audit_file_path, flags, modes), 'w', encoding='utf-8') as file:
+ audit_file = "inspection_{}".format(str(cur_time.strftime("%Y%m%d%H%M%S")))
+ audit_file_path = str(Path(self.audit_path + "/" + audit_file))
+ with os.fdopen(
+ os.open(audit_file_path, flags, modes), "w", encoding="utf-8"
+ ) as file:
file.write(self.output_data)
-
-
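
The nested eval in task_execute round-trips CommonResult through its string form and then parses the inner ctmgr_common_output string. If that shape is kept, ast.literal_eval is the safer standard-library parse for Python-literal payloads; a sketch under that assumption, with a made-up payload:

    import ast

    raw = "{'data': {'ctmgr_common_output': \"{'RESULT': 0, 'CMS_STAT': 'ONLINE'}\"}}"
    outer = ast.literal_eval(raw)  # outer dict still holds a stringified inner dict
    audit_info = ast.literal_eval(outer.get("data", {}).get("ctmgr_common_output", "{}"))
    print(audit_info.get("RESULT"), audit_info.get("CMS_STAT"))  # -> 0 ONLINE
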
diff --git a/ct_om/service/ctmgr/tasks/inspection/inspection_task.py b/ct_om/service/ctmgr/tasks/inspection/inspection_task.py
index f931bc12b0aec224cd2c2d6e385ec92dc5013741..7d088a4dbcfac5ade3e20d2c6f5c0cd09be811fd 100644
--- a/ct_om/service/ctmgr/tasks/inspection/inspection_task.py
+++ b/ct_om/service/ctmgr/tasks/inspection/inspection_task.py
@@ -13,11 +13,13 @@ from common.common_tool import TimeTool
MAX_AUDIT_NUM = 10
DIR_NAME, _ = os.path.split(os.path.abspath(__file__))
-INSPECTION_PATH = str(Path('{}/../../inspections'.format(DIR_NAME)))
-FAIL = 'fail'
-SUCCESS = 'success'
-SUCCESS_ENUM = [0, '0']
-INSPECTION_JSON_FILE = "/opt/cantian/ct_om/service/ctmgr/tasks/inspection/inspection_config.json"
+INSPECTION_PATH = str(Path("{}/../../inspections".format(DIR_NAME)))
+FAIL = "fail"
+SUCCESS = "success"
+SUCCESS_ENUM = [0, "0"]
+INSPECTION_JSON_FILE = (
+ "/opt/cantian/ct_om/service/ctmgr/tasks/inspection/inspection_config.json"
+)
class InspectionTask(TASK):
@@ -33,30 +35,32 @@ class InspectionTask(TASK):
@staticmethod
def read_inspection_config():
- with open(INSPECTION_JSON_FILE, encoding='utf-8') as file:
+ with open(INSPECTION_JSON_FILE, encoding="utf-8") as file:
inspection_map = json.load(file)
return inspection_map
@staticmethod
- def format_single_inspection_result(inspection_item, inspection_detail, execute_result, inspection_result):
+ def format_single_inspection_result(
+ inspection_item, inspection_detail, execute_result, inspection_result
+ ):
return_value = {
- 'inspection_item': inspection_item,
- 'component': inspection_detail.get("component"),
- 'inspection_result': execute_result,
- 'inspection_detail': inspection_result.get('data'),
- 'description_zn': inspection_detail.get("description_zn"),
- 'description_en': inspection_detail.get("description_en")
+ "inspection_item": inspection_item,
+ "component": inspection_detail.get("component"),
+ "inspection_result": execute_result,
+ "inspection_detail": inspection_result.get("data"),
+ "description_zn": inspection_detail.get("description_zn"),
+ "description_en": inspection_detail.get("description_en"),
}
if inspection_result and isinstance(inspection_result, dict):
- err_info = inspection_result.get('error', {})
- error_code = err_info.get('code')
+ err_info = inspection_result.get("error", {})
+ error_code = err_info.get("code")
if error_code is None:
return return_value
if error_code not in SUCCESS_ENUM:
- return_value['inspection_result'] = FAIL
+ return_value["inspection_result"] = FAIL
return return_value
@@ -76,11 +80,16 @@ class InspectionTask(TASK):
if input_params == "all":
inspection_items = list(self.inspection_map.keys())
- if not isinstance(input_params, list) and input_params != 'all':
- TASK_LOG.error("inspection input error, input value is: " + str(input_params))
- result = CommonResult(output_data="inspection input error, input value is: " + str(input_params),
- error_code=1,
- description="input must be string \"all\" or [xxx, xxx]")
+ if not isinstance(input_params, list) and input_params != "all":
+ TASK_LOG.error(
+ "inspection input error, input value is: " + str(input_params)
+ )
+ result = CommonResult(
+ output_data="inspection input error, input value is: "
+ + str(input_params),
+ error_code=1,
+ description='input must be string "all" or [xxx, xxx]',
+ )
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
return result
@@ -88,8 +97,9 @@ class InspectionTask(TASK):
for inspection_item in inspection_items:
inspection_detail = self.inspection_map.get(inspection_item)
- single_check_result, single_result = self.param_check_single(inspection_item,
- inspection_detail, task_logger)
+ single_check_result, single_result = self.param_check_single(
+ inspection_item, inspection_detail, task_logger
+ )
if not single_check_result:
if single_result:
return single_result
@@ -97,18 +107,25 @@ class InspectionTask(TASK):
continue
try:
- single_inspection_result = json.loads(self.task_execute_single(inspection_detail))
+ single_inspection_result = json.loads(
+ self.task_execute_single(inspection_detail)
+ )
except Exception as err:
- TASK_LOG.error("excute %s inspection failed with error: %s" % (inspection_item, str(err)))
- formated_inspection_result = self.format_single_inspection_result(inspection_item,
- inspection_detail, FAIL, {})
+ TASK_LOG.error(
+ "excute %s inspection failed with error: %s"
+ % (inspection_item, str(err))
+ )
+ formated_inspection_result = self.format_single_inspection_result(
+ inspection_item, inspection_detail, FAIL, {}
+ )
self.inspection_result.append(formated_inspection_result)
self.fail_list.append(inspection_item)
continue
- formated_inspection_result = self.format_single_inspection_result(inspection_item, inspection_detail,
- SUCCESS, single_inspection_result)
- if formated_inspection_result.get('inspection_result') == FAIL:
+ formated_inspection_result = self.format_single_inspection_result(
+ inspection_item, inspection_detail, SUCCESS, single_inspection_result
+ )
+ if formated_inspection_result.get("inspection_result") == FAIL:
self.inspection_result.append(formated_inspection_result)
self.fail_list.append(inspection_item)
continue
@@ -117,12 +134,18 @@ class InspectionTask(TASK):
self.success_list.append(inspection_item)
if not self.success_list:
- result.set_output_data("inspection item: %s failed" % ' '.join(self.fail_list))
+ result.set_output_data(
+ "inspection item: %s failed" % " ".join(self.fail_list)
+ )
elif not self.fail_list:
- result.set_output_data("inspection item: %s success" % ' '.join(self.success_list))
+ result.set_output_data(
+ "inspection item: %s success" % " ".join(self.success_list)
+ )
else:
- result.set_output_data("inspection item: %s success, inspection item: %s failed"
- % (' '.join(self.success_list), ' '.join(self.fail_list)))
+ result.set_output_data(
+ "inspection item: %s success, inspection item: %s failed"
+ % (" ".join(self.success_list), " ".join(self.fail_list))
+ )
self.write_audit()
@@ -135,17 +158,24 @@ class InspectionTask(TASK):
if not inspection_detail:
TASK_LOG.error("inspection item %s not exist" % inspection_item)
- result = CommonResult(output_data=inspection_item + " not exist", error_code=1,
- description="please check input, inspection item %s not exist"
- % inspection_item)
+ result = CommonResult(
+ output_data=inspection_item + " not exist",
+ error_code=1,
+ description="please check input, inspection item %s not exist"
+ % inspection_item,
+ )
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
return False, result
- if not os.path.exists(inspection_detail.get('inspection_file_path')):
- TASK_LOG.error("inspection file: %s not exist" % str(inspection_detail.get('inspection_file_path')))
- formated_inspection_result = self.format_single_inspection_result(inspection_item,
- inspection_detail, FAIL, None)
+ if not os.path.exists(inspection_detail.get("inspection_file_path")):
+ TASK_LOG.error(
+ "inspection file: %s not exist"
+ % str(inspection_detail.get("inspection_file_path"))
+ )
+ formated_inspection_result = self.format_single_inspection_result(
+ inspection_item, inspection_detail, FAIL, None
+ )
self.inspection_result.append(formated_inspection_result)
self.fail_list.append(inspection_item)
return False, result
@@ -153,13 +183,18 @@ class InspectionTask(TASK):
return True, result
def task_execute_single(self, inspection_detail):
- inspection_item_file = inspection_detail.get('inspection_file_path')
- inspection_item_input = inspection_detail.get('input_param')
-
- single_inspection_popen = subprocess.Popen(['/usr/bin/python3', inspection_item_file, inspection_item_input],
- stdout=subprocess.PIPE, shell=False)
- single_inspection_result = single_inspection_popen.communicate(timeout=self.time_out)[0].decode('utf-8')
- single_inspection_result = single_inspection_result.replace("\'", "\"")
+ inspection_item_file = inspection_detail.get("inspection_file_path")
+ inspection_item_input = inspection_detail.get("input_param")
+
+ single_inspection_popen = subprocess.Popen(
+ ["/usr/bin/python3", inspection_item_file, inspection_item_input],
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
+ single_inspection_result = single_inspection_popen.communicate(
+ timeout=self.time_out
+ )[0].decode("utf-8")
+ single_inspection_result = single_inspection_result.replace("'", '"')
return single_inspection_result
@@ -174,7 +209,7 @@ class InspectionTask(TASK):
:param inspection_output: output of the invoked inspection-item script
Normalize the one-click inspection result format and write it to self.output_data
"""
- self.output_data['data'] = self.inspection_result
+ self.output_data["data"] = self.inspection_result
def write_audit(self):
self.format_inspection_result()
@@ -191,7 +226,16 @@ class InspectionTask(TASK):
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
- audit_file = 'inspection_{}'.format(str(cur_time.strftime("%Y%m%d%H%M%S")))
- audit_file_path = str(Path(self.audit_path + '/' + audit_file))
- with os.fdopen(os.open(audit_file_path, flags, modes), 'w', encoding='utf-8') as file:
- file.write(json.dumps(self.output_data, indent=4, separators=(',', ': '), ensure_ascii=False))
+ audit_file = "inspection_{}".format(str(cur_time.strftime("%Y%m%d%H%M%S")))
+ audit_file_path = str(Path(self.audit_path + "/" + audit_file))
+ with os.fdopen(
+ os.open(audit_file_path, flags, modes), "w", encoding="utf-8"
+ ) as file:
+ file.write(
+ json.dumps(
+ self.output_data,
+ indent=4,
+ separators=(",", ": "),
+ ensure_ascii=False,
+ )
+ )
diff --git a/ct_om/service/ctmgr/tasks/log_progress_query.py b/ct_om/service/ctmgr/tasks/log_progress_query.py
index 8034fd82613962bbdadf12edd0bdc42784b192cf..5a8eec6ecbab479ee5a08445bb61de30ba1c179c 100644
--- a/ct_om/service/ctmgr/tasks/log_progress_query.py
+++ b/ct_om/service/ctmgr/tasks/log_progress_query.py
@@ -8,8 +8,10 @@ from common.common_tool import TimeTool
cur_abs_path, _ = os.path.split(os.path.abspath(__file__))
upper_path = os.path.abspath(os.path.join(cur_abs_path, ".."))
-LOG_FILE_PATH = str(Path('{}/logs_collection'.format(upper_path), 'log_packing_progress.json'))
-SUCCESS = 'success'
+LOG_FILE_PATH = str(
+ Path("{}/logs_collection".format(upper_path), "log_packing_progress.json")
+)
+SUCCESS = "success"
class QueryLogProgressTask(TASK):
@@ -20,11 +22,13 @@ class QueryLogProgressTask(TASK):
def task_execute(self, input_params, task_logger: LOGGER):
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, SUCCESS)
- TASK_LOG.info('doing logs collection progress query, path is: {}'.format(LOG_FILE_PATH))
+ TASK_LOG.info(
+ "doing logs collection progress query, path is: {}".format(LOG_FILE_PATH)
+ )
return CommonResult(output_data=LOG_FILE_PATH)
-if __name__ == '__main__':
+if __name__ == "__main__":
lt = QueryLogProgressTask(None, None)
lg = LOGGER(None, None, None)
lt.task_execute({}, lg)
diff --git a/ct_om/service/ctmgr/tasks/log_query.py b/ct_om/service/ctmgr/tasks/log_query.py
index e8428d59bf9f850838a55aa75c48a499cfa60abd..fcc2a1f4ffe0a023135c871bdf22047bfc254387 100644
--- a/ct_om/service/ctmgr/tasks/log_query.py
+++ b/ct_om/service/ctmgr/tasks/log_query.py
@@ -8,7 +8,7 @@ from common.common_tool import TimeTool
from log_tool.om_log import TASK_LOG
LOG_FILE_PATH = str(Path(LOG.handlers[0].baseFilename))
-SUCCESS = 'success'
+SUCCESS = "success"
class QueryLogTask(TASK):
@@ -19,11 +19,11 @@ class QueryLogTask(TASK):
def task_execute(self, input_params, task_logger: LOGGER):
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, SUCCESS)
- TASK_LOG.info('doing log queryn, path is: {}'.format(LOG_FILE_PATH))
+ TASK_LOG.info("doing log queryn, path is: {}".format(LOG_FILE_PATH))
return CommonResult(output_data=LOG_FILE_PATH)
-if __name__ == '__main__':
+if __name__ == "__main__":
lt = QueryLogTask(None, None)
lg = LOGGER(None, None, None)
lt.task_execute({}, lg)
diff --git a/ct_om/service/ctmgr/tasks/logs_collections_task.py b/ct_om/service/ctmgr/tasks/logs_collections_task.py
index 21e3447536072b0d2a66e7df0812c58e0c6e86eb..96b28182ad1e52d358f631536fd6f1840f4fc903 100644
--- a/ct_om/service/ctmgr/tasks/logs_collections_task.py
+++ b/ct_om/service/ctmgr/tasks/logs_collections_task.py
@@ -8,7 +8,7 @@ from log_tool.om_log import LOGGER, DEPLOY_LOG, TASK_LOG
from common.output_tool import CommonResult
from common.common_tool import TimeTool
-SUCCESS, FAIL = 'success', 'fail'
+SUCCESS, FAIL = "success", "fail"
cur_abs_path, _ = os.path.split(os.path.abspath(__file__))
@@ -20,8 +20,11 @@ class LogsCollectionTask(TASK):
self.py_input = py_input
self.time_out = 2
if not self.basic_check():
- DEPLOY_LOG.error('[error] {} not exist, init ShellTask failed, please check param in cmd {}'.format(
- self.file_path, self.task_name))
+ DEPLOY_LOG.error(
+ "[error] {} not exist, init ShellTask failed, please check param in cmd {}".format(
+ self.file_path, self.task_name
+ )
+ )
raise Exception
def basic_check(self):
@@ -29,25 +32,39 @@ class LogsCollectionTask(TASK):
def task_execute(self, input_params, task_logger):
params_format_save = self.py_input
- exec_cmd = 'python3 {}'.format(self.file_path)
+ exec_cmd = "python3 {}".format(self.file_path)
for param_key in input_params.keys():
- self.py_input = self.py_input.replace('${%s}' % param_key, str(input_params.get(param_key)))
- exec_cmd = exec_cmd + ' ' + str(self.py_input)
+ self.py_input = self.py_input.replace(
+ "${%s}" % param_key, str(input_params.get(param_key))
+ )
+ exec_cmd = exec_cmd + " " + str(self.py_input)
result = CommonResult()
- TASK_LOG.info('task: {} calling py file: {}, using cmd: {}'.format(self.task_name, self.file_path, exec_cmd))
+ TASK_LOG.info(
+ "task: {} calling py file: {}, using cmd: {}".format(
+ self.task_name, self.file_path, exec_cmd
+ )
+ )
task_logger.set_request_time(TimeTool.get_current_time())
try:
- py_result = subprocess.Popen(shlex.split(exec_cmd), stdout=subprocess.PIPE, shell=False)
+ py_result = subprocess.Popen(
+ shlex.split(exec_cmd), stdout=subprocess.PIPE, shell=False
+ )
except Exception as err:
- TASK_LOG.error('task: {} calling py file: {}, using cmd: {} fail, '
- 'error: {}'.format(self.task_name, self.file_path, exec_cmd, err))
+ TASK_LOG.error(
+ "task: {} calling py file: {}, using cmd: {} fail, "
+ "error: {}".format(self.task_name, self.file_path, exec_cmd, err)
+ )
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
- result = CommonResult(output_data='', error_code=1,
- description='{} call py file: {} with params: {} failed \n [ERROR] {}'.format(
- self.task_name, self.file_path, self.py_input, err))
+ result = CommonResult(
+ output_data="",
+ error_code=1,
+ description="{} call py file: {} with params: {} failed \n [ERROR] {}".format(
+ self.task_name, self.file_path, self.py_input, err
+ ),
+ )
self.py_input = params_format_save
return result
@@ -55,7 +72,7 @@ class LogsCollectionTask(TASK):
try:
std_res, std_err = py_result.communicate(timeout=self.time_out)
except Exception as err:
- TASK_LOG.info('task: {} background execution start'.format(self.task_name))
+ TASK_LOG.info("task: {} background execution start".format(self.task_name))
result.set_output_data(str(err))
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, SUCCESS)
@@ -66,17 +83,23 @@ class LogsCollectionTask(TASK):
# The log-collection script exited abnormally during execution
if not std_res or std_err:
- err_msg = 'cmd: {} itself failed'.format(exec_cmd)
- TASK_LOG.error('task: {} calling py file: {}, using cmd: {} fail, '
- 'error: {}'.format(self.task_name, self.file_path, exec_cmd, err_msg))
+ err_msg = "cmd: {} itself failed".format(exec_cmd)
+ TASK_LOG.error(
+ "task: {} calling py file: {}, using cmd: {} fail, "
+ "error: {}".format(self.task_name, self.file_path, exec_cmd, err_msg)
+ )
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, FAIL)
- result = CommonResult(output_data='', error_code=1,
- description='{} call py file: {} failed, [ERROR] {}'.format(
- self.task_name, self.file_path, err_msg))
+ result = CommonResult(
+ output_data="",
+ error_code=1,
+ description="{} call py file: {} failed, [ERROR] {}".format(
+ self.task_name, self.file_path, err_msg
+ ),
+ )
return result
- result.set_output_data(std_res.decode('utf-8'))
+ result.set_output_data(std_res.decode("utf-8"))
task_logger.set_finish_time(TimeTool.get_current_time())
task_logger.info(self.task_name, SUCCESS)
return result
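
One detail worth noting here: unlike PyTask, this class tokenizes its command with shlex.split rather than str.split(" "), so quoted arguments survive intact. A minimal comparison, with a hypothetical command line:

    import shlex

    exec_cmd = 'python3 collect.py --path "/var/log/my logs"'  # hypothetical
    print(exec_cmd.split(" "))  # naive split breaks the quoted path in two
    print(shlex.split(exec_cmd))  # ['python3', 'collect.py', '--path', '/var/log/my logs']
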
diff --git a/ct_om/service/ctmgr/uds_client.py b/ct_om/service/ctmgr/uds_client.py
index 1b870a4ba13d89df015b3e21d7b7b3871bd8e76e..48f6dcb9ecf1c42297cee904f473c046249e93af 100644
--- a/ct_om/service/ctmgr/uds_client.py
+++ b/ct_om/service/ctmgr/uds_client.py
@@ -4,7 +4,7 @@ import socket
from log_tool.om_log import TASK_LOG
from common.output_tool import CommonResult
-SERVER_ADDRESS = ''
+SERVER_ADDRESS = ""
SOCKET_TYPE = socket.SOCK_STREAM
SOCKET_FAMILY = socket.AF_UNIX
@@ -19,23 +19,32 @@ def client_socket(send_data):
try:
sock.connect(SERVER_ADDRESS)
except socket.error as error:
- TASK_LOG.error('client connect failed, error: {}'.format(error))
- return CommonResult(output_data='client connect failed', error_code=1,
- description='connect failed, error: {}'.format(error))
+ TASK_LOG.error("client connect failed, error: {}".format(error))
+ return CommonResult(
+ output_data="client connect failed",
+ error_code=1,
+ description="connect failed, error: {}".format(error),
+ )
try:
sock.sendall(json.dumps(send_data).encode())
except socket.error as error:
- result = CommonResult(output_data='uds request failed', error_code=1,
- description='send data error: {}'.format(error))
+ result = CommonResult(
+ output_data="uds request failed",
+ error_code=1,
+ description="send data error: {}".format(error),
+ )
return result
try:
recv_data = sock.recv(RECEIVE_DATA_SIZE).decode()
except socket.error as error:
- result = CommonResult(output_data='uds request failed', error_code=1,
- description='receive data error: {}'.format(error))
- TASK_LOG.error('send/receive data error: {}'.format(error))
+ result = CommonResult(
+ output_data="uds request failed",
+ error_code=1,
+ description="receive data error: {}".format(error),
+ )
+ TASK_LOG.error("send/receive data error: {}".format(error))
return result
result.set_output_data(recv_data)
@@ -45,5 +54,5 @@ def client_socket(send_data):
return result
-if __name__ == '__main__':
+if __name__ == "__main__":
client_socket('{"command": "show cantina status", "param": {}}')
diff --git a/ct_om/service/ctmgr/uds_server.py b/ct_om/service/ctmgr/uds_server.py
index 8c020190377b35334d113ddad1beec4832d99e36..85de790de06dc79f9178b275efd0c55e3626dec2 100644
--- a/ct_om/service/ctmgr/uds_server.py
+++ b/ct_om/service/ctmgr/uds_server.py
@@ -15,7 +15,7 @@ threadPool = ThreadPoolExecutor(max_workers=5)
task_dir = task.task_dir
dir_name, _ = os.path.split(os.path.abspath(__file__))
-SERVER_ADDRESS = str(Path('{}/../ct_om.sock'.format(dir_name)))
+SERVER_ADDRESS = str(Path("{}/../ct_om.sock".format(dir_name)))
SOCKET_FAMILY = socket.AF_UNIX
SOCKET_TYPE = socket.SOCK_STREAM
@@ -25,17 +25,22 @@ USER_UID = (6004,)
def get_socket_msg(conn, client_addr):
- credit = conn.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED, struct.calcsize('3i'))
+ credit = conn.getsockopt(
+ socket.SOL_SOCKET, socket.SO_PEERCRED, struct.calcsize("3i")
+ )
# input_json is a JSON-style string, e.g. "{'command': 'show cantian status', 'param': {}}"
input_json = conn.recv(RECEIVE_DATA_SIZE).decode()
- pid, uid, gid = struct.unpack('3i', credit)
+ pid, uid, gid = struct.unpack("3i", credit)
if int(uid) not in USER_UID:
- result = CommonResult(output_data='uid from client is not correct', error_code=1,
- description='current uid from client is {}'.format(uid))
+ result = CommonResult(
+ output_data="uid from client is not correct",
+ error_code=1,
+ description="current uid from client is {}".format(uid),
+ )
conn.sendall(result.__str__().encode())
else:
- begin_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+ begin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
task_log = LOGGER(client_pid=pid, client_uid=uid, begin_time=begin_time)
result = execute_task(input_json, task_log)  # receive the CLI command, pre-validate it, then execute
conn.sendall(result.__str__().encode())
@@ -48,16 +53,16 @@ def server_socket():
os.unlink(SERVER_ADDRESS)
# Bind the socket
if sock.bind(SERVER_ADDRESS):
- DEPLOY_LOG.error('socket bind error, SERVER_ADDRESS: {}'.format(SERVER_ADDRESS))
- raise Exception('socket bind error, SERVER_ADDRESS: {}'.format(SERVER_ADDRESS))
+ DEPLOY_LOG.error("socket bind error, SERVER_ADDRESS: {}".format(SERVER_ADDRESS))
+ raise Exception("socket bind error, SERVER_ADDRESS: {}".format(SERVER_ADDRESS))
else:
- DEPLOY_LOG.info('socket bind success, address is {}'.format(SERVER_ADDRESS))
+ DEPLOY_LOG.info("socket bind success, address is {}".format(SERVER_ADDRESS))
if sock.listen(1):
- DEPLOY_LOG.error('socket listen error')
- raise Exception('socket listen error')
+ DEPLOY_LOG.error("socket listen error")
+ raise Exception("socket listen error")
- DEPLOY_LOG.info('socket listen begin')
+ DEPLOY_LOG.info("socket listen begin")
while KEEP_LISTEN:
connection, client_address = sock.accept()
@@ -68,13 +73,16 @@ def server_socket():
def execute_task(input_data, task_log):
input_data_dict = ast.literal_eval(input_data)
- command = input_data_dict.get('command')
- input_params_dict = input_data_dict.get('param')
+ command = input_data_dict.get("command")
+ input_params_dict = input_data_dict.get("param")
if command not in task_dir.keys():
- task_log.error(cmd=command, result='fail')
- result = CommonResult(output_data='execute {} failed'.format(command), error_code=1,
- description='cli command: {} not valid'.format(command))
+ task_log.error(cmd=command, result="fail")
+ result = CommonResult(
+ output_data="execute {} failed".format(command),
+ error_code=1,
+ description="cli command: {} not valid".format(command),
+ )
return result
task_obj = task_dir[command]
@@ -83,21 +91,29 @@ def execute_task(input_data, task_log):
result = task_obj.task_execute(input_params_dict, task_log)
except Exception as err:
exc_type, exc_value, exc_tb = sys.exc_info()
- TASK_LOG.error("exece command: %s faild, inner error is: %s, detial is %s" %
- (command, str(err), str((exc_type, exc_value, exc_tb))))
- task_log.error(cmd=command, result='fail')
- result = CommonResult(output_data='execute {} failed'.format(command), error_code=1,
- description='param check failed for command: {}'.format(command))
+ TASK_LOG.error(
+ "exece command: %s faild, inner error is: %s, detial is %s"
+ % (command, str(err), str((exc_type, exc_value, exc_tb)))
+ )
+ task_log.error(cmd=command, result="fail")
+ result = CommonResult(
+ output_data="execute {} failed".format(command),
+ error_code=1,
+ description="param check failed for command: {}".format(command),
+ )
return result
else:
- task_log.error(cmd=command, result='fail')
- result = CommonResult(output_data='execute {} failed'.format(command), error_code=1,
- description='param check failed for command: {}'.format(command))
+ task_log.error(cmd=command, result="fail")
+ result = CommonResult(
+ output_data="execute {} failed".format(command),
+ error_code=1,
+ description="param check failed for command: {}".format(command),
+ )
return result
-if __name__ == '__main__':
+if __name__ == "__main__":
task.load_tasks()
server_socket()
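
The uid gate in get_socket_msg relies on SO_PEERCRED, which returns the peer's pid, uid, and gid packed as three native ints (Linux-only). A minimal helper sketch of reading it:

    import socket
    import struct

    def peer_credentials(conn):
        # SO_PEERCRED yields struct ucred: pid, uid, gid as three C ints
        credit = conn.getsockopt(
            socket.SOL_SOCKET, socket.SO_PEERCRED, struct.calcsize("3i")
        )
        return struct.unpack("3i", credit)

    # usage after sock.accept(): pid, uid, gid = peer_credentials(connection)
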
diff --git a/pkg/admin/scripts/fetch_cls_stat.py b/pkg/admin/scripts/fetch_cls_stat.py
index 42277f7be110141a5c728aef1c3119079a52cf89..a6231a9fdd11b396cd53c743602a88c76f3e5b51 100644
--- a/pkg/admin/scripts/fetch_cls_stat.py
+++ b/pkg/admin/scripts/fetch_cls_stat.py
@@ -19,8 +19,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -51,17 +56,17 @@ def _exec_popen(cmd, values=None):
def get_user():
config_path = Path(CONFIG_PATH)
config_list = json.loads(config_path.read_text())
- return config_list["deploy_user"].split(':')[0]
+ return config_list["deploy_user"].split(":")[0]
def parse_node_stat(node_stat):
- keys = ['NODE_ID', 'NAME', 'STAT', 'PRE_STAT']
+ keys = ["NODE_ID", "NAME", "STAT", "PRE_STAT"]
values = node_stat.split()
stat_json = {}
- for (idx, key) in enumerate(keys):
+ for idx, key in enumerate(keys):
stat_json[key] = values[idx]
online = False
- if stat_json['STAT'] == 'ONLINE':
+ if stat_json["STAT"] == "ONLINE":
online = True
return (online, stat_json)
@@ -70,7 +75,7 @@ def fetch_cms_stat():
user = get_user()
cmd = 'su - %s -c "cms stat" | tail -n +2' % user
_, output, _ = _exec_popen(cmd)
- output = output.split('\n')
+ output = output.split("\n")
cms_stat_json = {}
if len(output) <= 1:
return (False, cms_stat_json)
@@ -81,19 +86,19 @@ def fetch_cms_stat():
detail_json.append(stat_json)
if online:
online_cnt += 1
- cms_stat_json['DETAIL'] = detail_json
+ cms_stat_json["DETAIL"] = detail_json
if online_cnt == 0:
- cms_stat_json['STATUS'] = 'OFFLINE'
+ cms_stat_json["STATUS"] = "OFFLINE"
elif online_cnt == len(output):
- cms_stat_json['STATUS'] = 'ONLINE'
+ cms_stat_json["STATUS"] = "ONLINE"
else:
- cms_stat_json['STATUS'] = 'PARTIALLY_ONLINE'
+ cms_stat_json["STATUS"] = "PARTIALLY_ONLINE"
return (True, cms_stat_json)
def gen_fault_result():
result_json = {}
- result_json['RESULT'] = -1
+ result_json["RESULT"] = -1
return json.dumps(result_json)
@@ -102,10 +107,10 @@ def fetch_cls_stat():
if not success:
return gen_fault_result()
status_json = {}
- status_json['CMS_STAT'] = cms_stat_json
- status_json['RESULT'] = 0
+ status_json["CMS_STAT"] = cms_stat_json
+ status_json["RESULT"] = 0
return json.dumps(status_json)
-if __name__ == '__main__':
+if __name__ == "__main__":
print(fetch_cls_stat())
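
parse_node_stat pairs a fixed key list with the whitespace-split columns of one cms stat row; dict(zip(...)) expresses the same mapping in one step. A standalone sketch with a made-up row:

    keys = ["NODE_ID", "NAME", "STAT", "PRE_STAT"]
    node_stat = "0 node0 ONLINE OFFLINE"  # hypothetical cms stat row
    stat_json = dict(zip(keys, node_stat.split()))
    online = stat_json["STAT"] == "ONLINE"
    print(online, stat_json)
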
diff --git a/pkg/deploy/action/cantian/Common.py b/pkg/deploy/action/cantian/Common.py
index 9f83a0950ab5ca8f9b37ebaf624b1a5653c391ee..9b480678f5826e4f99799921c679b12dd31795dd 100644
--- a/pkg/deploy/action/cantian/Common.py
+++ b/pkg/deploy/action/cantian/Common.py
@@ -10,6 +10,7 @@
import sys
+
sys.dont_write_bytecode = True
try:
@@ -30,8 +31,10 @@ class DefaultValue(object):
"""
Default value of some variables
"""
+
def __init__(self):
pass
+
# file mode
MAX_FILE_MODE = 640
MIN_FILE_MODE = 400
@@ -39,13 +42,13 @@ class DefaultValue(object):
MID_FILE_MODE = 500
KEY_DIRECTORY_MODE = 700
MAX_DIRECTORY_MODE = 750
- KEY_DIRECTORY_MODE_STR = '0700'
+ KEY_DIRECTORY_MODE_STR = "0700"
MIN_FILE_PERMISSION = 0o400
MID_FILE_PERMISSION = 0o500
KEY_FILE_PERMISSION = 0o600
KEY_DIRECTORY_PERMISSION = 0o700
CANTIAND_CONF_NAME = "cantiand.ini"
-
+
# get os version and python version
CURRENT_OS = platform.system()
PY_VERSION = platform.python_version()
@@ -66,7 +69,7 @@ class DefaultValue(object):
while True:
# find the top path to be created
(tmp_dir, top_dir_name) = os.path.split(tmp_dir)
- if (os.path.exists(tmp_dir) or top_dir_name == ""):
+ if os.path.exists(tmp_dir) or top_dir_name == "":
tmp_dir = os.path.join(tmp_dir, top_dir_name)
break
return tmp_dir
@@ -115,8 +118,13 @@ class DefaultValue(object):
bash_cmd = ["bash"]
if not stdin_list:
stdin_list = []
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
# in python 3, the stand output and stand error is
# unicode, we must decode it before return
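
_exec_popen here (like the one in fetch_cls_stat.py above) avoids shell=True by spawning bash directly and writing the command to its stdin. A minimal POSIX-only sketch of that pattern:

    import subprocess

    pobj = subprocess.Popen(
        ["bash"],
        shell=False,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = pobj.communicate(b"echo hello\n")  # command fed via stdin
    print(pobj.returncode, stdout.decode().strip())  # -> 0 hello
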
diff --git a/pkg/deploy/action/cantian/bind_cpu_config.py b/pkg/deploy/action/cantian/bind_cpu_config.py
index 87458d73cfbd598dac473a917db99de8e3460160..a9ea00c72d9687e13a6b4b74a61ec5f510c7914f 100644
--- a/pkg/deploy/action/cantian/bind_cpu_config.py
+++ b/pkg/deploy/action/cantian/bind_cpu_config.py
@@ -10,6 +10,7 @@ import grp
from log import LOGGER
from get_config_info import get_value
+
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from update_config import update_dbstor_conf
@@ -21,12 +22,15 @@ MES_MODULE = "MES_BIND_CPU"
MES_CPU_INFO = "MES_CPU_INFO"
CANTIAN_NUMA_INFO = "CANTIAN_NUMA_CPU_INFO"
MYSQL_NUMA_INFO = "MYSQL_NUMA_CPU_INFO"
-MODULE_LIST = [XNET_MODULE, MES_MODULE] # 目前只支持xnet, mes模块的绑核,后续扩展秩序添加在这里即可
+MODULE_LIST = [
+ XNET_MODULE,
+ MES_MODULE,
+] # Only the xnet and mes modules support core binding for now; to extend, just add new modules here
dbstor_file_module_dict = {
"XNET_CPU": XNET_MODULE,
"MES_CPU": MES_MODULE,
"IOD_CPU": "",
- "ULOG_CPU": ""
+ "ULOG_CPU": "",
} # Used when updating the dbstor.ini file; add future extensions here as well
BIND_NUMA_NODE_NUM = 2 # number of leading NUMA nodes to bind on a physical machine; only the first 2 are supported
@@ -43,9 +47,9 @@ def cpu_info_to_cpu_list(cpu_list_str):
cpu_list = []
- for part in cpu_list_str.split(','):
- if '-' in part:
- start, end = map(int, part.split('-'))
+ for part in cpu_list_str.split(","):
+ if "-" in part:
+ start, end = map(int, part.split("-"))
cpu_list.extend(range(start, end + 1))
else:
cpu_list.append(int(part))
@@ -58,10 +62,12 @@ def cpu_list_to_cpu_info(cpu_list):
Converts a list of CPU IDs (either as integers or a comma-separated string) into a string in the format '1-3,5-6'.
"""
if isinstance(cpu_list, str):
- cpu_list = cpu_list.split(',')
+ cpu_list = cpu_list.split(",")
if not all(isinstance(cpu, (int, str)) for cpu in cpu_list):
- raise ValueError("cpu_list should contain integers or strings that can be converted to integers")
+ raise ValueError(
+ "cpu_list should contain integers or strings that can be converted to integers"
+ )
cpu_list = sorted(set(map(int, cpu_list)))
@@ -87,11 +93,12 @@ def cpu_list_to_cpu_info(cpu_list):
return ",".join(ranges)
+
def get_json_config(path):
"""
Retrieves the NUMA configuration from the specified JSON file.
"""
- with open(path, 'r', encoding='utf-8') as file:
+ with open(path, "r", encoding="utf-8") as file:
config_json = json.load(file)
return config_json
@@ -122,7 +129,9 @@ def write_json_config_file(path, config):
except KeyError:
LOGGER.error(f"User '{user_name}' or group '{group_name}' not found.")
except PermissionError:
- LOGGER.error("Permission denied: cannot change file ownership. Run as root or with sufficient privileges.")
+ LOGGER.error(
+ "Permission denied: cannot change file ownership. Run as root or with sufficient privileges."
+ )
class NumaConfigBase:
@@ -141,8 +150,13 @@ class NumaConfigBase:
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
pobj.stdin.write(cmd.encode())
@@ -190,7 +204,7 @@ class NumaConfigBase:
"""
Performs pre-checks before proceeding with NUMA configuration.
"""
- if platform.machine() != 'aarch64':
+ if platform.machine() != "aarch64":
LOGGER.info("System is not aarch64")
return
@@ -199,7 +213,7 @@ class NumaConfigBase:
raise Exception(err_msg)
def get_module_bind_cpu_list(self, module_thread_num):
- """ 获取模块绑核的 CPU 列表 """
+ """获取模块绑核的 CPU 列表"""
result_ranges = []
count = module_thread_num
@@ -208,14 +222,19 @@ class NumaConfigBase:
# Filter out CPUs that are already bound
if self.bind_cpu_list:
available_cpu_for_binding_filtered = {
- numa_id: [cpu for cpu in available_cpu_list if cpu not in self.bind_cpu_list]
+ numa_id: [
+ cpu for cpu in available_cpu_list if cpu not in self.bind_cpu_list
+ ]
for numa_id, available_cpu_list in self.available_cpu_for_binding_dict.items()
}
else:
available_cpu_for_binding_filtered = self.available_cpu_for_binding_dict
while count > 0:
- for numa_id, available_cpu_list in available_cpu_for_binding_filtered.items():
+ for (
+ numa_id,
+ available_cpu_list,
+ ) in available_cpu_for_binding_filtered.items():
if numa_pointer[numa_id] < len(available_cpu_list):
result_ranges.append(available_cpu_list[numa_pointer[numa_id]])
numa_pointer[numa_id] += 1
@@ -224,7 +243,9 @@ class NumaConfigBase:
break
for numa_id, available_cpu_list in available_cpu_for_binding_filtered.items():
- self.available_cpu_for_binding_dict[numa_id] = available_cpu_list[numa_pointer[numa_id]:]
+ self.available_cpu_for_binding_dict[numa_id] = available_cpu_list[
+ numa_pointer[numa_id] :
+ ]
return result_ranges
@@ -260,7 +281,11 @@ class NumaConfigBase:
try:
with open(cantian_conf_file, "r+", encoding="utf-8") as file:
config = file.readlines()
- existing_keys = {line.split("=", maxsplit=1)[0].strip() for line in config if "=" in line}
+ existing_keys = {
+ line.split("=", maxsplit=1)[0].strip()
+ for line in config
+ if "=" in line
+ }
updated_keys = set()
removed_keys = set()
@@ -274,7 +299,10 @@ class NumaConfigBase:
key, value = line.split("=", maxsplit=1)
key = key.strip()
- if key in cantiand_cpu_info and cantiand_cpu_info[key] in ("-del", "-remove"):
+ if key in cantiand_cpu_info and cantiand_cpu_info[key] in (
+ "-del",
+ "-remove",
+ ):
removed_keys.add(key)
continue
@@ -294,9 +322,13 @@ class NumaConfigBase:
file.truncate()
if updated_keys:
- LOGGER.info(f"Updated keys in {cantian_conf_file}: {', '.join(updated_keys)}")
+ LOGGER.info(
+ f"Updated keys in {cantian_conf_file}: {', '.join(updated_keys)}"
+ )
if removed_keys:
- LOGGER.info(f"Removed keys in {cantian_conf_file}: {', '.join(removed_keys)}")
+ LOGGER.info(
+ f"Removed keys in {cantian_conf_file}: {', '.join(removed_keys)}"
+ )
except Exception as e:
LOGGER.error(f"Failed to update {cantian_conf_file}: {e}")
@@ -323,18 +355,22 @@ class PhysicalCpuConfig(NumaConfigBase):
return False
def init_cpu_info(self):
- """ 获取物理机上的所有 CPU 相关信息 """
+ """获取物理机上的所有 CPU 相关信息"""
# 获取物理机上所有 CPU 的列表
- ret_code, result, stderr = self._exec_popen('/usr/bin/lscpu | grep -i "On-line CPU(s) list"')
+ ret_code, result, stderr = self._exec_popen(
+ '/usr/bin/lscpu | grep -i "On-line CPU(s) list"'
+ )
if ret_code:
raise Exception(f"Failed to get CPU list, err: {stderr}")
- _result = result.strip().split(':')
+ _result = result.strip().split(":")
if len(_result) != 2:
raise Exception(f"NUMA info parsing failed, result: {result}")
self.all_cpu_list = cpu_info_to_cpu_list(_result[1].strip())
- ret_code, result, stderr = self._exec_popen('/usr/bin/lscpu | grep -i "NUMA node[0-9] CPU(s)"')
+ ret_code, result, stderr = self._exec_popen(
+ '/usr/bin/lscpu | grep -i "NUMA node[0-9] CPU(s)"'
+ )
if ret_code:
raise Exception(f"Failed to get NUMA node info, err: {stderr}")
@@ -343,7 +379,7 @@ class PhysicalCpuConfig(NumaConfigBase):
# Parse the NUMA node info from the lscpu output
lines = result.strip().splitlines()
for line in lines:
- match = re.search(r'NUMA node(\d+) CPU\(s\):\s+([\d,\-]+)', line)
+ match = re.search(r"NUMA node(\d+) CPU\(s\):\s+([\d,\-]+)", line)
if match:
numa_id = int(match.group(1))
cpu_range_str = match.group(2)
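Reviewer note: this regex is the entire parsing contract with lscpu, so here is how it behaves on representative output (sample text fabricated for the demo):

```python
import re

# Sample lscpu lines, fabricated for illustration only.
sample = """NUMA node0 CPU(s):   0-23
NUMA node1 CPU(s):   24-47"""

numa = {}
for line in sample.splitlines():
    match = re.search(r"NUMA node(\d+) CPU\(s\):\s+([\d,\-]+)", line)
    if match:
        numa[int(match.group(1))] = match.group(2)
print(numa)  # {0: '0-23', 1: '24-47'}
```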
@@ -355,15 +391,21 @@ class PhysicalCpuConfig(NumaConfigBase):
# Update available_cpu_for_binding_dict, removing CPU IDs 0 to 11
self.available_cpu_for_binding_dict = {}
for numa_id, cpu_list in list(self.numa_info_dict.items())[:BIND_NUMA_NODE_NUM]:
- valid_cpu_list = [cpu for cpu in cpu_list if cpu >= 12] # keep only CPUs with ID >= 12
+ valid_cpu_list = [
+ cpu for cpu in cpu_list if cpu >= 12
+ ] # keep only CPUs with ID >= 12
self.available_cpu_for_binding_dict[numa_id] = valid_cpu_list
if not self.available_cpu_for_binding_dict or any(
- not valid_cpu_list for valid_cpu_list in self.available_cpu_for_binding_dict.values()):
- raise Exception("No valid CPU binding available for any NUMA node or some NUMA nodes have no valid CPUs.")
+ not valid_cpu_list
+ for valid_cpu_list in self.available_cpu_for_binding_dict.values()
+ ):
+ raise Exception(
+ "No valid CPU binding available for any NUMA node or some NUMA nodes have no valid CPUs."
+ )
def update_bind_cpu_info(self):
- """ 获取绑定的 CPU 列表,支持手动配置 """
+ """获取绑定的 CPU 列表,支持手动配置"""
numa_config = get_json_config(CONFIG_PATH)
bind_cpu_list = []
@@ -376,22 +418,32 @@ class PhysicalCpuConfig(NumaConfigBase):
continue
if module_id_key in numa_config and numa_config[module_id_key]:
- manually_configured_cpus = cpu_info_to_cpu_list(numa_config[module_id_key])
+ manually_configured_cpus = cpu_info_to_cpu_list(
+ numa_config[module_id_key]
+ )
if self.check_cpu_list_invalid(manually_configured_cpus):
- err_msg = (f"Invalid CPU binding in {module_id_key}. "
- f"Cannot use CPUs outside the available range or in 0-5.")
+ err_msg = (
+ f"Invalid CPU binding in {module_id_key}. "
+ f"Cannot use CPUs outside the available range or in 0-5."
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
duplicate_cpus = set(manually_configured_cpus) & set(self.bind_cpu_list)
if duplicate_cpus:
- err_msg = (f"Currently bound CPUs: {self.bind_cpu_list}. "
- f"Conflict in CPU binding for {module_id_key}: CPUs {duplicate_cpus} are already bound.")
+ err_msg = (
+ f"Currently bound CPUs: {self.bind_cpu_list}. "
+ f"Conflict in CPU binding for {module_id_key}: CPUs {duplicate_cpus} are already bound."
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
- LOGGER.info(f"{module_id_key} is manually configured, skipping CPU binding generation.")
+ LOGGER.info(
+ f"{module_id_key} is manually configured, skipping CPU binding generation."
+ )
bind_cpu_list.extend(manually_configured_cpus)
- self.bind_cpu_dict[module_id_key] = ",".join(map(str, manually_configured_cpus))
+ self.bind_cpu_dict[module_id_key] = ",".join(
+ map(str, manually_configured_cpus)
+ )
self.bind_cpu_list = bind_cpu_list
continue
@@ -401,7 +453,9 @@ class PhysicalCpuConfig(NumaConfigBase):
try:
module_info = int(module_info)
if not (1 <= module_info <= 10):
- LOGGER.warning(f"Module {module_name} thread number out of range (1-10).")
+ LOGGER.warning(
+ f"Module {module_name} thread number out of range (1-10)."
+ )
numa_config[module_id_key] = ""
continue
except ValueError:
@@ -419,7 +473,9 @@ class PhysicalCpuConfig(NumaConfigBase):
Get MYSQL_CPU_INFO: after removing the bound CPUs from each NUMA node, convert the remaining CPUs into NUMA-node range strings
"""
# Deep-copy the NUMA node info
- remaining_numa_info = {numa_id: list(cpu_list) for numa_id, cpu_list in self.numa_info_dict.items()}
+ remaining_numa_info = {
+ numa_id: list(cpu_list) for numa_id, cpu_list in self.numa_info_dict.items()
+ }
# Remove the already-bound CPUs from the NUMA nodes
for cpu in self.bind_cpu_list:
@@ -460,7 +516,9 @@ class PhysicalCpuConfig(NumaConfigBase):
# If MES_MODULE exists, it also needs to be written to cantiand.ini
mes_module_key = f"{MES_MODULE}_ID"
if cpu_config_info.get(mes_module_key):
- cantiand_cpu_info[MES_CPU_INFO] = cpu_list_to_cpu_info(cpu_config_info[mes_module_key])
+ cantiand_cpu_info[MES_CPU_INFO] = cpu_list_to_cpu_info(
+ cpu_config_info[mes_module_key]
+ )
else:
cantiand_cpu_info[MES_CPU_INFO] = "-del"
@@ -476,17 +534,18 @@ class PhysicalCpuConfig(NumaConfigBase):
self.update_cantian_mysql_info()
-
class ContainerCpuConfig(NumaConfigBase):
def __init__(self):
super().__init__()
def update_cpu_info(self):
- """ 获取容器中的所有 CPU 列表 """
- if not os.path.exists('/sys/fs/cgroup/cpuset/cpuset.cpus'):
+ """获取容器中的所有 CPU 列表"""
+ if not os.path.exists("/sys/fs/cgroup/cpuset/cpuset.cpus"):
raise Exception("cpuset.cpus path does not exist in container.")
- ret_code, result, stderr = self._exec_popen('cat /sys/fs/cgroup/cpuset/cpuset.cpus')
+ ret_code, result, stderr = self._exec_popen(
+ "cat /sys/fs/cgroup/cpuset/cpuset.cpus"
+ )
if ret_code:
raise Exception(f"Failed to get CPU list in container, err: {stderr}")
@@ -521,7 +580,9 @@ class ContainerCpuConfig(NumaConfigBase):
module_info = int(module_info)
if not (1 <= module_info <= 10):
numa_config[module_id_key] = ""
- LOGGER.warning(f"Module {module_name} thread number out of range (1-10).")
+ LOGGER.warning(
+ f"Module {module_name} thread number out of range (1-10)."
+ )
continue
except ValueError:
numa_config[module_id_key] = ""
@@ -575,7 +636,9 @@ class ContainerCpuConfig(NumaConfigBase):
# If MES_MODULE exists, it also needs to be written to cantiand.ini
mes_module_key = f"{MES_MODULE}_ID"
if mes_module_key in cpu_config_info and cpu_config_info[mes_module_key]:
- cantiand_cpu_info[MES_CPU_INFO] = cpu_list_to_cpu_info(cpu_config_info[mes_module_key])
+ cantiand_cpu_info[MES_CPU_INFO] = cpu_list_to_cpu_info(
+ cpu_config_info[mes_module_key]
+ )
else:
cantiand_cpu_info[MES_CPU_INFO] = "-del"
@@ -605,7 +668,7 @@ class ConfigManager:
write_json_config_file(CONFIG_PATH, numa_config)
def update_cpu_config(self):
- """ 更新 CPU 配置 """
+ """更新 CPU 配置"""
if get_value("cantian_in_container") == "0":
manager = PhysicalCpuConfig()
else:
@@ -625,4 +688,4 @@ if __name__ == "__main__":
else:
config_manager.update_cpu_config()
except Exception as e:
- LOGGER.error(f"An unexpected error occurred: {str(e)}")
\ No newline at end of file
+ LOGGER.error(f"An unexpected error occurred: {str(e)}")
diff --git a/pkg/deploy/action/cantian/cantian_funclib.py b/pkg/deploy/action/cantian/cantian_funclib.py
index 8783d3883cd3a97b3535d4e85863e42e8b56ef8b..f733e66227b02a353a65aba8172890af893ee114 100644
--- a/pkg/deploy/action/cantian/cantian_funclib.py
+++ b/pkg/deploy/action/cantian/cantian_funclib.py
@@ -5,6 +5,7 @@
import sys
import grp
+
sys.dont_write_bytecode = True
try:
import os
@@ -30,15 +31,17 @@ except ImportError as import_err:
py_verion = platform.python_version()
-SYS_PATH = os.environ["PATH"].split(':')
+SYS_PATH = os.environ["PATH"].split(":")
class CommonValue(object):
"""
common value for some variables
"""
+
def __init__(self):
pass
+
# file mode
MAX_FILE_MODE = 640
MIN_FILE_MODE = 400
@@ -52,7 +55,7 @@ class CommonValue(object):
MID_DIRECTORY_MODE_GROUP = 740
MAX_DIRECTORY_MODE = 750
- KEY_DIRECTORY_MODE_STR = '0700'
+ KEY_DIRECTORY_MODE_STR = "0700"
MIN_FILE_PERMISSION = 0o400
MID_FILE_PERMISSION = 0o500
@@ -64,6 +67,7 @@ class DefaultConfigValue(object):
"""
default value for cantiand, cms, gss config
"""
+
def __init__(self):
pass
@@ -86,7 +90,11 @@ class DefaultConfigValue(object):
"BUFFER_LRU_SEARCH_THRE": 40,
"BUFFER_PAGE_CLEAN_RATIO": 0.1,
"_DEADLOCK_DETECT_INTERVAL": 1000,
- "INTERCONNECT_CHANNEL_NUM": 3 if (mes_type == "UC" or mes_type == "UC_RDMA") and deploy_mode != "file" else 32,
+ "INTERCONNECT_CHANNEL_NUM": (
+ 3
+ if (mes_type == "UC" or mes_type == "UC_RDMA") and deploy_mode != "file"
+ else 32
+ ),
"_UNDO_AUTO_SHRINK": "FALSE",
"_CHECKPOINT_TIMED_TASK_DELAY": 100,
"DBWR_PROCESSES": 8,
@@ -147,7 +155,11 @@ class DefaultConfigValue(object):
"INSTANCE_ID": 0,
"INTERCONNECT_PORT": "1601",
"LSNR_PORT": 1611,
- "INTERCONNECT_TYPE": mes_type if (mes_type == "UC" or mes_type == "UC_RDMA") and deploy_mode != "file" else "TCP",
+ "INTERCONNECT_TYPE": (
+ mes_type
+ if (mes_type == "UC" or mes_type == "UC_RDMA") and deploy_mode != "file"
+ else "TCP"
+ ),
"INTERCONNECT_BY_PROFILE": "FALSE",
"INSTANCE_NAME": "cantian",
"ENABLE_SYSDBA_LOGIN": "FALSE",
@@ -161,8 +173,8 @@ class DefaultConfigValue(object):
"LSNR_ADDR": "127.0.0.1",
"SHARED_PATH": "",
"ARCHIVE_DEST_1": "",
- "MAX_ARCH_FILES_SIZE" : "300G",
- "PAGE_CLEAN_MODE" : "ALL",
+ "MAX_ARCH_FILES_SIZE": "300G",
+ "PAGE_CLEAN_MODE": "ALL",
"ENABLE_IDX_KEY_LEN_CHECK": "FALSE",
"EMPTY_STRING_AS_NULL": "FALSE",
"_CHECKPOINT_MERGE_IO": "FALSE",
@@ -174,9 +186,9 @@ class DefaultConfigValue(object):
"SHM_MEMORY_REDUCTION_RATIO": "1",
"MYSQL_DEPLOY_GROUP_ID": mysql_group_id,
"CTC_MAX_INST_PER_NODE": 6,
- "SQL_STATISTIC_TIME_LIMIT" :1000000
+ "SQL_STATISTIC_TIME_LIMIT": 1000000,
}
-
+
CANTIAND_DBG_CONFIG = {
"DBWR_PROCESSES": 8,
"SESSIONS": 8192,
@@ -209,7 +221,7 @@ class DefaultConfigValue(object):
"CR_POOL_SIZE": "2G",
"CR_POOL_COUNT": 4,
"VARIANT_MEMORY_AREA_SIZE": "1G",
- "REPLAY_PRELOAD_PROCESSES":0,
+ "REPLAY_PRELOAD_PROCESSES": 0,
"LOG_REPLAY_PROCESSES": 64,
"_LOG_MAX_FILE_SIZE": "1G",
"RECYCLEBIN": "FALSE",
@@ -221,7 +233,11 @@ class DefaultConfigValue(object):
"INSTANCE_ID": 0,
"INTERCONNECT_PORT": "1601",
"LSNR_PORT": 1611,
- "INTERCONNECT_TYPE": mes_type if (mes_type == "UC" or mes_type == "UC_RDMA") and deploy_mode != "file" else "TCP",
+ "INTERCONNECT_TYPE": (
+ mes_type
+ if (mes_type == "UC" or mes_type == "UC_RDMA") and deploy_mode != "file"
+ else "TCP"
+ ),
"INTERCONNECT_BY_PROFILE": "FALSE",
"INSTANCE_NAME": "cantian",
"ENABLE_SYSDBA_LOGIN": "FALSE",
@@ -244,23 +260,23 @@ class DefaultConfigValue(object):
"KMC_KEY_FILES": None,
"SHM_MEMORY_REDUCTION_RATIO": "1",
"MYSQL_DEPLOY_GROUP_ID": mysql_group_id,
- "CTC_MAX_INST_PER_NODE": 6
+ "CTC_MAX_INST_PER_NODE": 6,
}
MES_CONFIG = {
"MES_SSL_SWITCH": mes_ssl_switch,
"MES_SSL_KEY_PWD": None,
"MES_SSL_CRT_KEY_PATH": "/opt/cantian/common/config/certificates",
- "KMC_KEY_FILES": f"({PRIMARY_KEYSTORE}, {STANDBY_KEYSTORE})"
+ "KMC_KEY_FILES": f"({PRIMARY_KEYSTORE}, {STANDBY_KEYSTORE})",
}
if deploy_mode == "dss":
- CANTIAND_CONFIG.update({
- "CTSTORE_INST_PATH": "UDS:/opt/cantian/dss/.dss_unix_d_socket"
- })
- CANTIAND_DBG_CONFIG.update({
- "CTSTORE_INST_PATH": "UDS:/opt/cantian/dss/.dss_unix_d_socket"
- })
-
+ CANTIAND_CONFIG.update(
+ {"CTSTORE_INST_PATH": "UDS:/opt/cantian/dss/.dss_unix_d_socket"}
+ )
+ CANTIAND_DBG_CONFIG.update(
+ {"CTSTORE_INST_PATH": "UDS:/opt/cantian/dss/.dss_unix_d_socket"}
+ )
+
CANTIAND_CONFIG.update(MES_CONFIG)
CANTIAND_DBG_CONFIG.update(MES_CONFIG)
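Reviewer note: the conditional expressions wrapped twice in this file encode a single rule: UC/UC_RDMA interconnect settings apply only outside file deploy mode. A tiny sketch of that rule as a function (illustrative only, not part of the repo):

```python
# Sketch of how mes_type and deploy_mode drive the interconnect defaults above.
def interconnect_defaults(mes_type: str, deploy_mode: str) -> dict:
    uc_ok = mes_type in ("UC", "UC_RDMA") and deploy_mode != "file"
    return {
        "INTERCONNECT_CHANNEL_NUM": 3 if uc_ok else 32,
        "INTERCONNECT_TYPE": mes_type if uc_ok else "TCP",
    }


print(interconnect_defaults("UC", "dbstor"))  # channel num 3, type UC
print(interconnect_defaults("UC", "file"))    # falls back to 32 / TCP
```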
@@ -320,8 +336,13 @@ def exec_popen(cmd):
:return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if py_verion[0] == "3":
stdout, stderr = pobj.communicate(cmd.encode(), timeout=1800)
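Reviewer note: the pattern being reformatted here, spawning bash with shell=False and feeding the command string over stdin, keeps shell semantics without shell=True. A trimmed, Python 3-only sketch of the same idea:

```python
import subprocess


def run_via_bash(cmd: str, timeout: int = 1800):
    """Feed a command to bash over stdin instead of using shell=True."""
    proc = subprocess.Popen(
        ["bash"],
        shell=False,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = proc.communicate(cmd.encode(), timeout=timeout)
    return proc.returncode, stdout.decode().strip(), stderr.decode().strip()


print(run_via_bash("echo $((6 * 7))"))  # (0, '42', '')
```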
@@ -377,59 +398,97 @@ def check_path(path_type_in):
def system_check_linux(path_len, path_type_in):
i = 0
- a_ascii, a_uppercase_ascii, blank_ascii, num0_ascii, num9_ascii, sep1_ascii, sep2_ascii,\
- sep3_ascii, sep4_ascii, sep5_ascii, z_ascii, z_uppercase_ascii = check_ascii()
+ (
+ a_ascii,
+ a_uppercase_ascii,
+ blank_ascii,
+ num0_ascii,
+ num9_ascii,
+ sep1_ascii,
+ sep2_ascii,
+ sep3_ascii,
+ sep4_ascii,
+ sep5_ascii,
+ z_ascii,
+ z_uppercase_ascii,
+ ) = check_ascii()
ascii_list = [blank_ascii, sep1_ascii, sep2_ascii, sep4_ascii, sep5_ascii]
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if not (a_ascii <= char_check <= z_ascii
- or a_uppercase_ascii <= char_check <= z_uppercase_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in ascii_list):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or a_uppercase_ascii <= char_check <= z_uppercase_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in ascii_list
+ ):
return False
return True
def system_check_windows(path_len, path_type_in):
i = 0
- a_ascii, a_uppercase_ascii, blank_ascii, num0_ascii, num9_ascii, sep1_ascii, sep2_ascii,\
- sep3_ascii, sep4_ascii, sep5_ascii, z_ascii, z_uppercase_ascii = check_ascii()
+ (
+ a_ascii,
+ a_uppercase_ascii,
+ blank_ascii,
+ num0_ascii,
+ num9_ascii,
+ sep1_ascii,
+ sep2_ascii,
+ sep3_ascii,
+ sep4_ascii,
+ sep5_ascii,
+ z_ascii,
+ z_uppercase_ascii,
+ ) = check_ascii()
ascii_list = [blank_ascii, sep1_ascii, sep2_ascii, sep3_ascii, sep4_ascii]
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if not (a_ascii <= char_check <= z_ascii
- or a_uppercase_ascii <= char_check <= z_uppercase_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in ascii_list):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or a_uppercase_ascii <= char_check <= z_uppercase_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in ascii_list
+ ):
return False
return True
def check_ascii():
- a_ascii = ord('a')
- z_ascii = ord('z')
- a_uppercase_ascii = ord('A')
- z_uppercase_ascii = ord('Z')
- num0_ascii = ord('0')
- num9_ascii = ord('9')
- blank_ascii = ord(' ')
+ a_ascii = ord("a")
+ z_ascii = ord("z")
+ a_uppercase_ascii = ord("A")
+ z_uppercase_ascii = ord("Z")
+ num0_ascii = ord("0")
+ num9_ascii = ord("9")
+ blank_ascii = ord(" ")
sep1_ascii = ord(os.sep)
- sep2_ascii = ord('_')
- sep3_ascii = ord(':')
- sep4_ascii = ord('-')
- sep5_ascii = ord('.')
+ sep2_ascii = ord("_")
+ sep3_ascii = ord(":")
+ sep4_ascii = ord("-")
+ sep5_ascii = ord(".")
ascii_list = [
- a_ascii, a_uppercase_ascii, blank_ascii, num0_ascii, num9_ascii, sep1_ascii, sep2_ascii,
- sep3_ascii, sep4_ascii, sep5_ascii, z_ascii, z_uppercase_ascii
+ a_ascii,
+ a_uppercase_ascii,
+ blank_ascii,
+ num0_ascii,
+ num9_ascii,
+ sep1_ascii,
+ sep2_ascii,
+ sep3_ascii,
+ sep4_ascii,
+ sep5_ascii,
+ z_ascii,
+ z_uppercase_ascii,
]
return ascii_list
def check_ssh_connection(ips):
- '''
+ """
check ssh connection without a password; if the connection
succeeds, user trust to that node has been created
- '''
+ """
failed_ip = []
success_ip = []
ssh = get_abs_path("ssh")
@@ -441,30 +500,39 @@ def check_ssh_connection(ips):
cmd += "-o ServerAliveInterval=100 -o ServerAliveCountMax=36 "
cmd += "-n 'echo Last login'"
process = Execution(cmd)
- idx =\
- process.expect(['Permission denied',
- 'Last login',
- 'Are you sure you want to continue connecting',
- 'Password', 'ssh:', TimeoutException,
- EOFException], 60)
+ idx = process.expect(
+ [
+ "Permission denied",
+ "Last login",
+ "Are you sure you want to continue connecting",
+ "Password",
+ "ssh:",
+ TimeoutException,
+ EOFException,
+ ],
+ 60,
+ )
if idx == 0:
failed_ip.append(ip)
elif idx == 1:
success_ip.append(ip)
process.send_line("exit")
elif idx == 2:
- process.send_line('yes')
- idx = process.expect(['Permission denied', 'Last login',
- 'Password', 'ssh:'], 60)
+ process.send_line("yes")
+ idx = process.expect(
+ ["Permission denied", "Last login", "Password", "ssh:"], 60
+ )
if idx == 0:
failed_ip.append(ip)
elif idx == 1:
success_ip.append(ip)
process.send_line("exit")
elif idx == 2:
- raise Exception("Check ssh connection"
- " failed,check your ssh"
- " configure file please.")
+ raise Exception(
+ "Check ssh connection failed,"
+ " check your ssh configuration file."
+ )
elif idx == 3:
raise Exception(str(process.context_buffer))
@@ -478,10 +546,12 @@ def check_ssh_connection(ips):
# password authentication method,
# so we must expect Password key word
# to avoid waiting for the timeout
- raise Exception("Check ssh"
- " connection failed,"
- " check your ssh"
- " configure file please.")
+ raise Exception(
+ "Check ssh connection failed,"
+ " check your ssh configuration file."
+ )
elif idx == 4:
raise Exception(str(process.context_buffer))
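Reviewer note: this flow is the classic expect pattern that the repo's home-grown Execution class reimplements. For comparison, the same trust check with the third-party pexpect library (an assumption; this repo deliberately avoids the dependency) might look like:

```python
import pexpect  # third-party; not used by the repo, shown for comparison


def ssh_trust_ok(ip: str) -> bool:
    # BatchMode=yes makes ssh fail instead of prompting for a password.
    child = pexpect.spawn(f"ssh -o BatchMode=yes {ip} 'echo Last login'", timeout=60)
    idx = child.expect(
        ["Permission denied", "Last login", pexpect.TIMEOUT, pexpect.EOF]
    )
    return idx == 1  # only 'Last login' means the trust is in place
```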
@@ -495,6 +565,7 @@ class CommandTool(object):
"""
class for CommandTool
"""
+
def __init__(self, log):
self.log = log
@@ -507,16 +578,16 @@ class CommandTool(object):
raise SshToolException("Can't find bash command.")
def __execute(self, arg):
- '''
+ """
execute shell command by ssh to login remote host
arg - list for argument, ip address and shell command
- '''
+ """
ip = arg[0]
cmd = arg[1]
ssh_options = " -o ServerAliveInterval=100 "
ssh_options += " -o ServerAliveCountMax=36 "
cmd = "export TMOUT=0; %s" % cmd
- ssh_cmd = "ssh %s %s \"%s\"" % (ssh_options, ip, cmd)
+ ssh_cmd = 'ssh %s %s "%s"' % (ssh_options, ip, cmd)
return [ip, exec_popen(ssh_cmd)]
def __scp(self, arg):
@@ -534,14 +605,14 @@ class CommandTool(object):
def __interactive_input(self, process, ip, pw1, pw2):
- pw_str = 'Please enter password'
+ pw_str = "Please enter password"
self.log("Expect(%s) on: [%s]" % (ip, pw_str))
- process.expect(['Please enter password'])
+ process.expect(["Please enter password"])
self.log("Send(%s) password." % ip)
process.send_line(pw1)
if pw2:
self.log("Expect(%s) on: [%s]" % (ip, pw_str))
- process.expect(['Please enter password'])
+ process.expect(["Please enter password"])
self.log("Send(%s) password." % ip)
process.send_line(pw2)
@@ -567,9 +638,7 @@ class CommandTool(object):
if ip:
process = Execution("%s %s %s" % (self.ssh, ssh_options, ip))
pdict = user[1]
- self.log("ssh session info:\n%s %s %s" % (self.ssh,
- ssh_options,
- ip))
+ self.log("ssh session info:\n%s %s %s" % (self.ssh, ssh_options, ip))
else:
process = Execution("%s" % (self.bash))
self.log("bash session")
@@ -592,19 +661,19 @@ class CommandTool(object):
self.__interactive_input(process, ip, user[1], user[2])
self.log("Expect(%s) on: [Done, Upgrade Failed]" % ip)
- idx = process.expect(['Done', 'Upgrade Failed'], timeout=51200)
+ idx = process.expect(["Done", "Upgrade Failed"], timeout=51200)
if idx == 0:
self.log("Expect(%s) received Done." % ip)
- process.send_line('exit')
- return [ip, ('0', str(process.context_before))]
+ process.send_line("exit")
+ return [ip, ("0", str(process.context_before))]
self.log("Expect(%s) received Upgrade Failed." % ip)
- process.send_line('exit')
- return [ip, ('1', str(process.context_buffer))]
+ process.send_line("exit")
+ return [ip, ("1", str(process.context_buffer))]
except (TimeoutException, EOFException) as err:
self.log("Expect(%s) timeout." % ip)
if process:
- process.send_line('exit')
- return [ip, ('1', str(err) + '\n' + str(process.context_buffer))]
+ process.send_line("exit")
+ return [ip, ("1", str(err) + "\n" + str(process.context_buffer))]
def execute_local(self, cmd):
ret_code, output, errput = exec_popen(cmd)
@@ -612,27 +681,25 @@ class CommandTool(object):
return ret_code, output
def expect_execute(self, ip_cmd_map):
- '''
+ """
execute shell command with expect
- '''
+ """
try:
pool = Pool(len(ip_cmd_map))
result = pool.map(self.__expect_execute, ip_cmd_map)
return self.__parse(result)
except KeyboardInterrupt as e:
- #captured and processed by the caller
+ # captured and processed by the caller
raise
def execute_in_node(self, ip_cmd_map):
- '''
- '''
+ """ """
pool = Pool(len(ip_cmd_map))
result = pool.map(self.__execute, ip_cmd_map)
return self.__parse(result)
def scp_in_node(self, ip_dest_map):
- '''
- '''
+ """ """
pool = Pool(len(ip_dest_map))
result = pool.map(self.__scp, ip_dest_map)
return self.__parse(result)
@@ -647,23 +714,22 @@ class CommandTool(object):
success_node = []
failed_node = []
for tmp_rs in result:
- if str(rs[1][0]) != '0':
+ if str(tmp_rs[1][0]) != "0":
ret_code = 1
failed_node.append(tmp_rs)
success_node.append(tmp_rs)
return ret_code, success_node, failed_node
def expect_ctsql(self, ip_cmd_map):
- '''
+ """
expect execute ctsql and sql command
- '''
+ """
pool = Pool(len(ip_cmd_map))
result = pool.map(self.__expect_ctsql, ip_cmd_map)
return self.__parse(result)
def __expect_ctsql(self, arg):
- '''
- '''
+ """ """
ip = arg[0]
ctsql = arg[1]
sql = arg[2]
@@ -679,21 +745,22 @@ class CommandTool(object):
process.send_line(ctsql)
if passwd:
- process.expect(['Please enter password'])
+ process.expect(["Please enter password"])
process.send_line(passwd)
- process.expect(['SQL>'])
+ process.expect(["SQL>"])
process.send_line(sql)
- idx = process.expect(['rows fetched', 'Succeed', 'CT-', 'SQL>'],
- timeout=600)
+ idx = process.expect(
+ ["rows fetched", "Succeed", "CT-", "SQL>"], timeout=600
+ )
if idx == 0 or idx == 1:
- process.send_line('exit')
- return [ip, ('0', str(process.context_before))]
- process.send_line('exit')
- return [ip, '1', str(process.context_buffer)]
+ process.send_line("exit")
+ return [ip, ("0", str(process.context_before))]
+ process.send_line("exit")
+ return [ip, "1", str(process.context_buffer)]
except (TimeoutException, EOFException):
if process:
- process.send_line('exit')
- return [ip, ('1', str(process.context_buffer))]
+ process.send_line("exit")
+ return [ip, ("1", str(process.context_buffer))]
class ExpectException(Exception):
@@ -718,13 +785,16 @@ class Execution(object):
if py_verion[0] == "3":
ALLOWED_STRING_TYPES = (str,)
else:
- ALLOWED_STRING_TYPES = (type(b''), type(''), type(u''),)
+ ALLOWED_STRING_TYPES = (
+ type(b""),
+ type(""),
+ type(""),
+ )
LINE_SEPERATOR = os.linesep
- CTRLF = '\r\n'
+ CTRLF = "\r\n"
- def __init__(self, command, timeout=1800, max_read_size=4096,
- delimiter=None):
+ def __init__(self, command, timeout=1800, max_read_size=4096, delimiter=None):
self.matcher = None
self.context_before = None
@@ -748,11 +818,13 @@ class Execution(object):
try:
from termios import CEOF
from termios import CINTR
+
(self._INTR, self._EOF) = (CINTR, CEOF)
except ImportError:
try:
from termios import VEOF
from termios import VINTR
+
tmp_fp = sys.__stdin__.fileno()
self._INTR = ord(termios.tcgetattr(tmp_fp)[6][VINTR])
self._EOF = ord(termios.tcgetattr(tmp_fp)[6][VEOF])
@@ -763,13 +835,13 @@ class Execution(object):
@staticmethod
def _ascii(content):
if not isinstance(content, bytes):
- return content.encode('ascii')
+ return content.encode("ascii")
return content
@staticmethod
def _utf8(content):
if not isinstance(content, bytes):
- return content.encode('utf-8')
+ return content.encode("utf-8")
return content
def __del__(self):
@@ -781,29 +853,29 @@ class Execution(object):
def __str__(self):
tmp_s = list()
- tmp_s.append('%r' % self)
- tmp_s.append('after: %r' % self.context_after)
- tmp_s.append('pid: %s' % str(self.child_pid))
- tmp_s.append('child_fd: %s' % str(self.child_fd))
- tmp_s.append('closed: %s' % str(self.is_closed))
- tmp_s.append('timeout: %s' % str(self.timeout))
- tmp_s.append('delimiter: %s' % str(self.delimiter))
- tmp_s.append('maxReadSize: %s' % str(self.max_read_size))
- return '\n'.join(tmp_s)
+ tmp_s.append("%r" % self)
+ tmp_s.append("after: %r" % self.context_after)
+ tmp_s.append("pid: %s" % str(self.child_pid))
+ tmp_s.append("child_fd: %s" % str(self.child_fd))
+ tmp_s.append("closed: %s" % str(self.is_closed))
+ tmp_s.append("timeout: %s" % str(self.timeout))
+ tmp_s.append("delimiter: %s" % str(self.delimiter))
+ tmp_s.append("maxReadSize: %s" % str(self.max_read_size))
+ return "\n".join(tmp_s)
def _excute(self, command):
self.args = shlex.split(command)
if self.child_pid is not None:
- raise ExpectException('The pid member must be None.')
+ raise ExpectException("The pid member must be None.")
if self.command is None:
- raise ExpectException('The command member must not be None.')
+ raise ExpectException("The command member must not be None.")
try:
self.child_pid, self.child_fd = pty.fork()
except OSError as err: # pragma: no cover
- raise ExpectException('pty.fork() failed: ' + str(err)) from err
+ raise ExpectException("pty.fork() failed: " + str(err)) from err
if self.child_pid == pty.CHILD:
# child
@@ -843,19 +915,18 @@ class Execution(object):
# give kernel time to update process status.
time.sleep(self.close_delay)
if self.is_alive() and not self.terminate():
- raise ExpectException('Could not terminate the child.')
+ raise ExpectException("Could not terminate the child.")
self.child_fd = -1
self.is_closed = True
def set_echo(self, state):
- err_msg = ('method set_echo() may not be available on'
- ' this operating system.')
+ err_msg = "method set_echo() may not be available on" " this operating system."
try:
child_attr = termios.tcgetattr(self.child_fd)
except termios.error as e:
if e.args[0] == errno.EINVAL:
- raise IOError(e.args[0], '%s: %s.' % (e.args[1], err_msg)) from e
+ raise IOError(e.args[0], "%s: %s." % (e.args[1], err_msg)) from e
raise
if state:
@@ -867,12 +938,12 @@ class Execution(object):
termios.tcsetattr(self.child_fd, termios.TCSANOW, child_attr)
except IOError as e:
if e.args[0] == errno.EINVAL:
- raise IOError(e.args[0], '%s: %s.' % (e.args[1], err_msg)) from e
+ raise IOError(e.args[0], "%s: %s." % (e.args[1], err_msg)) from e
raise
def read_non_block(self, size=1, timeout=-1):
if self.is_closed:
- raise ValueError('I/O operation on closed file.')
+ raise ValueError("I/O operation on closed file.")
if timeout == -1:
timeout = self.timeout
@@ -882,16 +953,16 @@ class Execution(object):
rfds, _, _ = self.select([self.child_fd], [], [], 0)
if not rfds:
self.eof_flag = True
- raise EOFException('End Of File (EOF). Braindead platform.')
+ raise EOFException("End Of File (EOF). Braindead platform.")
rfds, _, _ = self.select([self.child_fd], [], [], timeout)
if not rfds:
if not self.is_alive():
self.eof_flag = True
- raise EOFException('Reach end of File (EOF).')
+ raise EOFException("Reach end of File (EOF).")
else:
- raise TimeoutException('Timeout exceeded.')
+ raise TimeoutException("Timeout exceeded.")
if self.child_fd in rfds:
try:
@@ -899,17 +970,19 @@ class Execution(object):
except OSError as e:
if e.args[0] == errno.EIO:
self.eof_flag = True
- raise EOFException('Reach End Of File (EOF). '
- 'Exception style platform.') from e
+ raise EOFException(
+ "Reach End Of File (EOF). " "Exception style platform."
+ ) from e
raise
- if child_data == b'':
+ if child_data == b"":
self.eof_flag = True
- raise EOFException('Reach end Of File (EOF).'
- ' Empty string style platform.')
+ raise EOFException(
+ "Reach end Of File (EOF)." " Empty string style platform."
+ )
return child_data
- raise ExpectException('Reached an unexpected state.')
+ raise ExpectException("Reached an unexpected state.")
# pragma: no cover
def read(self, size=-1, timeout=-1):
@@ -926,7 +999,7 @@ class Execution(object):
def _send(self, content):
return os.write(self.child_fd, content)
- def send_line(self, content=''):
+ def send_line(self, content=""):
send_count = self.send(content)
send_count = send_count + self.send(self.LINE_SEPERATOR)
return send_count
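Reviewer note: Execution is essentially a miniature pexpect built on pty.fork, os.write, and os.read. A bare-bones, POSIX-only sketch of that core, including the EIO-at-EOF behavior that read_non_block handles above:

```python
import os
import pty

pid, fd = pty.fork()
if pid == pty.CHILD:
    # Child: exec a program whose output the parent will read back.
    os.execvp("echo", ["echo", "hello from the child"])
else:
    # Parent: read until EOF (Linux ptys raise OSError/EIO at EOF).
    chunks = []
    while True:
        try:
            data = os.read(fd, 1024)
        except OSError:  # raised at EOF on Linux ptys
            break
        if not data:
            break
        chunks.append(data)
    os.waitpid(pid, 0)
    print(b"".join(chunks).decode())  # 'hello from the child\r\n'
```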
@@ -978,7 +1051,7 @@ class Execution(object):
if os.WIFEXITED(child_status) or os.WIFSIGNALED(child_status):
self.is_terminated = True
elif os.WIFSTOPPED(child_status):
- raise ExpectException('process already been stopped.')
+ raise ExpectException("process already been stopped.")
def wait_child_process(self, wait_pid_options):
try:
@@ -986,7 +1059,7 @@ class Execution(object):
except OSError as error:
# No child processes
if error.errno == errno.ECHILD:
- raise ExpectException('process already not exist.') from error
+ raise ExpectException("process already not exist.") from error
else:
raise error
return child_pid, child_status
@@ -1004,9 +1077,13 @@ class Execution(object):
def raise_pattern_type_error(self, pattern):
raise TypeError(
- 'got %s as pattern, must be one'
- ' of: %s, pexpect.EOFException, pexpect.TIMEOUTException'
- % (type(pattern), ', '.join([str(ast) for ast in self.ALLOWED_STRING_TYPES])))
+ "got %s as pattern, must be one"
+ " of: %s, pexpect.EOFException, pexpect.TIMEOUTException"
+ % (
+ type(pattern),
+ ", ".join([str(ast) for ast in self.ALLOWED_STRING_TYPES]),
+ )
+ )
def compile_pattern_list(self, pattern_list):
if not pattern_list:
@@ -1023,7 +1100,7 @@ class Execution(object):
pattern_list_temp.append(EOFException)
elif pattern is TimeoutException:
pattern_list_temp.append(TimeoutException)
- elif isinstance(pattern, type(re.compile(''))):
+ elif isinstance(pattern, type(re.compile(""))):
pattern_list_temp.append(pattern)
else:
self.raise_pattern_type_error(pattern)
@@ -1048,15 +1125,17 @@ class Execution(object):
while True:
match_index = re_searcher.search(context_buffer)
if match_index > -1:
- self.context_buffer = context_buffer[re_searcher.end:]
+ self.context_buffer = context_buffer[re_searcher.end :]
self.context_before = context_buffer[: re_searcher.start]
- self.context_after = context_buffer[re_searcher.start: re_searcher.end]
+ self.context_after = context_buffer[
+ re_searcher.start : re_searcher.end
+ ]
self.context_match = re_searcher.context_match
self.matchIndex = match_index
return self.matchIndex
# no match at this point
if (timeout is not None) and (timeout < 0):
- raise TimeoutException('Timeout exceeded in loop_expect().')
+ raise TimeoutException("Timeout exceeded in loop_expect().")
# not timed out, continue read
more_context = self.read_non_block(self.max_read_size, timeout)
time.sleep(0.0001)
@@ -1091,8 +1170,8 @@ class Execution(object):
raise
def set_win_size(self, rows, cols):
- win_size = getattr(termios, 'TIOCSWINSZ', -2146929561)
- s_size = struct.pack('HHHH', rows, cols, 0, 0)
+ win_size = getattr(termios, "TIOCSWINSZ", -2146929561)
+ s_size = struct.pack("HHHH", rows, cols, 0, 0)
fcntl.ioctl(self.fileno(), win_size, s_size)
def select(self, inputs, outputs, errputs, timeout=None):
@@ -1124,8 +1203,7 @@ class RESearcher(object):
self.start = None
self.context_match = None
self.end = None
- for index, pattern_item in zip(list(range(len(pattern_list))),
- pattern_list):
+ for index, pattern_item in zip(list(range(len(pattern_list))), pattern_list):
if pattern_item is EOFException:
self.eof_index = index
continue
@@ -1138,21 +1216,23 @@ class RESearcher(object):
result_list = list()
for index, pattern_item in self._searches:
try:
- result_list.append((index, ' %d: re.compile("%s")' %
- (index, pattern_item.pattern)))
+ result_list.append(
+ (index, ' %d: re.compile("%s")' % (index, pattern_item.pattern))
+ )
except UnicodeEncodeError:
- result_list.append((index, ' %d: re.compile(%r)' %
- (index, pattern_item.pattern)))
- result_list.append((-1, 'RESearcher:'))
+ result_list.append(
+ (index, " %d: re.compile(%r)" % (index, pattern_item.pattern))
+ )
+ result_list.append((-1, "RESearcher:"))
if self.eof_index >= 0:
- result_list.append((self.eof_index, ' %d: EOF' %
- self.eof_index))
+ result_list.append((self.eof_index, " %d: EOF" % self.eof_index))
if self.timeout_index >= 0:
- result_list.append((self.timeout_index, ' %d: TIMEOUT' %
- self.timeout_index))
+ result_list.append(
+ (self.timeout_index, " %d: TIMEOUT" % self.timeout_index)
+ )
result_list.sort()
s_result_list = list(zip(*result_list))[1]
- return '\n'.join(s_result_list)
+ return "\n".join(s_result_list)
def search(self, content):
first_match_index = None
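Reviewer note: the search method truncated here implements earliest-match-wins across a list of compiled patterns. Its core selection logic, reduced to a standalone function (illustrative only):

```python
import re


def earliest_match(patterns, text):
    """Return (index, match) for whichever pattern matches earliest in text."""
    best = None
    for idx, pat in enumerate(patterns):
        m = pat.search(text)
        if m and (best is None or m.start() < best[1].start()):
            best = (idx, m)
    return best


pats = [re.compile("SQL>"), re.compile("CT-"), re.compile("rows fetched")]
print(earliest_match(pats, "10 rows fetched\nSQL>"))  # index 2 wins
```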
diff --git a/pkg/deploy/action/cantian/cantian_install.py b/pkg/deploy/action/cantian/cantian_install.py
index b8f854e2655305718d1b557d41c3659f9f0b7b45..50cd85cedf7327b6b024be2a4ee907e9173fd2b4 100644
--- a/pkg/deploy/action/cantian/cantian_install.py
+++ b/pkg/deploy/action/cantian/cantian_install.py
@@ -28,13 +28,21 @@ try:
import collections
from datetime import datetime
from get_config_info import get_value
- from cantian_funclib import CommonValue, SingleNodeConfig, ClusterNode0Config, \
- ClusterNode1Config, DefaultConfigValue
+ from cantian_funclib import (
+ CommonValue,
+ SingleNodeConfig,
+ ClusterNode0Config,
+ ClusterNode1Config,
+ DefaultConfigValue,
+ )
from Common import CommonPrint
from options import Options
from exception import NormalException
from log import LOGGER
- sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dbstor"))
+
+ sys.path.append(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dbstor")
+ )
from kmc_adapter import CApiWrapper
PYTHON242 = "2.4.2"
@@ -43,10 +51,12 @@ try:
if gPyVersion < PYTHON3:
print_str = CommonPrint()
- print_str.common_log("This install script can not support python version: %s"
- % gPyVersion)
- raise Exception("This install script can not support python version: %s"
- % gPyVersion)
+ print_str.common_log(
+ "This install script can not support python version: %s" % gPyVersion
+ )
+ raise Exception(
+ "This install script can not support python version: %s" % gPyVersion
+ )
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
sys.dont_write_bytecode = True
@@ -64,12 +74,24 @@ MYSQLD = "mysqld"
USE_DBSTOR = ["combined", "dbstor"]
USE_LUN = ["dss"]
-INSTALL_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "installdb.sh")
-
-VALID_RUNNING_MODE = {CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST, CANTIAND_IN_CLUSTER,
- CANTIAND_WITH_MYSQL_IN_CLUSTER, MYSQLD}
-
-VALID_SINGLE_MYSQL_RUNNING_MODE = {CANTIAND_WITH_MYSQL_IN_CLUSTER, CANTIAND_WITH_MYSQL_ST, CANTIAND_WITH_MYSQL}
+INSTALL_SCRIPT = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "installdb.sh"
+)
+
+VALID_RUNNING_MODE = {
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ MYSQLD,
+}
+
+VALID_SINGLE_MYSQL_RUNNING_MODE = {
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL,
+}
CLUSTER_SIZE = 2 # default to 2, 4 node cluster mode need add parameter to specify this
@@ -79,10 +101,12 @@ PKG_DIR = os.path.abspath(os.path.join(INSTALL_SCPRIT_DIR, "../.."))
JS_CONF_FILE = os.path.join(PKG_DIR, "action", "cantian", "install_config.json")
CANTIAN_CONF_FILE = os.path.join("/opt/cantian/cantian", "cfg", "cantian_config.json")
CONFIG_PARAMS_FILE = os.path.join(PKG_DIR, "config", "deploy_param.json")
-CANTIAN_START_STATUS_FILE = os.path.join("/opt/cantian/cantian", "cfg", "start_status.json")
+CANTIAN_START_STATUS_FILE = os.path.join(
+ "/opt/cantian/cantian", "cfg", "start_status.json"
+)
CANTIAN_INSTALL_LOG_FILE = "/opt/cantian/log/cantian/cantian_deploy.log"
CANTIAND_INI_FILE = "/mnt/dbdata/local/cantian/tmp/data/cfg/cantiand.ini"
-CTSQL_INI_FILE = '/mnt/dbdata/local/cantian/tmp/data/cfg/*sql.ini'
+CTSQL_INI_FILE = "/mnt/dbdata/local/cantian/tmp/data/cfg/*sql.ini"
MYSQL_CODE_DIR = "/opt/cantian/image/cantian_connector/cantian-connector-mysql"
MYSQL_BIN_DIR = "/opt/cantian/mysql/install/mysql"
@@ -91,14 +115,36 @@ MYSQL_LOG_FILE = ""
DEPLOY_MODE = ""
g_opts = Options()
-CheckPathsInfo = collections.namedtuple('CheckPathsInfo', ['path_len', 'path_type_in', 'a_ascii',
- 'z_ascii', 'a_cap_ascii', 'z_cap_ascii',
- 'num0_ascii', 'num9_ascii', 'char_check_list'])
-
-UnitConversionInfo = collections.namedtuple('UnitConversionInfo', ['tmp_gb', 'tmp_mb', 'tmp_kb', 'key', 'value',
- 'sga_buff_size', 'temp_buffer_size',
- 'data_buffer_size', 'shared_pool_size',
- 'log_buffer_size'])
+CheckPathsInfo = collections.namedtuple(
+ "CheckPathsInfo",
+ [
+ "path_len",
+ "path_type_in",
+ "a_ascii",
+ "z_ascii",
+ "a_cap_ascii",
+ "z_cap_ascii",
+ "num0_ascii",
+ "num9_ascii",
+ "char_check_list",
+ ],
+)
+
+UnitConversionInfo = collections.namedtuple(
+ "UnitConversionInfo",
+ [
+ "tmp_gb",
+ "tmp_mb",
+ "tmp_kb",
+ "key",
+ "value",
+ "sga_buff_size",
+ "temp_buffer_size",
+ "data_buffer_size",
+ "shared_pool_size",
+ "log_buffer_size",
+ ],
+)
def check_kernel_parameter(para):
@@ -116,12 +162,14 @@ def check_invalid_symbol(para):
:param para: parameter's value
:return: NA
"""
- symbols = ["|", "&", "$", ">", "<", "\"", "'", "`"]
+ symbols = ["|", "&", "$", ">", "<", '"', "'", "`"]
for symbol in symbols:
if para.find(symbol) > -1:
print_str_1 = CommonPrint()
- print_str_1.common_log("There is invalid symbol \"%s\" in %s" % (symbol, para))
- raise Exception("There is invalid symbol \"%s\" in %s" % (symbol, para))
+ print_str_1.common_log(
+ 'There is invalid symbol "%s" in %s' % (symbol, para)
+ )
+ raise Exception('There is invalid symbol "%s" in %s' % (symbol, para))
def all_zero_addr_after_ping(node_ip):
@@ -132,7 +180,7 @@ def all_zero_addr_after_ping(node_ip):
"""
if not node_ip:
return False
- allowed_chars = set('0:.')
+ allowed_chars = set("0:.")
if set(node_ip).issubset(allowed_chars):
return True
else:
@@ -146,38 +194,46 @@ def check_path(path_type_in):
:return: weather validity
"""
path_len = len(path_type_in)
- a_ascii = ord('a')
- z_ascii = ord('z')
- a_cap_ascii = ord('A')
- z_cap_ascii = ord('Z')
- num0_ascii = ord('0')
- num9_ascii = ord('9')
- blank_ascii = ord(' ')
+ a_ascii = ord("a")
+ z_ascii = ord("z")
+ a_cap_ascii = ord("A")
+ z_cap_ascii = ord("Z")
+ num0_ascii = ord("0")
+ num9_ascii = ord("9")
+ blank_ascii = ord(" ")
sep1_ascii = ord(os.sep)
- sep2_ascii = ord('_')
- sep3_ascii = ord(':')
- sep4_ascii = ord('-')
- sep5_ascii = ord('.')
- char_check_list1 = [blank_ascii,
- sep1_ascii,
- sep2_ascii,
- sep4_ascii,
- sep5_ascii
- ]
-
- char_check_list2 = [blank_ascii,
- sep1_ascii,
- sep2_ascii,
- sep3_ascii,
- sep4_ascii
- ]
+ sep2_ascii = ord("_")
+ sep3_ascii = ord(":")
+ sep4_ascii = ord("-")
+ sep5_ascii = ord(".")
+ char_check_list1 = [blank_ascii, sep1_ascii, sep2_ascii, sep4_ascii, sep5_ascii]
+
+ char_check_list2 = [blank_ascii, sep1_ascii, sep2_ascii, sep3_ascii, sep4_ascii]
if CURRENT_OS == "Linux":
- get_check_path_linux_info = CheckPathsInfo(path_len, path_type_in, a_ascii, z_ascii, a_cap_ascii,
- z_cap_ascii, num0_ascii, num9_ascii, char_check_list1)
+ get_check_path_linux_info = CheckPathsInfo(
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ a_cap_ascii,
+ z_cap_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list1,
+ )
return check_path_linux(get_check_path_linux_info)
elif CURRENT_OS == "Windows":
- get_check_path_windows_info = CheckPathsInfo(path_len, path_type_in, a_ascii, z_ascii, a_cap_ascii,
- z_cap_ascii, num0_ascii, num9_ascii, char_check_list2)
+ get_check_path_windows_info = CheckPathsInfo(
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ a_cap_ascii,
+ z_cap_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list2,
+ )
return check_path_windows(get_check_path_windows_info)
else:
print_str_1 = CommonPrint()
@@ -186,27 +242,49 @@ def check_path(path_type_in):
def check_path_linux(get_check_path_linux_info):
- path_len, path_type_in, a_ascii, z_ascii, a_cap_ascii, \
- z_cap_ascii, num0_ascii, num9_ascii, char_check_list1 = get_check_path_linux_info
+ (
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ a_cap_ascii,
+ z_cap_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list1,
+ ) = get_check_path_linux_info
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (a_ascii <= char_check <= z_ascii
- or a_cap_ascii <= char_check <= z_cap_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in char_check_list1)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or a_cap_ascii <= char_check <= z_cap_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in char_check_list1
+ ):
return False
return True
def check_path_windows(get_check_path_windows_info):
- path_len, path_type_in, a_ascii, z_ascii, a_cap_ascii, \
- z_cap_ascii, num0_ascii, num9_ascii, char_check_list2 = get_check_path_windows_info
+ (
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ a_cap_ascii,
+ z_cap_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list2,
+ ) = get_check_path_windows_info
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (a_ascii <= char_check <= z_ascii
- or a_cap_ascii <= char_check <= z_cap_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in char_check_list2)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or a_cap_ascii <= char_check <= z_cap_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in char_check_list2
+ ):
return False
return True
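Reviewer note: both reformatted branches reduce to a character whitelist test. A set-based equivalent for sanity-checking the Linux branch (assumes os.sep is '/'; illustrative only):

```python
import os
import string

# Set-based equivalent of the Linux branch's character whitelist.
ALLOWED = set(string.ascii_letters + string.digits + " _-." + os.sep)


def check_path_linux_equiv(path: str) -> bool:
    return all(ch in ALLOWED for ch in path)


print(check_path_linux_equiv("/opt/cantian/log"))  # True
print(check_path_linux_equiv("/opt/cantian;rm"))   # False
```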
@@ -220,8 +298,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
pobj.stdin.write(cmd.encode())
@@ -281,7 +364,9 @@ def check_platform():
pass
else:
print_str_2 = CommonPrint()
- print_str_2.common_log("This install script can not support %s platform." % CURRENT_OS)
+ print_str_2.common_log(
+ "This install script can not support %s platform." % CURRENT_OS
+ )
raise Exception("This install script can not support %s platform." % CURRENT_OS)
@@ -291,28 +376,32 @@ def usage():
def load_config_param(json_data):
- g_opts.node_id = int(json_data.get('node_id'))
- if json_data.get('link_type', '0').strip() == '0':
+ g_opts.node_id = int(json_data.get("node_id"))
+ if json_data.get("link_type", "0").strip() == "0":
g_opts.link_type = "TCP"
- elif json_data.get('link_type', '0').strip() == '1':
+ elif json_data.get("link_type", "0").strip() == "1":
g_opts.link_type = "RDMA"
- elif json_data.get('link_type', '0').strip() == '2':
+ elif json_data.get("link_type", "0").strip() == "2":
g_opts.link_type = "RDMA_1823"
- if json_data.get('cantian_in_container', 0) == '1':
+ if json_data.get("cantian_in_container", 0) == "1":
g_opts.cantian_in_container = True
global DEPLOY_MODE
DEPLOY_MODE = get_value("deploy_mode")
- g_opts.db_type = json_data.get('db_type', '').strip()
+ g_opts.db_type = json_data.get("db_type", "").strip()
g_opts.storage_dbstor_fs = json_data.get("storage_dbstor_fs", "").strip()
- g_opts.storage_share_fs = json_data.get('storage_share_fs', "").strip()
- g_opts.namespace = json_data.get('cluster_name', 'test1').strip()
- g_opts.share_logic_ip = json_data.get('share_logic_ip', '127.0.0.1').strip() if DEPLOY_MODE == "file" else None
- g_opts.archive_logic_ip = json_data.get('archive_logic_ip', '127.0.0.1').strip()
+ g_opts.storage_share_fs = json_data.get("storage_share_fs", "").strip()
+ g_opts.namespace = json_data.get("cluster_name", "test1").strip()
+ g_opts.share_logic_ip = (
+ json_data.get("share_logic_ip", "127.0.0.1").strip()
+ if DEPLOY_MODE == "file"
+ else None
+ )
+ g_opts.archive_logic_ip = json_data.get("archive_logic_ip", "127.0.0.1").strip()
g_opts.mes_type = json_data.get("mes_type", "UC").strip()
if DEPLOY_MODE == "file":
g_opts.mes_type = "TCP"
g_opts.mes_ssl_switch = json_data.get("mes_ssl_switch", False)
- storage_archive_fs = json_data.get('storage_archive_fs', "").strip()
+ storage_archive_fs = json_data.get("storage_archive_fs", "").strip()
g_opts.use_dbstor = DEPLOY_MODE in USE_DBSTOR
g_opts.use_gss = DEPLOY_MODE in USE_LUN
g_opts.archive_location = f"""location=/{f'mnt/dbdata/remote/archive_{storage_archive_fs}'
@@ -320,13 +409,13 @@ def load_config_param(json_data):
if DEPLOY_MODE in USE_LUN:
g_opts.archive_location = "location=+vg3/archive"
g_opts.dbstor_deploy_mode = DEPLOY_MODE == "dbstor"
- metadata_str = "metadata_" + json_data.get('storage_metadata_fs', '').strip()
+ metadata_str = "metadata_" + json_data.get("storage_metadata_fs", "").strip()
node_str = "node" + str(g_opts.node_id)
global MYSQL_DATA_DIR
MYSQL_DATA_DIR = os.path.join("/mnt/dbdata/remote", metadata_str, node_str)
global MYSQL_LOG_FILE
MYSQL_LOG_FILE = os.path.join(MYSQL_DATA_DIR, "mysql.log")
- g_opts.max_arch_files_size = json_data['MAX_ARCH_FILES_SIZE'].strip()
+ g_opts.max_arch_files_size = json_data["MAX_ARCH_FILES_SIZE"].strip()
g_opts.cluster_id = json_data.get("cluster_id", "0").strip()
@@ -334,8 +423,9 @@ def parse_parameter():
try:
# Parameters are passed into argv. After parsing, they are stored
# in opts as binary tuples. Unresolved parameters are stored in args.
- opts, args = getopt.getopt(sys.argv[1:],
- "h:s:t:", ["help", "sys_password=", "isencrept="])
+ opts, args = getopt.getopt(
+ sys.argv[1:], "h:s:t:", ["help", "sys_password=", "isencrept="]
+ )
if args:
print_str_1 = CommonPrint()
print_str_1.common_log("Parameter input error: " + str(args[0]))
@@ -344,22 +434,24 @@ def parse_parameter():
for _key, _value in opts:
if _key == "-s":
g_opts.password = _get_input("please input pwd: ").strip()
- g_opts.cert_encrypt_pwd = _get_input("please input cert_encrypt_pwd:").strip()
+ g_opts.cert_encrypt_pwd = _get_input(
+ "please input cert_encrypt_pwd:"
+ ).strip()
if _key == "-t":
- if _value.strip() == 'reserve':
+ if _value.strip() == "reserve":
g_opts.isencrept = False
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(JS_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(JS_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- g_opts.log_file = json_data['l_LOG_FILE'].strip() # -I
- g_opts.running_mode = json_data['M_RUNING_MODE'].strip() # -M
+ g_opts.log_file = json_data["l_LOG_FILE"].strip() # -I
+ g_opts.running_mode = json_data["M_RUNING_MODE"].strip() # -M
g_opts.ignore_pkg_check = True # -p
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
load_config_param(json_data)
g_opts.opts = opts
@@ -375,27 +467,39 @@ def is_mlnx():
"""
ret_code, stdout, stderr = _exec_popen("which ofed_info")
if ret_code:
- log("no ofed_info cmd found."
- "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr))
+ log(
+ "no ofed_info cmd found."
+ "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr)
+ )
return False
ret_code, stdout, stderr = _exec_popen("ofed_info -s")
if ret_code:
- log("exec ofed_info cmd failed."
- "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr))
+ log(
+ "exec ofed_info cmd failed."
+ "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr)
+ )
return False
- if 'MLNX_OFED_LINUX-5.5' in stdout:
+ if "MLNX_OFED_LINUX-5.5" in stdout:
LOGGER.info("Is mlnx 5.5")
return True
ret_code, os_arch, stderr = _exec_popen("uname -i")
- aarch_mlnx_version_list = ['OFED-internal-5.8-2.0.3', 'MLNX_OFED_LINUX-5.8', 'MLNX_OFED_LINUX-5.9']
- aarch_version_check_result = any(mlnx_version if mlnx_version in stdout else False
- for mlnx_version in aarch_mlnx_version_list)
+ aarch_mlnx_version_list = [
+ "OFED-internal-5.8-2.0.3",
+ "MLNX_OFED_LINUX-5.8",
+ "MLNX_OFED_LINUX-5.9",
+ ]
+ aarch_version_check_result = any(
+ mlnx_version in stdout for mlnx_version in aarch_mlnx_version_list
+ )
if os_arch == "aarch64" and aarch_version_check_result == True:
- log("Is mlnx 5.8~5.9"
- " ret_code : %s, stdout : %s, stderr : %s" % (ret_code, os_arch, stderr))
+ log(
+ "Is mlnx 5.8~5.9"
+ " ret_code : %s, stdout : %s, stderr : %s" % (ret_code, os_arch, stderr)
+ )
return True
LOGGER.info("Not mlnx 5.5")
@@ -436,21 +540,38 @@ def check_parameter():
def check_running_mode(print_str_7):
# Check running mode
- if len(g_opts.running_mode) == 0 or g_opts.running_mode.lower() not in VALID_RUNNING_MODE:
+ if (
+ len(g_opts.running_mode) == 0
+ or g_opts.running_mode.lower() not in VALID_RUNNING_MODE
+ ):
print_str_7.common_log("Invalid running mode: " + g_opts.running_mode)
raise Exception("Invalid running mode: " + g_opts.running_mode)
if g_opts.node_id not in [0, 1]:
print_str_7.common_log("Invalid node id: " + g_opts.node_id)
raise Exception("Invalid node id: " + g_opts.node_id)
- if g_opts.running_mode.lower() in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST] and g_opts.node_id == 1:
- print_str_7.common_log("Invalid node id: " + g_opts.node_id + ", this node id can only run in cluster mode")
- raise Exception("Invalid node id: " + g_opts.node_id + ", this node id can only run in cluster mode")
+ if (
+ g_opts.running_mode.lower()
+ in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST]
+ and g_opts.node_id == 1
+ ):
+ print_str_7.common_log(
+ "Invalid node id: "
+ + str(g_opts.node_id)
+ + ", this node id can only run in cluster mode"
+ )
+ raise Exception(
+ "Invalid node id: "
+ + str(g_opts.node_id)
+ + ", this node id can only run in cluster mode"
+ )
def check_logfile_path(print_str_7):
# Check the legitimacy of the path logfile
if not check_path(g_opts.log_file):
- print_str_7.common_log("Error: There is invalid character in specified log file.")
+ print_str_7.common_log(
+ "Error: There is invalid character in specified log file."
+ )
raise Exception("Error: There is invalid character in specified log file.")
@@ -459,11 +580,20 @@ def init_start_status_file():
try:
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
modes = stat.S_IWUSR | stat.S_IRUSR
- start_parameters = {'start_status': 'default', 'db_create_status': 'default', 'ever_started': False , 'mysql_init': 'default'}
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w') as load_fp:
+ start_parameters = {
+ "start_status": "default",
+ "db_create_status": "default",
+ "ever_started": False,
+ "mysql_init": "default",
+ }
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w"
+ ) as load_fp:
json.dump(start_parameters, load_fp)
except IOError as ex:
- print_str_7.common_log("Error: Can not create or write file: " + CANTIAN_START_STATUS_FILE)
+ print_str_7.common_log(
+ "Error: Can not create or write file: " + CANTIAN_START_STATUS_FILE
+ )
print_str_7.common_log(str(ex))
raise Exception(str(ex)) from ex
@@ -477,13 +607,17 @@ def check_log_path():
if not os.path.isdir(dir_path):
g_opts.log_file = ""
print_str_1 = CommonPrint()
- print_str_1.common_log("Specified log path: \"%s\" does not exist, "
- "choose the default path instead." % dir_path)
+ print_str_1.common_log(
+ 'Specified log path: "%s" does not exist, '
+ "choose the default path instead." % dir_path
+ )
elif not base_name:
g_opts.log_file = ""
print_str_2 = CommonPrint()
- print_str_2.common_log("Log file does not been specified, "
- "choose the default logfile instead.")
+ print_str_2.common_log(
+ "Log file does not been specified, "
+ "choose the default logfile instead."
+ )
def use_default_log_path():
@@ -526,7 +660,7 @@ def cantian_check_share_logic_ip_isvalid(ipname, nodeip):
def ping_execute(p_cmd):
cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (p_cmd, nodeip)
ret_code, stdout, stderr = _exec_popen(cmd)
- if ret_code or stdout != '3':
+ if ret_code or stdout != "3":
return False
return True
@@ -537,15 +671,20 @@ def cantian_check_share_logic_ip_isvalid(ipname, nodeip):
LOGGER.info("check nfs logic ip address or domain name.")
if not ping_execute("ping") and not ping_execute("ping6"):
- err_msg = "checked the node %s IP address or domain name failed: %s" % (ipname, nodeip)
+ err_msg = "checked the node %s IP address or domain name failed: %s" % (
+ ipname,
+ nodeip,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
- LOGGER.info("checked the node %s IP address or domain name success: %s" % (ipname, nodeip))
+ LOGGER.info(
+ "checked the node %s IP address or domain name success: %s" % (ipname, nodeip)
+ )
def file_reader(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.read()
@@ -557,29 +696,42 @@ class Platform(object):
def __init__(self):
pass
- SUPPORTED_DISTS = ('suse', 'debian', 'fedora', 'redhat', 'centos',
- 'mandrake', 'mandriva', 'rocks', 'slackware',
- 'yellowdog', 'gentoo', 'unitedlinux',
- 'turbolinux', 'arch', 'mageia', 'openeuler',
- 'neokylin', 'euleros', 'kylin')
- UNIXCONFDIR = '/etc'
+ SUPPORTED_DISTS = (
+ "suse",
+ "debian",
+ "fedora",
+ "redhat",
+ "centos",
+ "mandrake",
+ "mandriva",
+ "rocks",
+ "slackware",
+ "yellowdog",
+ "gentoo",
+ "unitedlinux",
+ "turbolinux",
+ "arch",
+ "mageia",
+ "openeuler",
+ "neokylin",
+ "euleros",
+ "kylin",
+ )
+ UNIXCONFDIR = "/etc"
@staticmethod
- def _parse_release_file(firstline, version='', dst_id=''):
+ def _parse_release_file(firstline, version="", dst_id=""):
"""
function: parse first line of /etc/*release
input: string
output: tuple(string, string, string)
"""
- lsb_release_version_re = r'(.+) release ([\d.]+)[^(]*(?:\((.+)\))?'
- release_version_re = (r'([^0-9]+)(?: release )?([\d.]+)[^(]*'
- r'(?:\((.+)\))?')
+ lsb_release_version_re = r"(.+) release ([\d.]+)[^(]*(?:\((.+)\))?"
+ release_version_re = r"([^0-9]+)(?: release )?([\d.]+)[^(]*" r"(?:\((.+)\))?"
try:
- lsb_release_version = re.compile(lsb_release_version_re,
- re.ASCII)
- release_version = re.compile(release_version_re,
- re.ASCII)
+ lsb_release_version = re.compile(lsb_release_version_re, re.ASCII)
+ release_version = re.compile(release_version_re, re.ASCII)
except AttributeError:
lsb_release_version = re.compile(lsb_release_version_re)
release_version = re.compile(release_version_re)
@@ -602,7 +754,7 @@ class Platform(object):
if len(line_list) > 1:
dst_id = line_list[1]
- return '', version, dst_id
+ return "", version, dst_id
@staticmethod
def dist():
@@ -621,9 +773,9 @@ class Platform(object):
return "", "", ""
try:
- release_re = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
+ release_re = re.compile(r"(\w+)[-_](release|version)", re.ASCII)
except AttributeError:
- release_re = re.compile(r'(\w+)[-_](release|version)')
+ release_re = re.compile(r"(\w+)[-_](release|version)")
for etc_file in etc:
tmp_m = release_re.match(etc_file)
@@ -637,15 +789,17 @@ class Platform(object):
etc_file_name = os.path.join(Platform.UNIXCONFDIR, etc_file)
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(etc_file_name, flags, modes), 'r') as f:
+ with os.fdopen(os.open(etc_file_name, flags, modes), "r") as f:
firstline = f.readline()
except Exception as error:
LOGGER.info("read first line exception: %s." % str(error))
continue
# when euler, has centos-release
- if (_distname.lower() == "centos" and
- _distname.lower() not in firstline.lower()):
+ if (
+ _distname.lower() == "centos"
+ and _distname.lower() not in firstline.lower()
+ ):
continue
if _distname.lower() in Platform.SUPPORTED_DISTS:
@@ -686,8 +840,7 @@ class ParameterContainer(object):
index = self.ifiles.index(value)
para_index = 0
for _ in range(index + 1):
- para_index = self.parameters.index(self.IFILE,
- para_index + 1)
+ para_index = self.parameters.index(self.IFILE, para_index + 1)
self.parameters.pop(para_index)
self.ifiles.pop(index)
self.parameters.append(key)
@@ -726,7 +879,10 @@ class ParameterContainer(object):
def skip_execute_in_node_1():
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and g_opts.node_id == 1:
+ if (
+ g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and g_opts.node_id == 1
+ ):
return True
return False
@@ -738,7 +894,7 @@ def create_dir_if_needed(condition, directory_addr):
class Installer:
- """ This is Cantiand installer. """
+ """This is Cantiand installer."""
# Defining a constant identifies which step the installer failed to take.
# For roll back.
@@ -778,7 +934,7 @@ class Installer:
ipv_type = "ipv4"
def __init__(self, user, group):
- """ Constructor for the Installer class. """
+ """Constructor for the Installer class."""
LOGGER.info("Begin init...")
LOGGER.info("Installer runs on python version : " + gPyVersion)
@@ -885,14 +1041,14 @@ class Installer:
data = sql_content % (node0_redo, node1_redo)
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(sql_file, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(sql_file, flags, modes), "w", encoding="utf-8") as file:
file.write(data)
def get_decompress_tarname(self, tar_file):
- '''
+ """
decompress a.tar.gz, then get file name
:return:
- '''
+ """
# get real directory name in tar file
tars = tarfile.open(tar_file)
basename = tars.getnames()[0]
@@ -900,52 +1056,54 @@ class Installer:
return basename
def is_readable(self, file_name, user):
- '''
+ """
check whether the file is readable by the given user
:param file_name: path of the file to check
:param user: user name to check against
:return: True if the user can read the file
- '''
+ """
user_info = pwd.getpwnam(user)
uid = user_info.pw_uid
gid = user_info.pw_gid
s = os.stat(file_name)
mode = s[stat.ST_MODE]
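+ # Readable when the user owns the file and the owner-read bit is set,
+ # shares its group and the group-read bit is set, or the world-read
+ # bit is set.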
return (
- ((s[stat.ST_UID] == uid) and (mode & stat.S_IRUSR > 0)) or
- ((s[stat.ST_GID] == gid) and (mode & stat.S_IRGRP > 0)) or
- (mode & stat.S_IROTH > 0)
+ ((s[stat.ST_UID] == uid) and (mode & stat.S_IRUSR > 0))
+ or ((s[stat.ST_GID] == gid) and (mode & stat.S_IRGRP > 0))
+ or (mode & stat.S_IROTH > 0)
)
def check_createdb_file(self):
- '''
+ """
check that the create-db SQL file exists, is a regular file, and is readable by the user
:return:
- '''
+ """
# check -f parameter
if self.option != self.INS_ALL:
- raise Exception("Error: -f parameter should be used without"
- " -O parameter ")
+ raise Exception(
+ "Error: -f parameter should be used without" " -O parameter "
+ )
# check it is a file
if not os.path.isfile(self.create_db_file):
- raise Exception("Error: %s does not exists or is not a file"
- " or permission is not right."
- % self.create_db_file)
+ raise Exception(
+ "Error: %s does not exists or is not a file"
+ " or permission is not right." % self.create_db_file
+ )
if not check_path(self.create_db_file):
- raise Exception("Error: %s file path invalid: "
- % self.create_db_file)
+ raise Exception("Error: %s file path invalid: " % self.create_db_file)
# if the executing user is root, check that the common user has read permission
file_path = os.path.dirname(self.create_db_file)
# check that the user can cd into the path of the create-db sql file
permission_ok, _ = self.check_permission(file_path, True)
if not permission_ok:
- raise Exception("Error: %s can not access %s"
- % (self.user, file_path))
+ raise Exception("Error: %s can not access %s" % (self.user, file_path))
# check create db file is readable for user
if not self.is_readable(self.create_db_file, self.user):
- raise Exception("Error: %s is not readable for user %s"
- % (self.create_db_file, self.user))
+ raise Exception(
+ "Error: %s is not readable for user %s"
+ % (self.create_db_file, self.user)
+ )
# change file to a realpath file
self.create_db_file = os.path.realpath(self.create_db_file)
@@ -955,11 +1113,21 @@ class Installer:
:return: cantiand config, cms config, gss config
"""
# Get the default configs
- if g_opts.running_mode in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST]:
+ if g_opts.running_mode in [
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ ]:
self.cantiand_configs = SingleNodeConfig.get_config(False)
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and g_opts.node_id == 0:
+ if (
+ g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and g_opts.node_id == 0
+ ):
self.cantiand_configs = ClusterNode0Config.get_config(False)
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and g_opts.node_id == 1:
+ if (
+ g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and g_opts.node_id == 1
+ ):
self.cantiand_configs = ClusterNode1Config.get_config(False)
def check_parameter(self):
@@ -995,12 +1163,15 @@ class Installer:
err_msg = "Parameter input error, need -U parameter."
LOGGER.error(err_msg)
raise Exception(err_msg)
- os.environ['cantiand_user'] = str(self.user)
+ os.environ["cantiand_user"] = str(self.user)
# User must exist.
str_cmd = "id -u ${cantiand_user}"
ret_code, stdout, stderr = _exec_popen(str_cmd)
if ret_code:
- err_msg = "%s : no such user, command: %s ret_code : %s, stdout : %s, stderr : %s" % (self.user, str_cmd, ret_code, stdout, stderr)
+ err_msg = (
+ "%s : no such user, command: %s ret_code : %s, stdout : %s, stderr : %s"
+ % (self.user, str_cmd, ret_code, stdout, stderr)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
if self.option == self.INS_ALL:
@@ -1032,12 +1203,14 @@ class Installer:
LOGGER.error(err_msg)
raise Exception(err_msg)
if len(self.cantiand_configs.get("LOG_HOME")) == 0:
- log_path=os.path.dirname(g_opts.log_file)
+ log_path = os.path.dirname(g_opts.log_file)
self.cantiand_configs["LOG_HOME"] = log_path
if len(self.cantiand_configs.get("SHARED_PATH")) == 0:
self.cantiand_configs["SHARED_PATH"] = os.path.join(self.data, "data")
if g_opts.use_dbstor:
- self.cantiand_configs["DBSTOR_DEPLOY_MODE"] = "1" if g_opts.dbstor_deploy_mode else "0"
+ self.cantiand_configs["DBSTOR_DEPLOY_MODE"] = (
+ "1" if g_opts.dbstor_deploy_mode else "0"
+ )
self.cantiand_configs["ARCHIVE_DEST_1"] = g_opts.archive_location
self.cantiand_configs["MAX_ARCH_FILES_SIZE"] = g_opts.max_arch_files_size
self.cantiand_configs["CLUSTER_ID"] = g_opts.cluster_id
@@ -1064,52 +1237,59 @@ class Installer:
LOGGER.info("End check parameters.")
def add_config_for_dbstor(self):
- self.cantiand_configs["CONTROL_FILES"] = "{0}, {1}, {2}".format(os.path.join(self.data, "data/ctrl1"),
- os.path.join(self.data, "data/ctrl2"),
- os.path.join(self.data, "data/ctrl3"))
+ self.cantiand_configs["CONTROL_FILES"] = "{0}, {1}, {2}".format(
+ os.path.join(self.data, "data/ctrl1"),
+ os.path.join(self.data, "data/ctrl2"),
+ os.path.join(self.data, "data/ctrl3"),
+ )
if g_opts.use_dbstor:
self.cantiand_configs["CONTROL_FILES"] = "(-ctrl1, -ctrl2, -ctrl3)"
self.cantiand_configs["SHARED_PATH"] = "-"
self.cantiand_configs["ENABLE_DBSTOR"] = "TRUE"
self.cantiand_configs["DBSTOR_NAMESPACE"] = g_opts.namespace
elif g_opts.use_gss:
- self.cantiand_configs["CONTROL_FILES"] = "(+vg1/ctrl1, +vg1/ctrl2, +vg1/ctrl3)"
+ self.cantiand_configs["CONTROL_FILES"] = (
+ "(+vg1/ctrl1, +vg1/ctrl2, +vg1/ctrl3)"
+ )
self.cantiand_configs["ENABLE_DBSTOR"] = "FALSE"
self.cantiand_configs["ENABLE_DSS"] = "TRUE"
self.cantiand_configs["SHARED_PATH"] = "+vg1"
else:
self.cantiand_configs["ENABLE_DBSTOR"] = "FALSE"
- self.cantiand_configs["SHARED_PATH"] = \
- '/mnt/dbdata/remote/storage_{}/data'.format(g_opts.storage_dbstor_fs)
+ self.cantiand_configs["SHARED_PATH"] = (
+ "/mnt/dbdata/remote/storage_{}/data".format(g_opts.storage_dbstor_fs)
+ )
def parse_key_and_value(self):
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- _value = json_data.get('cms_ip', '0').strip()
- self.cantiand_configs['INTERCONNECT_ADDR'] = _value
+ _value = json_data.get("cms_ip", "0").strip()
+ self.cantiand_configs["INTERCONNECT_ADDR"] = _value
node_addr = _value.split(";")[g_opts.node_id]
- self.cantiand_configs['LSNR_ADDR'] += "," + node_addr
- _value = json_data.get('mysql_metadata_in_cantian', 'True')
- self.cantiand_configs['MYSQL_METADATA_IN_CANTIAN'] = str(_value).upper()
- _value = json_data.get('mes_ssl_switch', 'False')
- self.cantiand_configs['MES_SSL_SWITCH'] = str(_value).upper()
+ self.cantiand_configs["LSNR_ADDR"] += "," + node_addr
+ _value = json_data.get("mysql_metadata_in_cantian", "True")
+ self.cantiand_configs["MYSQL_METADATA_IN_CANTIAN"] = str(_value).upper()
+ _value = json_data.get("mes_ssl_switch", "False")
+ self.cantiand_configs["MES_SSL_SWITCH"] = str(_value).upper()
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(JS_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(JS_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- self.install_path = json_data['R_INSTALL_PATH'].strip() # -R
- self.data = json_data['D_DATA_PATH'].strip() # -D
+ self.install_path = json_data["R_INSTALL_PATH"].strip() # -R
+ self.data = json_data["D_DATA_PATH"].strip() # -D
self.option = self.INS_ALL
for tmp_num in range(100):
- tmp_word = 'Z_KERNEL_PARAMETER' + str(tmp_num)
- if json_data.get(tmp_word, '') != '':
- _value = json_data.get(tmp_word, '').strip().split('=')
+ tmp_word = "Z_KERNEL_PARAMETER" + str(tmp_num)
+ if json_data.get(tmp_word, "") != "":
+ _value = json_data.get(tmp_word, "").strip().split("=")
self.cantiand_configs[_value[0].strip().upper()] = _value[1].strip()
if json_data["CT_CLUSTER_STRICT_CHECK"] in ["FALSE", "TRUE"]:
- self.cantiand_configs["CT_CLUSTER_STRICT_CHECK"] = json_data["CT_CLUSTER_STRICT_CHECK"]
+ self.cantiand_configs["CT_CLUSTER_STRICT_CHECK"] = json_data[
+ "CT_CLUSTER_STRICT_CHECK"
+ ]
if g_opts.password:
self.cantiand_configs["_SYS_PASSWORD"] = g_opts.password
if g_opts.cert_encrypt_pwd:
@@ -1126,9 +1306,8 @@ class Installer:
LOGGER.info("Checking runner.")
gid = os.getgid()
uid = os.getuid()
- log("Check runner user id and group id is : %s, %s"
- % (str(uid), str(gid)))
- if (gid != 0 and uid != 0):
+ log("Check runner user id and group id is : %s, %s" % (str(uid), str(gid)))
+ if gid != 0 and uid != 0:
err_msg = "Only user with root privilege can run this script"
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1155,12 +1334,14 @@ class Installer:
chown data and gcc dirs
:return:
"""
- cmd = "chown %s:%s -hR \"%s\";" % (self.user, self.group, self.data)
+ cmd = 'chown %s:%s -hR "%s";' % (self.user, self.group, self.data)
LOGGER.info("Change owner cmd: %s" % cmd)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
raise Exception(
- "chown to %s:%s return: %s%s%s" % (self.user, self.group, str(ret_code), os.linesep, stderr))
+ "chown to %s:%s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
###########################################################################
# Is there a database installed by the user? If so, raise error
@@ -1179,12 +1360,15 @@ class Installer:
str_cmd = "echo ~"
ret_code, stdout, stderr = _exec_popen(str_cmd)
if ret_code:
- err_msg = "Can not get user home. ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr)
+ err_msg = (
+ "Can not get user home. ret_code : %s, stdout : %s, stderr : %s"
+ % (ret_code, stdout, stderr)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
# Get the profile of user.
output = os.path.realpath(os.path.normpath(stdout))
- if (not check_path(output)):
+ if not check_path(output):
err_msg = "The user home directory is invalid."
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1196,7 +1380,7 @@ class Installer:
try:
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.user_profile, flags, modes), 'r') as _file:
+ with os.fdopen(os.open(self.user_profile, flags, modes), "r") as _file:
is_find = self.dealwith_gsdb(is_find, _file)
except IOError as ex:
err_msg = "Can not read user profile: " + str(ex)
@@ -1217,15 +1401,18 @@ class Installer:
def dealwith_gsdb(self, is_find, _file):
while True:
str_line = _file.readline()
- if (not str_line):
+ if not str_line:
break
str_line = str_line.strip()
- if (str_line.startswith("#")):
+ if str_line.startswith("#"):
continue
user_info = str_line.split()
self.dealwith_gsdb_data(user_info, str_line)
- if (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith("CTDB_HOME_BAK=") > 0):
+ if (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith("CTDB_HOME_BAK=") > 0
+ ):
is_find = True
break
else:
@@ -1234,9 +1421,12 @@ class Installer:
def dealwith_gsdb_data(self, user_info, str_line):
# deal with the CTDB_DATA with """
- if (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith('CTDB_DATA="') > 0):
- self.old_data_path = str_line[str_line.find("=") + 2:-1]
+ if (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith('CTDB_DATA="') > 0
+ ):
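+ # The matched line is export CTDB_DATA="...": skip past '="'
+ # (find("=") + 2) and drop the trailing quote (-1) to get the bare path.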
+ self.old_data_path = str_line[str_line.find("=") + 2 : -1]
self.old_data_path = os.path.normpath(self.old_data_path)
real_path = os.path.realpath(self.old_data_path)
if not check_path(real_path):
@@ -1249,12 +1439,15 @@ class Installer:
LOGGER.error(err_msg)
raise Exception(err_msg)
# deal with the CTDB_DATA path without """
- elif (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith("CTDB_DATA=") > 0):
- self.old_data_path = str_line[str_line.find("=") + 1:]
+ elif (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith("CTDB_DATA=") > 0
+ ):
+ self.old_data_path = str_line[str_line.find("=") + 1 :]
self.old_data_path = os.path.normpath(self.old_data_path)
real_path = os.path.realpath(self.old_data_path)
- if (not check_path(real_path)):
+ if not check_path(real_path):
err_msg = "The Path specified by CTDB_DATA is invalid."
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1288,8 +1481,11 @@ class Installer:
# check the user has write permission or not
test_file = os.path.join(original_path, "touch.tst")
- cmd = ("touch %s && chmod %s %s "
- % (test_file, CommonValue.KEY_FILE_MODE, test_file))
+ cmd = "touch %s && chmod %s %s " % (
+ test_file,
+ CommonValue.KEY_FILE_MODE,
+ test_file,
+ )
status, _, stderr = _exec_popen(cmd)
if status != 0:
@@ -1345,14 +1541,22 @@ class Installer:
# 98: Address already in use
# 95: Operation not supported
# 13: Permission denied
- if (int(error.errno) == 98 or int(error.errno) == 95
- or int(error.errno) == 13):
- log("Error: port %s has been used,the detail"
- " information is as follows:" % value)
+ if (
+ int(error.errno) == 98
+ or int(error.errno) == 95
+ or int(error.errno) == 13
+ ):
+ log(
+ "Error: port %s has been used,the detail"
+ " information is as follows:" % value
+ )
str_cmd = "netstat -unltp | grep %s" % value
ret_code, stdout, stderr = _exec_popen(str_cmd)
if ret_code:
- err_msg = "can not get detail information of the port, command: %s. ret_code : %s, stdout : %s, stderr : %s" % (str_cmd, ret_code, stdout, stderr)
+ err_msg = (
+ "can not get detail information of the port, command: %s. ret_code : %s, stdout : %s, stderr : %s"
+ % (str_cmd, ret_code, stdout, stderr)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
LOGGER.error(str(stdout))
@@ -1361,7 +1565,9 @@ class Installer:
LOGGER.error("check port failed: " + str(ex))
raise Exception("check port failed: " + str(ex))
else:
- err_msg = "This install script can not support python version : " + gPyVersion
+ err_msg = (
+ "This install script can not support python version : " + gPyVersion
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1401,7 +1607,7 @@ class Installer:
output: NA
"""
LOGGER.info("check the node IP address.")
- if get_value("cantian_in_container") == '0':
+ if get_value("cantian_in_container") == "0":
try:
socket.inet_aton(node_ip)
self.ipv_type = "ipv4"
@@ -1410,7 +1616,10 @@ class Installer:
socket.inet_pton(socket.AF_INET6, node_ip)
self.ipv_type = "ipv6"
except socket.error:
- err_msg = "The invalid IP address : %s is not ipv4 or ipv6 format." % node_ip
+ err_msg = (
+ "The invalid IP address : %s is not ipv4 or ipv6 format."
+ % node_ip
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1423,8 +1632,11 @@ class Installer:
cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (ping_cmd, node_ip)
ret_code, stdout, stderr = _exec_popen(cmd)
- if ret_code or stdout != '3':
- err_msg = "The invalid IP address is %s. ret_code : %s, stdout : %s, stderr : %s" % (node_ip, ret_code, stdout, stderr)
+ if ret_code or stdout != "3":
+ err_msg = (
+ "The invalid IP address is %s. ret_code : %s, stdout : %s, stderr : %s"
+ % (node_ip, ret_code, stdout, stderr)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1439,7 +1651,10 @@ class Installer:
ip_is_found = 0
if ret_code or not int(ip_is_found):
- err_msg = "The invalid IP address is %s. ret_code : %s, ip_is_found : %s, stderr : %s" % (node_ip, ret_code, ip_is_found, stderr)
+ err_msg = (
+ "The invalid IP address is %s. ret_code : %s, ip_is_found : %s, stderr : %s"
+ % (node_ip, ret_code, ip_is_found, stderr)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1450,16 +1665,18 @@ class Installer:
# not meet the requirements of the database.
#########################################################################
def set_numa_config(self):
- if not os.path.exists('/usr/bin/lscpu'):
+ if not os.path.exists("/usr/bin/lscpu"):
LOGGER.info("Warning: lscpu path get error")
return
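+ # Parse "NUMA node(s): <n>" from lscpu for the node count, then read
+ # each node's CPU list below to build the CPU group strings written
+ # into the cantiand config.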
- ret_code, result, stderr = _exec_popen('/usr/bin/lscpu | grep -i "NUMA node(s)"')
+ ret_code, result, stderr = _exec_popen(
+ '/usr/bin/lscpu | grep -i "NUMA node(s)"'
+ )
if ret_code:
err_msg = "can not get numa node parameters, err: %s" % stderr
LOGGER.error(err_msg)
raise Exception(err_msg)
- _result = result.strip().split(':')
+ _result = result.strip().split(":")
if len(_result) != 2:
LOGGER.info("Warning: numa get error, result:%s" % result)
@@ -1474,14 +1691,16 @@ class Installer:
LOGGER.info("Warning: numa(s) size get error, result:%s" % result)
return
while numa_num < int(_result[1].strip()):
- err_code, ans, err_msg = _exec_popen('/usr/bin/lscpu | grep -i "NUMA node%s"' % numa_num)
- _ans = ans.strip().split(':')
+ err_code, ans, err_msg = _exec_popen(
+ '/usr/bin/lscpu | grep -i "NUMA node%s"' % numa_num
+ )
+ _ans = ans.strip().split(":")
if len(_ans) != 2:
LOGGER.info("Warning: numa node get error, ans:%s" % ans)
return
numa_str = _ans[1].strip()
- if platform.machine() == 'aarch64' and numa_num == 0:
- numa_id_str = _ans[1].strip().split('-')
+ if platform.machine() == "aarch64" and numa_num == 0:
+ numa_id_str = _ans[1].strip().split("-")
last_numa_id = numa_id_str[-1]
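+ # On aarch64 with dbstor, when the highest CPU id on node 0 is >= 16,
+ # the CPU list is rewritten to the fixed ranges 0-1,6-11,16-<last>,
+ # leaving cores 2-5 and 12-15 out of the group.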
if int(last_numa_id) >= 16 and g_opts.use_dbstor:
numa_str = "0-1,6-11,16-" + str(last_numa_id)
@@ -1490,7 +1709,9 @@ class Installer:
if not numa_info.isspace():
shm_mysql_cpu_group_info = (numa_info[:-1] + ";") * numa_cpu_num
- self.cantiand_configs["SHM_MYSQL_CPU_GROUP_INFO"] = shm_mysql_cpu_group_info[:-1]
+ self.cantiand_configs["SHM_MYSQL_CPU_GROUP_INFO"] = (
+ shm_mysql_cpu_group_info[:-1]
+ )
self.cantiand_configs["SHM_CPU_GROUP_INFO"] = numa_info
def check_config_options(self):
@@ -1509,8 +1730,9 @@ class Installer:
shared_pool_size = 128 * tmp_mb
data_buffer_size = 128 * tmp_mb
temp_buffer_size = 32 * tmp_mb
- sga_buff_size = (log_buffer_size + shared_pool_size + data_buffer_size
- + temp_buffer_size)
+ sga_buff_size = (
+ log_buffer_size + shared_pool_size + data_buffer_size + temp_buffer_size
+ )
self.set_numa_config()
# parse the value of kernel parameters
@@ -1522,9 +1744,18 @@ class Installer:
check_kernel_parameter(key)
check_invalid_symbol(value)
# Unit conversion
- get_unit_conversion_info = UnitConversionInfo(tmp_gb, tmp_mb, tmp_kb, key, value,
- sga_buff_size, temp_buffer_size, data_buffer_size,
- shared_pool_size, log_buffer_size)
+ get_unit_conversion_info = UnitConversionInfo(
+ tmp_gb,
+ tmp_mb,
+ tmp_kb,
+ key,
+ value,
+ sga_buff_size,
+ temp_buffer_size,
+ data_buffer_size,
+ shared_pool_size,
+ log_buffer_size,
+ )
sga_buff_size = self.do_unit_conversion(get_unit_conversion_info)
except ValueError as ex:
LOGGER.error("check kernel parameter failed: " + str(ex))
@@ -1536,7 +1767,9 @@ class Installer:
# Check the ip address
for item in _list:
if len(_list) != 1 and all_zero_addr_after_ping(item):
- err_msg = "lsnr_addr contains all-zero ip, can not specify other ip."
+ err_msg = (
+ "lsnr_addr contains all-zero ip, can not specify other ip."
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
self.check_ip_is_vaild(item)
@@ -1559,15 +1792,22 @@ class Installer:
self.check_port(self.lsnr_port)
self.lsnr_port = int(self.lsnr_port)
# check sga_buff_size
- cmd = ("cat /proc/meminfo |grep -wE 'MemFree:|Buffers:|Cached:"
- "|SwapCached' |awk '{sum += $2};END {print sum}'")
+ cmd = (
+ "cat /proc/meminfo |grep -wE 'MemFree:|Buffers:|Cached:"
+ "|SwapCached' |awk '{sum += $2};END {print sum}'"
+ )
ret_code, cur_avi_memory, stderr = _exec_popen(cmd)
if ret_code:
- err_msg = "can not get shmmax parameters, command: %s, err: %s" % (cmd, stderr)
+ err_msg = "can not get shmmax parameters, command: %s, err: %s" % (
+ cmd,
+ stderr,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
if sga_buff_size < 114 * tmp_mb:
- err_msg = "sga_buff_size should bigger than or equal to 114*MB, please check it!"
+ err_msg = (
+ "sga_buff_size should bigger than or equal to 114*MB, please check it!"
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
try:
@@ -1604,11 +1844,17 @@ class Installer:
if mode == "decrypted":
ret_pwd = kmc_adapter.decrypt(plain_text)
except Exception as error:
- raise Exception("Failed to %s password of user [sys]. Error: %s" % (mode, str(error))) from error
+ raise Exception(
+ "Failed to %s password of user [sys]. Error: %s" % (mode, str(error))
+ ) from error
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
return ret_pwd
@@ -1616,21 +1862,27 @@ class Installer:
def decrypt_db_passwd():
file_list = glob.glob(CTSQL_INI_FILE)
ctsql_ini_data = file_reader(file_list[0])
- encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find('=') + 1:].strip()
+ encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find("=") + 1 :].strip()
mode = "decrypted"
g_opts.db_passwd = Installer.kmc_resovle_password(mode, encrypt_pwd)
@staticmethod
def set_cms_ini(passwd):
cms_conf = "/opt/cantian/cms/cfg/cms.ini"
- str_cmd = f"sed -i '/_CMS_MES_SSL_KEY_PWD = None/d' {cms_conf}" \
- f"&& echo '_CMS_MES_SSL_KEY_PWD = {passwd}' >> {cms_conf}"
+ str_cmd = (
+ f"sed -i '/_CMS_MES_SSL_KEY_PWD = None/d' {cms_conf}"
+ f"&& echo '_CMS_MES_SSL_KEY_PWD = {passwd}' >> {cms_conf}"
+ )
LOGGER.info("Copy config files cmd: " + str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
- LOGGER.error("update cms.ini failed: " + str(ret_code) + os.linesep + stderr)
- raise Exception("update cms.ini failed: " + str(ret_code) + os.linesep + stderr)
-
+ LOGGER.error(
+ "update cms.ini failed: " + str(ret_code) + os.linesep + stderr
+ )
+ raise Exception(
+ "update cms.ini failed: " + str(ret_code) + os.linesep + stderr
+ )
+
@staticmethod
def set_mes_passwd(passwd):
file_path = "/opt/cantian/common/config/certificates/mes.pass"
@@ -1648,29 +1900,37 @@ class Installer:
elif key == "LSNR_PORT":
self.lsnr_port = value
elif key == "ENABLE_SYSDBA_LOGIN":
- self.enable_sysdba_login = Installer.check_pare_bool_value(
- key, value)
+ self.enable_sysdba_login = Installer.check_pare_bool_value(key, value)
elif key == "REPL_AUTH":
- self.repl_auth = Installer.check_pare_bool_value(
- key, value)
+ self.repl_auth = Installer.check_pare_bool_value(key, value)
elif key == "REPL_SCRAM_AUTH":
- self.repl_scram_auth = Installer.check_pare_bool_value(
- key, value)
+ self.repl_scram_auth = Installer.check_pare_bool_value(key, value)
elif key == "ENABLE_ACCESS_DC":
- self.enable_access_dc = Installer.check_pare_bool_value(
- key, value)
+ self.enable_access_dc = Installer.check_pare_bool_value(key, value)
elif key == "REPLACE_VERIFY_PASSWORD":
- self.replace_password_verify = Installer.check_pare_bool_value(
- key, value)
+ self.replace_password_verify = Installer.check_pare_bool_value(key, value)
else:
return
def do_unit_conversion(self, get_unit_conversion_info):
- tmp_gb, tmp_mb, tmp_kb, key, value, \
- sga_buff_size, temp_buffer_size, data_buffer_size, \
- shared_pool_size, log_buffer_size = get_unit_conversion_info
- if key in ["TEMP_BUFFER_SIZE", "DATA_BUFFER_SIZE",
- "SHARED_POOL_SIZE", "LOG_BUFFER_SIZE"]:
+ (
+ tmp_gb,
+ tmp_mb,
+ tmp_kb,
+ key,
+ value,
+ sga_buff_size,
+ temp_buffer_size,
+ data_buffer_size,
+ shared_pool_size,
+ log_buffer_size,
+ ) = get_unit_conversion_info
+ if key in [
+ "TEMP_BUFFER_SIZE",
+ "DATA_BUFFER_SIZE",
+ "SHARED_POOL_SIZE",
+ "LOG_BUFFER_SIZE",
+ ]:
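+ # Buffer sizes arrive as "<number><unit>", e.g. "128M"; the G/M/K
+ # suffix is mapped to a byte multiplier via the table below.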
if value[0:-1].isdigit() and value[-1:] in ["G", "M", "K"]:
unit_map = {
"G": tmp_gb,
@@ -1699,49 +1959,75 @@ class Installer:
output: NA
"""
# change install path privilege to 700
- str_cmd = "chmod %s %s -R" % (CommonValue.KEY_DIRECTORY_MODE,
- self.install_path)
+ str_cmd = "chmod %s %s -R" % (CommonValue.KEY_DIRECTORY_MODE, self.install_path)
# chmod add-ons/ file 500
- str_cmd += ("&& find '%s'/add-ons -type f | xargs chmod %s "
- % (self.install_path, CommonValue.MID_FILE_MODE))
+ str_cmd += "&& find '%s'/add-ons -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.MID_FILE_MODE,
+ )
# chmod admin/ file 600
- str_cmd += ("&& find '%s'/admin -type f | xargs chmod %s "
- % (self.install_path, CommonValue.KEY_FILE_MODE))
+ str_cmd += "&& find '%s'/admin -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.KEY_FILE_MODE,
+ )
# chmod admin/scripts/fetch_cls_stat.py file 550
- str_cmd += ("&& find '%s'/admin -type f | grep fetch_cls_stat.py | xargs chmod %s "
- % (self.install_path, CommonValue.MAX_FILE_MODE))
+ str_cmd += (
+ "&& find '%s'/admin -type f | grep fetch_cls_stat.py | xargs chmod %s "
+ % (self.install_path, CommonValue.MAX_FILE_MODE)
+ )
# chmod lib/ file 500
- str_cmd += ("&& find '%s'/lib -type f | xargs chmod %s"
- % (self.install_path, CommonValue.MID_FILE_MODE))
+ str_cmd += "&& find '%s'/lib -type f | xargs chmod %s" % (
+ self.install_path,
+ CommonValue.MID_FILE_MODE,
+ )
# chmod bin/ file 500
- str_cmd += ("&& find '%s'/bin -type f | xargs chmod %s "
- % (self.install_path, CommonValue.MID_FILE_MODE))
- str_cmd += ("&& find '%s'/cfg -type f | xargs chmod %s "
- % (self.install_path, CommonValue.KEY_FILE_MODE))
+ str_cmd += "&& find '%s'/bin -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.MID_FILE_MODE,
+ )
+ str_cmd += "&& find '%s'/cfg -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.KEY_FILE_MODE,
+ )
package_xml = os.path.join(self.install_path, "package.xml")
if os.path.exists(package_xml):
- str_cmd += ("&& chmod %s '%s'/package.xml"
- % (CommonValue.MIN_FILE_MODE, self.install_path))
+ str_cmd += "&& chmod %s '%s'/package.xml" % (
+ CommonValue.MIN_FILE_MODE,
+ self.install_path,
+ )
LOGGER.info("Change app permission cmd: %s" % str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
self.failed_pos = self.DECOMPRESS_BIN_FAILED
- err_msg = "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE + str(ret_code) + os.linesep + stderr
+ err_msg = (
+ "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
# Used by the management plane
- str_cmd = "chmod %s %s " % (CommonValue.MAX_DIRECTORY_MODE,
- self.install_path)
- str_cmd += "&& chmod %s %s " % (CommonValue.MAX_DIRECTORY_MODE,
- os.path.join(self.install_path, "admin"))
- str_cmd += "&& chmod %s %s" % (CommonValue.MAX_DIRECTORY_MODE,
- os.path.join(self.install_path, "admin", "scripts"))
+ str_cmd = "chmod %s %s " % (CommonValue.MAX_DIRECTORY_MODE, self.install_path)
+ str_cmd += "&& chmod %s %s " % (
+ CommonValue.MAX_DIRECTORY_MODE,
+ os.path.join(self.install_path, "admin"),
+ )
+ str_cmd += "&& chmod %s %s" % (
+ CommonValue.MAX_DIRECTORY_MODE,
+ os.path.join(self.install_path, "admin", "scripts"),
+ )
LOGGER.info("Change app server/admin/scripts dir for om. cmd: %s" % str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
self.failed_pos = self.DECOMPRESS_BIN_FAILED
- err_msg = "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE + str(ret_code) + os.linesep + stderr
+ err_msg = (
+ "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1752,15 +2038,15 @@ class Installer:
"""
# eg 'length in [8-64]'
if len(passwd) < shortest_len or len(passwd) > 64:
- raise Exception("The length of password must be %s to 64."
- % shortest_len)
+ raise Exception("The length of password must be %s to 64." % shortest_len)
# Can't save with user name
if passwd == self.user:
raise Exception("Error: Password can't be the same as username.")
elif passwd == self.user[::-1]:
- raise Exception("Error: Password cannot be the same as username "
- "in reverse order")
+ raise Exception(
+ "Error: Password cannot be the same as username " "in reverse order"
+ )
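+ # Classify characters into four sets; the password must draw from at
+ # least three of them, and any character outside all four is rejected.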
upper_cases = set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
lower_cases = set("abcdefghijklmnopqrstuvwxyz")
@@ -1774,16 +2060,19 @@ class Installer:
if passwd_set & cases:
types += 1
if types < 3:
- raise Exception("Error: Password must contains at least three"
- " different types of characters.")
+ raise Exception(
+ "Error: Password must contains at least three"
+ " different types of characters."
+ )
# Only can contains enumerated cases
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- raise Exception("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
+ raise Exception(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
def get_new_passwd(self, pw_prompt, user_prompt, shortest_len):
"""Get new passwd"""
@@ -1799,7 +2088,9 @@ class Installer:
continue
print_str_3 = CommonPrint()
- print_str_3.common_log("Please enter %s of %s again: " % (pw_prompt, user_prompt))
+ print_str_3.common_log(
+ "Please enter %s of %s again: " % (pw_prompt, user_prompt)
+ )
new_passwd2 = getpass.getpass()
if new_passwd == new_passwd2:
@@ -1821,9 +2112,11 @@ class Installer:
# 0. "_SYS_PASSWORD" can't be set when ENABLE_SYSDBA_LOGIN is False
sys_password = self.cantiand_configs["_SYS_PASSWORD"]
if not self.enable_sysdba_login and len(sys_password) != 0:
- raise Exception("Can't use _SYS_PASSWORD to set the password of "
- "user [SYS] in the installation script when "
- "ENABLE_SYSDBA_LOGIN is False.")
+ raise Exception(
+ "Can't use _SYS_PASSWORD to set the password of "
+ "user [SYS] in the installation script when "
+ "ENABLE_SYSDBA_LOGIN is False."
+ )
# 1. Get passed from parameter -C
# Set passwd of SYS in cantiand.ini by parameter -C
@@ -1834,9 +2127,8 @@ class Installer:
if sys.stdin.isatty():
# If not pipe content, get passwd by interactive input
g_opts.db_passwd = self.get_new_passwd(
- pw_prompt="database password",
- user_prompt="user [SYS]",
- shortest_len=8)
+ pw_prompt="database password", user_prompt="user [SYS]", shortest_len=8
+ )
else:
try:
# Get passwd from pipe
@@ -1848,25 +2140,45 @@ class Installer:
def install_xnet_lib(self):
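+ # Pick the add-on flavour that matches the transport: mlnx libs for
+ # RDMA startup, 1823 libs for the 1823 RDMA variant, nomlnx otherwise.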
if is_rdma_startup():
- str_cmd = "cp -rf %s/add-ons/mlnx/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -rf %s/add-ons/mlnx/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
elif is_rdma_1823_startup():
- str_cmd = "cp -rf %s/add-ons/1823/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -rf %s/add-ons/1823/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
else:
- str_cmd = "cp -rf %s/add-ons/nomlnx/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -rf %s/add-ons/nomlnx/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
LOGGER.info("Install xnet lib cmd: " + str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
- LOGGER.error("Install xnet lib return: " + str(ret_code) + os.linesep + stderr)
- raise Exception("Install xnet lib return: " + str(ret_code) + os.linesep + stderr)
+ LOGGER.error(
+ "Install xnet lib return: " + str(ret_code) + os.linesep + stderr
+ )
+ raise Exception(
+ "Install xnet lib return: " + str(ret_code) + os.linesep + stderr
+ )
def install_kmc_lib(self):
- str_cmd = "cp -rf %s/add-ons/kmc_shared/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -rf %s/add-ons/kmc_shared/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
LOGGER.info("install kmc lib cmd:" + str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
- LOGGER.error("Install kmc lib return: " + str(ret_code) + os.linesep + stderr)
- raise Exception("Install kmc lib return: " + str(ret_code) + os.linesep + stderr)
+ LOGGER.error(
+ "Install kmc lib return: " + str(ret_code) + os.linesep + stderr
+ )
+ raise Exception(
+ "Install kmc lib return: " + str(ret_code) + os.linesep + stderr
+ )
#########################################################################
# Unzip the installation files to the installation directory.
@@ -1876,36 +2188,72 @@ class Installer:
Unzip the installation files to the installation directory.
:return: NA
"""
- self.run_file = "/opt/cantian/image/cantian_connector/CantianKernel/Cantian-DATABASE-CENTOS-64bit/" \
- "Cantian-RUN-CENTOS-64bit.tar.gz"
+ self.run_file = (
+ "/opt/cantian/image/cantian_connector/CantianKernel/Cantian-DATABASE-CENTOS-64bit/"
+ "Cantian-RUN-CENTOS-64bit.tar.gz"
+ )
self.run_pkg_name = self.get_decompress_tarname(self.run_file)
LOGGER.info("Decompressing run file.")
if g_opts.use_dbstor:
- os.makedirs("%s/dbstor/conf/dbs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/conf/infra/config" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/data/logs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/data/ftds" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
+ os.makedirs(
+ "%s/dbstor/conf/dbs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
+ os.makedirs(
+ "%s/dbstor/conf/infra/config" % self.data,
+ CommonValue.KEY_DIRECTORY_PERMISSION,
+ )
+ os.makedirs(
+ "%s/dbstor/data/logs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
+ os.makedirs(
+ "%s/dbstor/data/ftds" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
if is_rdma_startup() or is_rdma_1823_startup():
- str_cmd = "cp %s/cfg/node_config_rdma.xml %s/dbstor/conf/infra/config/node_config.xml" % (
- self.install_path, self.data)
+ str_cmd = (
+ "cp %s/cfg/node_config_rdma.xml %s/dbstor/conf/infra/config/node_config.xml"
+ % (self.install_path, self.data)
+ )
else:
- str_cmd = "cp %s/cfg/node_config_tcp.xml %s/dbstor/conf/infra/config/node_config.xml" % (
- self.install_path, self.data)
-
- str_cmd += " && cp %s/cfg/osd.cfg %s/dbstor/conf/infra/config/osd.cfg" % (self.install_path, self.data)
- str_cmd += " && cp /opt/cantian/dbstor/tools/dbstor_config.ini %s/dbstor/conf/dbs/" % (self.data)
- str_cmd += " && echo 'DBSTOR_OWNER_NAME = cantian' >> %s/dbstor/conf/dbs/dbstor_config.ini" % (self.data)
- str_cmd += " && sed -i '/^\s*$/d' %s/dbstor/conf/dbs/dbstor_config.ini" % (self.data)
- str_cmd += " && chown -R %s:%s %s/dbstor" % (self.user, self.group, self.data)
- str_cmd += " && chmod 640 %s/dbstor/conf/dbs/dbstor_config.ini" % (self.data)
+ str_cmd = (
+ "cp %s/cfg/node_config_tcp.xml %s/dbstor/conf/infra/config/node_config.xml"
+ % (self.install_path, self.data)
+ )
+
+ str_cmd += " && cp %s/cfg/osd.cfg %s/dbstor/conf/infra/config/osd.cfg" % (
+ self.install_path,
+ self.data,
+ )
+ str_cmd += (
+ " && cp /opt/cantian/dbstor/tools/dbstor_config.ini %s/dbstor/conf/dbs/"
+ % (self.data)
+ )
+ str_cmd += (
+ " && echo 'DBSTOR_OWNER_NAME = cantian' >> %s/dbstor/conf/dbs/dbstor_config.ini"
+ % (self.data)
+ )
+ str_cmd += " && sed -i '/^\s*$/d' %s/dbstor/conf/dbs/dbstor_config.ini" % (
+ self.data
+ )
+ str_cmd += " && chown -R %s:%s %s/dbstor" % (
+ self.user,
+ self.group,
+ self.data,
+ )
+ str_cmd += " && chmod 640 %s/dbstor/conf/dbs/dbstor_config.ini" % (
+ self.data
+ )
LOGGER.info("Copy config files cmd: " + str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
self.failed_pos = self.DECOMPRESS_BIN_FAILED
- LOGGER.error("Decompress bin return: " + str(ret_code) + os.linesep + stderr)
- raise Exception("Decompress bin return: " + str(ret_code) + os.linesep + stderr)
+ LOGGER.error(
+ "Decompress bin return: " + str(ret_code) + os.linesep + stderr
+ )
+ raise Exception(
+ "Decompress bin return: " + str(ret_code) + os.linesep + stderr
+ )
if not g_opts.cantian_in_container:
cantian_check_share_logic_ip_isvalid("share", g_opts.share_logic_ip)
@@ -1917,14 +2265,19 @@ class Installer:
self.change_app_permission()
# change owner to user:group
- str_cmd = "chown %s:%s -hR %s " % (self.user, self.group,
- self.install_path)
+ str_cmd = "chown %s:%s -hR %s " % (self.user, self.group, self.install_path)
# Change the owner
LOGGER.info("Change owner cmd: %s" % str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
self.failed_pos = self.DECOMPRESS_BIN_FAILED
- err_msg = "chown to %s: %s return: %s%s%s" % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ err_msg = "chown to %s: %s return: %s%s%s" % (
+ self.user,
+ self.group,
+ str(ret_code),
+ os.linesep,
+ stderr,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -1934,30 +2287,40 @@ class Installer:
try:
flags = os.O_RDWR
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.user_profile, flags, modes), 'a') as _file:
- _file.write("export CTDB_HOME=\"%s\"" % self.install_path)
+ with os.fdopen(os.open(self.user_profile, flags, modes), "a") as _file:
+ _file.write('export CTDB_HOME="%s"' % self.install_path)
_file.write(os.linesep)
- _file.write("export PATH=\"%s\":$PATH"
- % os.path.join(MYSQL_BIN_DIR, "bin"))
+ _file.write(
+ 'export PATH="%s":$PATH' % os.path.join(MYSQL_BIN_DIR, "bin")
+ )
_file.write(os.linesep)
- _file.write("export PATH=\"%s\":$PATH"
- % os.path.join(self.install_path, "bin"))
+ _file.write(
+ 'export PATH="%s":$PATH' % os.path.join(self.install_path, "bin")
+ )
_file.write(os.linesep)
if "LD_LIBRARY_PATH" in os.environ:
- _file.write("export LD_LIBRARY_PATH=\"%s\":\"%s\":\"%s\""
- ":$LD_LIBRARY_PATH"
- % (os.path.join(self.install_path, "lib"),
- os.path.join(self.install_path, "add-ons"),
- os.path.join(MYSQL_BIN_DIR, "lib/plugin")))
+ _file.write(
+ 'export LD_LIBRARY_PATH="%s":"%s":"%s"'
+ ":$LD_LIBRARY_PATH"
+ % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(self.install_path, "add-ons"),
+ os.path.join(MYSQL_BIN_DIR, "lib/plugin"),
+ )
+ )
else:
- _file.write("export LD_LIBRARY_PATH=\"%s\":\"%s\":\"%s\""
- % (os.path.join(self.install_path, "lib"),
- os.path.join(self.install_path, "add-ons"),
- os.path.join(MYSQL_BIN_DIR, "lib/plugin")))
+ _file.write(
+ 'export LD_LIBRARY_PATH="%s":"%s":"%s"'
+ % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(self.install_path, "add-ons"),
+ os.path.join(MYSQL_BIN_DIR, "lib/plugin"),
+ )
+ )
_file.write(os.linesep)
if self.old_data_path == "":
# set CTDB_DATA
- _file.write("export CTDB_DATA=\"%s\"" % self.data)
+ _file.write('export CTDB_DATA="%s"' % self.data)
_file.write(os.linesep)
_file.flush()
except IOError as ex:
@@ -1983,19 +2346,25 @@ class Installer:
# Avoid create-database failures caused by the value of CTSQL_SSL_KEY_PASSWD
self.clean_ssl_env()
- os.environ['PATH'] = (os.path.join(self.install_path, "bin")
- + ":" + os.environ['PATH'])
+ os.environ["PATH"] = (
+ os.path.join(self.install_path, "bin") + ":" + os.environ["PATH"]
+ )
# in some systems LD_LIBRARY_PATH is not set,
# so we must check it, or an exception will be raised
- if 'LD_LIBRARY_PATH' in os.environ:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s:%s" % (
- os.path.join(self.install_path, "lib"), os.path.join(
- self.install_path, "add-ons", ),
- os.environ['LD_LIBRARY_PATH']))
+ if "LD_LIBRARY_PATH" in os.environ:
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s:%s" % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(
+ self.install_path,
+ "add-ons",
+ ),
+ os.environ["LD_LIBRARY_PATH"],
+ )
else:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s" % (
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s" % (
os.path.join(self.install_path, "lib"),
- os.path.join(self.install_path, "add-ons"),))
+ os.path.join(self.install_path, "add-ons"),
+ )
os.environ["CTDB_HOME"] = self.install_path
os.environ["CTDB_DATA"] = self.data
os.environ["CANTIANLOG"] = self.cantiand_configs["LOG_HOME"]
@@ -2025,8 +2394,10 @@ class Installer:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (conf_file, cmd, stderr)
+ )
def clean_old_conf(self, param_list, conf_file):
"""
@@ -2037,25 +2408,33 @@ class Installer:
cmd = ""
# make the command of delete the parameter
for parameter in param_list:
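+ # '[' and ']' are escaped because they are regex metacharacters in
+ # the sed /^.../d address used to delete the old parameter line.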
- cmd += "sed -i '/^%s/d' %s;" % (parameter.replace('[', '\[').replace(']', '\]'), conf_file)
+ cmd += "sed -i '/^%s/d' %s;" % (
+ parameter.replace("[", "\[").replace("]", "\]"),
+ conf_file,
+ )
if cmd:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s"
- % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (conf_file, cmd, stderr)
+ )
def generate_nomount_passwd(self, plain_passwd=""):
cmd = "source ~/.bashrc && %s/bin/ctencrypt -e PBKDF2" % self.install_path
- g_opts.db_passwd = g_opts.db_passwd if len(plain_passwd.strip()) == 0 else plain_passwd.strip()
+ g_opts.db_passwd = (
+ g_opts.db_passwd if len(plain_passwd.strip()) == 0 else plain_passwd.strip()
+ )
values = [g_opts.db_passwd, g_opts.db_passwd]
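+ # ctencrypt prompts for the password twice (enter and confirm), so the
+ # same value is passed twice to _exec_popen as interactive input.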
ret_code, stdout, stderr = _exec_popen(cmd, values)
if ret_code:
- raise OSError("Failed to encrypt password of user [sys]."
- " Error: %s" % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to encrypt password of user [sys]."
+ " Error: %s" % (stderr + os.linesep + stderr)
+ )
# Example of output:
# Please enter password to encrypt:
@@ -2080,18 +2459,26 @@ class Installer:
cmd = "echo >> %s" % conf_file
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (file, cmd, stderr)
+ )
# Generate new kernel parameters
common_parameters = copy.deepcopy(config)
# Set password of NOMOUNT mode before create database.
if encrypt_passwd and not g_opts.cantian_in_container:
mode = "encrypted"
- self.ctsql_conf["SYS_PASSWORD"] = Installer.kmc_resovle_password(mode, common_parameters["_SYS_PASSWORD"])
- common_parameters["_SYS_PASSWORD"] = self.generate_nomount_passwd(common_parameters["_SYS_PASSWORD"])
+ self.ctsql_conf["SYS_PASSWORD"] = Installer.kmc_resovle_password(
+ mode, common_parameters["_SYS_PASSWORD"]
+ )
+ common_parameters["_SYS_PASSWORD"] = self.generate_nomount_passwd(
+ common_parameters["_SYS_PASSWORD"]
+ )
if g_opts.mes_ssl_switch == True:
- common_parameters["MES_SSL_KEY_PWD"] = Installer.kmc_resovle_password(mode, g_opts.cert_encrypt_pwd)
+ common_parameters["MES_SSL_KEY_PWD"] = Installer.kmc_resovle_password(
+ mode, g_opts.cert_encrypt_pwd
+ )
self.set_cms_ini(common_parameters["MES_SSL_KEY_PWD"])
self.set_mes_passwd(common_parameters["MES_SSL_KEY_PWD"])
g_opts.password = common_parameters["_SYS_PASSWORD"]
@@ -2118,8 +2505,10 @@ class Installer:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (conf_file, cmd, stderr)
+ )
def set_cluster_conf(self):
"""
@@ -2131,9 +2520,16 @@ class Installer:
cmd = "echo >> %s" % conf_file
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s, output: %s" % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s, output: %s"
+ % (conf_file, cmd, stderr)
+ )
size = CLUSTER_SIZE
- if g_opts.running_mode in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST]:
+ if g_opts.running_mode in [
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ ]:
size = 1
if g_opts.node_id == 0 or g_opts.node_id == 1:
node_ip = self.cantiand_configs["INTERCONNECT_ADDR"].split(",")
@@ -2153,7 +2549,7 @@ class Installer:
common_parameters = {
"REPORT_FILE": g_opts.log_file,
"STATUS_LOG": os.path.join(self.data, "log", "cantianstatus.log"),
- "LD_LIBRARY_PATH": os.environ['LD_LIBRARY_PATH'],
+ "LD_LIBRARY_PATH": os.environ["LD_LIBRARY_PATH"],
"USER_HOME": self.user_home_path,
"USE_GSS": g_opts.use_gss,
"USE_DBSTOR": g_opts.use_dbstor,
@@ -2179,14 +2575,16 @@ class Installer:
if g_opts.use_dbstor:
common_parameters["CONTROL_FILES"] = self.cantiand_configs["CONTROL_FILES"]
- common_parameters["SHARED_PATH"] = self.cantiand_configs["SHARED_PATH"],
- common_parameters["DBSTOR_NAMESPACE"] = self.cantiand_configs["DBSTOR_NAMESPACE"]
+ common_parameters["SHARED_PATH"] = (self.cantiand_configs["SHARED_PATH"],)
+ common_parameters["DBSTOR_NAMESPACE"] = self.cantiand_configs[
+ "DBSTOR_NAMESPACE"
+ ]
common_parameters["ENABLE_DBSTOR"] = self.cantiand_configs["ENABLE_DBSTOR"]
cantian_config_data = common_parameters
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_CONF_FILE, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(CANTIAN_CONF_FILE, flags, modes), "w") as fp:
json.dump(cantian_config_data, fp)
return common_parameters
@@ -2227,8 +2625,10 @@ class Installer:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (self.CANTIAND_HBA_FILE, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (self.CANTIAND_HBA_FILE, cmd, stderr)
+ )
def init_db_instance(self):
"""
@@ -2245,7 +2645,9 @@ class Installer:
self.set_cthba_ssl()
self.add_cthba_item()
# g_opts.isencrept enables encryption by default
- self.set_conf(self.cantiand_configs, self.CANTIAND_CONF_FILE, g_opts.isencrept)
+ self.set_conf(
+ self.cantiand_configs, self.CANTIAND_CONF_FILE, g_opts.isencrept
+ )
self.set_cluster_conf()
except Exception as error:
LOGGER.error(str(error))
@@ -2267,7 +2669,7 @@ class Installer:
ins_list = ins_str.split(os.sep)
reg_string = ""
for i in ins_list:
- if (i == ""):
+ if i == "":
continue
else:
reg_string += r"\/" + i
@@ -2281,8 +2683,7 @@ class Installer:
LOGGER.info("Clean environment variables cmd: %s" % cmd)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- log("Failed to clean environment variables."
- " Error: %s" % stderr)
+ log("Failed to clean environment variables." " Error: %s" % stderr)
err_msg = "Failed to clean environment variables."
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -2315,16 +2716,17 @@ class Installer:
# Clear environment variable CTDB_DATA
data_cmd = r"/^\s*export\s*CTDB_DATA=.*$/d"
# Clear environment variable PATH about database
- path_cmd = (r"/^\s*export\s*PATH=.*%s\/bin.*:\$PATH$/d"
- % self.genregstring(self.install_path))
+ path_cmd = r"/^\s*export\s*PATH=.*%s\/bin.*:\$PATH$/d" % self.genregstring(
+ self.install_path
+ )
# Clear environment variable LD_LIBRARY_PATH about database
- lib_cmd = (r"/^\s*export\s*LD_LIBRARY_PATH=.*%s\/lib.*"
- r":.*%s\/add-ons*$/d"
- % (self.genregstring(self.install_path),
- self.genregstring(self.install_path)))
+ lib_cmd = r"/^\s*export\s*LD_LIBRARY_PATH=.*%s\/lib.*" r":.*%s\/add-ons*$/d" % (
+ self.genregstring(self.install_path),
+ self.genregstring(self.install_path),
+ )
# Clear environment variable CTDB_HOME
home_cmd = r"/^\s*export\s*CTDB_HOME=.*$/d"
- # Clear environment variable CANTIANLOG
+ # Clear environment variable CANTIANLOG
cantianlog_cmd = r"/^\s*export\s*CANTIANLOG=.*$/d"
# Clear environment ssl cert
@@ -2334,8 +2736,17 @@ class Installer:
mode_cmd = r"/^\s*export\s*CTSQL_SSL_MODE=.*$/d"
cipher_cmd = r"/^\s*export\s*CTSQL_SSL_KEY_PASSWD=.*$/d"
- cmds = [path_cmd, lib_cmd, home_cmd, cantianlog_cmd,
- ca_cmd, cert_cmd, key_cmd, mode_cmd, cipher_cmd]
+ cmds = [
+ path_cmd,
+ lib_cmd,
+ home_cmd,
+ cantianlog_cmd,
+ ca_cmd,
+ cert_cmd,
+ key_cmd,
+ mode_cmd,
+ cipher_cmd,
+ ]
if self.option == self.INS_ALL:
cmds.insert(0, data_cmd)
@@ -2373,14 +2784,21 @@ class Installer:
def __kill_process(self, process_name):
# root do install, need su - user kill process
- kill_cmd = (r"proc_pid_list=`ps ux | grep %s | grep -v grep"
- r"|awk '{print $2}'` && " % process_name)
- kill_cmd += (r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
- r"$proc_pid_list | xargs kill -9; fi)")
+ kill_cmd = (
+ r"proc_pid_list=`ps ux | grep %s | grep -v grep"
+ r"|awk '{print $2}'` && " % process_name
+ )
+ kill_cmd += (
+ r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
+ r"$proc_pid_list | xargs kill -9; fi)"
+ )
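+ # The pid list is collected first, and the X\"...\" test kills only
+ # when the list is non-empty, so an empty match never reaches kill -9.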
LOGGER.info("kill process cmd: %s" % kill_cmd)
ret_code, stdout, stderr = _exec_popen(kill_cmd)
if ret_code:
- err_msg = "kill process %s faild. ret_code : %s, stdout : %s, stderr : %s" % (process_name, ret_code, stdout, stderr)
+ err_msg = (
+ "kill process %s faild. ret_code : %s, stdout : %s, stderr : %s"
+ % (process_name, ret_code, stdout, stderr)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -2405,20 +2823,30 @@ class Installer:
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
raise Exception(
- "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE + str(ret_code) + os.linesep + stderr)
+ "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
# create data, cfg, log dir, trc
data_dir = "%s/data" % self.data
if not g_opts.use_dbstor:
- mount_storage_data = f"/mnt/dbdata/remote/storage_{g_opts.storage_dbstor_fs}/data"
+ mount_storage_data = (
+ f"/mnt/dbdata/remote/storage_{g_opts.storage_dbstor_fs}/data"
+ )
cmd = "ln -s %s %s;" % (mount_storage_data, self.data)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not link data dir, command: %s, output: %s" % (cmd, stderr))
+ raise Exception(
+ "Can not link data dir, command: %s, output: %s" % (cmd, stderr)
+ )
else:
os.makedirs(data_dir, CommonValue.KEY_DIRECTORY_PERMISSION)
os.makedirs("%s/log" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/archive_log" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
+ os.makedirs(
+ "%s/archive_log" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
os.makedirs("%s/trc" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
os.makedirs("%s/tmp" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
@@ -2426,18 +2854,36 @@ class Installer:
os.makedirs("%s/dbs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
# move the config files about database.
- cmd = "mv -i %s/cfg %s;touch %s/cfg/%s" % (self.install_path, self.data, self.data, self.CTSQL_CONF_FILE)
+ cmd = "mv -i %s/cfg %s;touch %s/cfg/%s" % (
+ self.install_path,
+ self.data,
+ self.data,
+ self.CTSQL_CONF_FILE,
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not create prepare data dir, command: %s, output: %s" % (cmd, stderr))
+ raise Exception(
+ "Can not create prepare data dir, command: %s, output: %s"
+ % (cmd, stderr)
+ )
# Change the mode of config files to 600
cmd = "chmod {0} {1}/cfg/{2} {1}/cfg/{3} {1}/cfg/{4} {1}/cfg/{5}".format(
- CommonValue.KEY_FILE_MODE, self.data, self.CANTIAND_CONF_FILE,
- self.CMS_CONF_FILE, self.CANTIAND_HBA_FILE, self.CTSQL_CONF_FILE)
+ CommonValue.KEY_FILE_MODE,
+ self.data,
+ self.CANTIAND_CONF_FILE,
+ self.CMS_CONF_FILE,
+ self.CANTIAND_HBA_FILE,
+ self.CTSQL_CONF_FILE,
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chmod %s return: " % CommonValue.KEY_FILE_MODE + str(ret_code) + os.linesep + stderr)
+ raise Exception(
+ "chmod %s return: " % CommonValue.KEY_FILE_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
# Change the owner of config files
self.chown_data_dir()
@@ -2474,7 +2920,9 @@ class Installer:
# start cantian dn
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'r') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "r"
+ ) as load_fp:
start_parameters = json.load(load_fp)
self.chown_data_dir()
status_success = False
@@ -2484,23 +2932,43 @@ class Installer:
start_mode = self.OPEN_MODE
if g_opts.install_type == "reserve":
start_mode = self.OPEN_MODE
- if start_parameters.setdefault('db_create_status', "default") == "done" and g_opts.node_id == 0:
+ if (
+ start_parameters.setdefault("db_create_status", "default") == "done"
+ and g_opts.node_id == 0
+ ):
start_mode = self.OPEN_MODE
- if start_parameters.setdefault('mysql_init', "default") == "done" or no_init:
+ if start_parameters.setdefault("mysql_init", "default") == "done" or no_init:
mysql_init = False
# Start the instance; depending on the running mode this points to cantiand or cantiand with mysql
cmd = "echo -e '%s' | sh %s -P cantiand -M %s -T %s -C %s >> %s 2>&1" % (
- g_opts.db_passwd, INSTALL_SCRIPT, start_mode, g_opts.running_mode.lower(),
- g_opts.mysql_config_file_path, g_opts.log_file)
+ g_opts.db_passwd,
+ INSTALL_SCRIPT,
+ start_mode,
+ g_opts.running_mode.lower(),
+ g_opts.mysql_config_file_path,
+ g_opts.log_file,
+ )
install_log_file = self.status_log
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_IN_CLUSTER, CANTIAND_WITH_MYSQL_ST]:
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_ST,
+ ]:
log_home = self.cantiand_configs["LOG_HOME"]
install_log_file = os.path.join(log_home, "run", "cantiand.rlog")
if not mysql_init:
- cmd = "echo -e '%s' | sh %s -P cantiand -M %s -T %s -C %s -R >> %s 2>&1" % (
- g_opts.db_passwd, INSTALL_SCRIPT, start_mode, g_opts.running_mode.lower(),
- g_opts.mysql_config_file_path, g_opts.log_file)
+ cmd = (
+ "echo -e '%s' | sh %s -P cantiand -M %s -T %s -C %s -R >> %s 2>&1"
+ % (
+ g_opts.db_passwd,
+ INSTALL_SCRIPT,
+ start_mode,
+ g_opts.running_mode.lower(),
+ g_opts.mysql_config_file_path,
+ g_opts.log_file,
+ )
+ )
now_time = datetime.now()
begin_time = str(now_time).split(".")[0]
else:
@@ -2511,13 +2979,17 @@ class Installer:
output = stdout + stderr
if g_opts.db_passwd in output:
output = "installdb.sh was killed"
- raise Exception("Can not start instance %s.\nOutput: %s" % (self.data, output))
+ raise Exception(
+ "Can not start instance %s.\nOutput: %s" % (self.data, output)
+ )
# In some conditions cantian takes a while to start, so wait for it
# by checking the process periodically after the start command
# returns. If the cantiand process can't be found within the
# expected time, the startup is considered to have failed.
- tem_log_info, status_success = self.init_some_condition(status_success, install_log_file, begin_time)
+ tem_log_info, status_success = self.init_some_condition(
+ status_success, install_log_file, begin_time
+ )
# the log file's permission is 600, change it
if os.path.exists(self.status_log):
@@ -2527,14 +2999,18 @@ class Installer:
os.chmod(self.status_log, CommonValue.KEY_FILE_PERMISSION)
if not status_success:
- raise Exception("Can not get instance '%s' process pid,"
- "The detailed information: '%s' " % (self.data, tem_log_info))
+ raise Exception(
+ "Can not get instance '%s' process pid,"
+ "The detailed information: '%s' " % (self.data, tem_log_info)
+ )
log("cantiand has started")
flags = os.O_RDWR | os.O_CREAT
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w+') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w+"
+ ) as load_fp:
start_parameters = json.load(load_fp)
- start_status_item = {'mysql_init': "done"}
+ start_status_item = {"mysql_init": "done"}
start_parameters.update(start_status_item)
load_fp.seek(0)
load_fp.truncate()
@@ -2547,7 +3023,10 @@ class Installer:
ret_code, stdout, stderr = _exec_popen(cmd)
output = stdout + stderr
if ret_code:
- LOGGER.info("Failed to get the error message from '%s'. Output: %s" % (run_log, output))
+ LOGGER.info(
+ "Failed to get the error message from '%s'. Output: %s"
+ % (run_log, output)
+ )
return ""
else:
return output
@@ -2558,40 +3037,61 @@ class Installer:
for i in range(0, start_time):
time.sleep(3)
- cmd = ("ps aux | grep -v grep | grep %s | grep $ "
- "|awk '{print $2}'" % (self.data))
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST,
- CANTIAND_WITH_MYSQL_IN_CLUSTER]:
+ cmd = "ps aux | grep -v grep | grep %s | grep $ " "|awk '{print $2}'" % (
+ self.data
+ )
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
cmd = "ps -ef | grep /opt/cantian/mysql/install/mysql/bin/mysqld | grep -v grep | awk '{print $2}'"
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
status_success = False
- tem_log_info = ("Failed to execute cmd: %s.output:%s"
- % (str(cmd), str(stderr)))
+ tem_log_info = "Failed to execute cmd: %s.output:%s" % (
+ str(cmd),
+ str(stderr),
+ )
break
else:
- all_the_text = open(status_log, errors='ignore').read()
+ all_the_text = open(status_log, errors="ignore").read()
is_instance_started = False
is_instance_failed = False
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST,
- CANTIAND_WITH_MYSQL_IN_CLUSTER] :
- succ_pattern = re.compile(r'.{10}(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?instance started.*?', re.IGNORECASE)
- fail_pattern = re.compile(r'.{10}(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?instance startup failed.*?', re.IGNORECASE)
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
+ succ_pattern = re.compile(
+ r".{10}(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?instance started.*?",
+ re.IGNORECASE,
+ )
+ fail_pattern = re.compile(
+ r".{10}(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?instance startup failed.*?",
+ re.IGNORECASE,
+ )
succ_timestamps = re.findall(succ_pattern, all_the_text)
fail_timestamps = re.findall(fail_pattern, all_the_text)
- is_instance_started = len(succ_timestamps) != 0 and max(succ_timestamps) >= begin_time
- is_instance_failed = len(fail_timestamps) != 0 and max(fail_timestamps) >= begin_time
+ is_instance_started = (
+ len(succ_timestamps) != 0 and max(succ_timestamps) >= begin_time
+ )
+ is_instance_failed = (
+ len(fail_timestamps) != 0 and max(fail_timestamps) >= begin_time
+ )
else:
is_instance_started = all_the_text.find("instance started") >= 0
- is_instance_failed = all_the_text.find("instance startup failed") > 0
- if (is_instance_started):
+ is_instance_failed = (
+ all_the_text.find("instance startup failed") > 0
+ )
+ if is_instance_started:
if stdout:
status_success = True
self.pid = stdout.strip()
LOGGER.info("start instance successfully, pid = %s" % stdout)
break
- elif (is_instance_failed):
+ elif is_instance_failed:
status_success = False
tem_log_info = all_the_text.strip()
# Get the error message from run log. After roll_back,
@@ -2600,7 +3100,10 @@ class Installer:
run_log_info = self.get_invalid_parameter()
if run_log_info:
tem_log_info += os.linesep
- tem_log_info += ("The run log error: %s%s" % (os.linesep, run_log_info))
+ tem_log_info += "The run log error: %s%s" % (
+ os.linesep,
+ run_log_info,
+ )
break
if (i + 1) == start_time:
status_success = False
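The hunks above gate every log match on begin_time, so "instance started" / "instance startup failed" entries left over from an earlier start attempt are ignored; because the timestamps are zero-padded, plain string comparison orders them correctly. A standalone sketch of that gating (log format and sample text are illustrative):

    import re

    def instance_state(log_text, begin_time):
        # Collect timestamps of success/failure entries; only entries not
        # older than begin_time count for this start attempt.
        succ = re.findall(
            r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*?instance started",
            log_text, re.IGNORECASE)
        fail = re.findall(
            r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*?instance startup failed",
            log_text, re.IGNORECASE)
        if fail and max(fail) >= begin_time:
            return "failed"
        if succ and max(succ) >= begin_time:
            return "started"
        return None  # keep polling

    sample = ("UTC+8 2024-01-02 10:00:00 instance started\n"
              "UTC+8 2024-01-02 10:05:00 instance startup failed\n")
    print(instance_state(sample, "2024-01-02 10:03:00"))  # failed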
@@ -2632,30 +3135,44 @@ class Installer:
return "Install type is reserve."
if self.enable_sysdba_login:
- cmd = "source ~/.bashrc && %s/bin/ctsql / as sysdba -q -D %s -f %s" % (self.install_path,
- self.data, sql_file)
+ cmd = "source ~/.bashrc && %s/bin/ctsql / as sysdba -q -D %s -f %s" % (
+ self.install_path,
+ self.data,
+ sql_file,
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
else:
- cmd = ("source ~/.bashrc && echo -e '%s' | %s/bin/ctsql %s@%s:%s -q -f %s" % (
- g_opts.db_passwd,
- self.install_path,
- g_opts.db_user,
- self.login_ip,
- self.lsnr_port,
- sql_file))
+ cmd = (
+ "source ~/.bashrc && echo -e '%s' | %s/bin/ctsql %s@%s:%s -q -f %s"
+ % (
+ g_opts.db_passwd,
+ self.install_path,
+ g_opts.db_user,
+ self.login_ip,
+ self.lsnr_port,
+ sql_file,
+ )
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
- output = "%s%s".replace("password", "***") % (str(stdout_data), str(stderr_data))
+ output = "%s%s".replace("password", "***") % (
+ str(stdout_data),
+ str(stderr_data),
+ )
if g_opts.db_passwd in output:
output = "execute ctsql file failed"
LOGGER.info("Execute sql file %s output: %s" % (sql_file, output))
if return_code:
- raise Exception("Failed to execute sql file %s, output:%s" % (sql_file, output))
+ raise Exception(
+ "Failed to execute sql file %s, output:%s" % (sql_file, output)
+ )
# return code is 0, but output has error info, CT-xxx, ZS-xxx
result = output.replace("\n", "")
if re.match(".*CT-\d{5}.*", result) or re.match(".*ZS-\d{5}.*", result):
- raise Exception("Failed to execute sql file %s, output:%s" % (sql_file, output))
+ raise Exception(
+ "Failed to execute sql file %s, output:%s" % (sql_file, output)
+ )
return stdout_data
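ctsql can return exit code 0 while the output still carries an error code, which is why the output is re-scanned for CT-xxxxx / ZS-xxxxx markers after the return-code check. A minimal illustration of the same test (sample outputs are made up):

    import re

    def has_ctsql_error(output):
        # strip newlines first so a code split across lines still matches
        result = output.replace("\n", "")
        return bool(re.match(r".*CT-\d{5}.*", result)
                    or re.match(r".*ZS-\d{5}.*", result))

    print(has_ctsql_error("SQL> 1 rows fetched."))             # False
    print(has_ctsql_error("SQL> CT-00601, invalid password"))  # True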
def execute_sql(self, sql, message):
@@ -2673,26 +3190,27 @@ class Installer:
ctsql_path = "%s/bin/*sql" % self.install_path
ctsql_path = glob.glob(ctsql_path)[0]
if self.enable_sysdba_login:
- cmd = ("source ~/.bashrc && %s / as sysdba "
- "-q -D %s -c \"%s\""
- % (ctsql_path, self.data, sql))
+ cmd = "source ~/.bashrc && %s / as sysdba " '-q -D %s -c "%s"' % (
+ ctsql_path,
+ self.data,
+ sql,
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
else:
- cmd = ("source ~/.bashrc && echo -e '%s' | %s %s@%s:%s -q"
- " -c \"%s\"" % (
- g_opts.db_passwd,
- ctsql_path,
- g_opts.db_user,
- self.login_ip,
- self.lsnr_port,
- sql))
+ cmd = "source ~/.bashrc && echo -e '%s' | %s %s@%s:%s -q" ' -c "%s"' % (
+ g_opts.db_passwd,
+ ctsql_path,
+ g_opts.db_user,
+ self.login_ip,
+ self.lsnr_port,
+ sql,
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
output = "%s%s" % (str(stdout_data), str(stderr_data))
output = output.replace(g_opts.db_passwd, "*****")
if return_code:
- raise Exception("Failed to %s by sql, output:%s"
- % (message, output))
+ raise Exception("Failed to %s by sql, output:%s" % (message, output))
# return code is 0, but output has error info, CT-xxx, ZS-xxx
result = output.replace("\n", "")
@@ -2715,26 +3233,37 @@ class Installer:
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CANTIAN_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- self.user = json_data['USER'].strip()
- self.group = json_data['GROUP'].strip()
- self.data = json_data['DATA'].strip()
- self.create_db_file = json_data['CREAT_DB_FILE'].strip()
- self.install_path = json_data['INSTALL_PATH'].strip()
- g_opts.running_mode = json_data['RUNNING_MODE'].strip()
- self.cantiand_configs["LOG_HOME"] = json_data.get('LOG_HOME', '').strip()
- self.cantiand_configs["ENABLE_DBSTOR"] = json_data.get('ENABLE_DBSTOR', '').strip()
+ self.user = json_data["USER"].strip()
+ self.group = json_data["GROUP"].strip()
+ self.data = json_data["DATA"].strip()
+ self.create_db_file = json_data["CREAT_DB_FILE"].strip()
+ self.install_path = json_data["INSTALL_PATH"].strip()
+ g_opts.running_mode = json_data["RUNNING_MODE"].strip()
+ self.cantiand_configs["LOG_HOME"] = json_data.get("LOG_HOME", "").strip()
+ self.cantiand_configs["ENABLE_DBSTOR"] = json_data.get(
+ "ENABLE_DBSTOR", ""
+ ).strip()
if g_opts.use_dbstor:
- self.cantiand_configs["SHARED_PATH"] = json_data.get('SHARED_PATH', '')
- self.cantiand_configs["CONTROL_FILES"] = json_data.get('CONTROL_FILES', '').strip()
- self.cantiand_configs["DBSTOR_NAMESPACE"] = json_data.get('DBSTOR_NAMESPACE', '').strip()
+ self.cantiand_configs["SHARED_PATH"] = json_data.get("SHARED_PATH", "")
+ self.cantiand_configs["CONTROL_FILES"] = json_data.get(
+ "CONTROL_FILES", ""
+ ).strip()
+ self.cantiand_configs["DBSTOR_NAMESPACE"] = json_data.get(
+ "DBSTOR_NAMESPACE", ""
+ ).strip()
elif g_opts.use_gss:
- self.cantiand_configs["CONTROL_FILES"] = "(+vg1/ctrl1, +vg1/ctrl2, +vg1/ctrl3)"
+ self.cantiand_configs["CONTROL_FILES"] = (
+ "(+vg1/ctrl1, +vg1/ctrl2, +vg1/ctrl3)"
+ )
self.cantiand_configs["SHARED_PATH"] = "+vg1"
else:
- self.cantiand_configs["SHARED_PATH"] = '/mnt/dbdata/remote/storage_{}/data'.format(
- g_opts.storage_dbstor_fs)
+ self.cantiand_configs["SHARED_PATH"] = (
+ "/mnt/dbdata/remote/storage_{}/data".format(
+ g_opts.storage_dbstor_fs
+ )
+ )
# clean old backup log
# backup log file before rm data
@@ -2752,33 +3281,41 @@ class Installer:
cmd = "chown %s:%s %s;" % (self.user, self.group, INSTALL_SCRIPT)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chown to %s:%s return: %s%s%s"
- % (self.user, self.group, str(ret_code),
- os.linesep, stderr))
+ raise Exception(
+ "chown to %s:%s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
try:
# Prepare to start cantiand or mysqld
self.failed_pos = self.CREATE_DB_FAILED
mysql_init = True
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'r') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "r"
+ ) as load_fp:
start_parameters = json.load(load_fp)
- if start_parameters.setdefault('mysql_init', "default") == "done":
+ if start_parameters.setdefault("mysql_init", "default") == "done":
mysql_init = False
if g_opts.node_id != 0:
# node 1
self.failed_pos = self.CREATE_DB_FAILED
cantian_check_share_logic_ip_isvalid("share", g_opts.share_logic_ip)
- LOGGER.info('Begin to start node1')
- with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), 'r') as fp:
+ LOGGER.info("Begin to start node1")
+ with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- is_metadata_in_cantian = str(json_data.get('mysql_metadata_in_cantian', 'True'))
- if is_metadata_in_cantian != 'True':
+ is_metadata_in_cantian = str(
+ json_data.get("mysql_metadata_in_cantian", "True")
+ )
+ if is_metadata_in_cantian != "True":
# Metadata not unified into cantian: the first start only initializes, so a second start is needed
- log('mysql initialize...........')
+ log("mysql initialize...........")
self.start_cantiand()
- if g_opts.running_mode in VALID_SINGLE_MYSQL_RUNNING_MODE and mysql_init:
+ if (
+ g_opts.running_mode in VALID_SINGLE_MYSQL_RUNNING_MODE
+ and mysql_init
+ ):
# On the first startup in single-process mode, mysql must be started a second time; otherwise exit
cmd = "pidof mysqld"
- log('wait for mysqld init complete.....................')
+ log("wait for mysqld init complete.....................")
while True:
return_code, stdout_data, _ = _exec_popen(cmd)
if return_code or not stdout_data:
@@ -2794,14 +3331,17 @@ class Installer:
cantian_check_share_logic_ip_isvalid("share", g_opts.share_logic_ip)
self.start_cantiand()
log("Creating cantian database...")
- log('wait for cantiand thread startup')
+ log("wait for cantiand thread startup")
time.sleep(20)
self.create_db()
- if g_opts.running_mode in VALID_SINGLE_MYSQL_RUNNING_MODE and mysql_init:
+ if (
+ g_opts.running_mode in VALID_SINGLE_MYSQL_RUNNING_MODE
+ and mysql_init
+ ):
# Start mysqld for the second time.
cmd = "pidof mysqld"
- log('wait for mysqld init complete.....................')
+ log("wait for mysqld init complete.....................")
while True:
return_code, stdout_data, _ = _exec_popen(cmd)
if return_code or not stdout_data:
@@ -2819,19 +3359,25 @@ class Installer:
return
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'r') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "r"
+ ) as load_fp:
start_parameters = json.load(load_fp)
- if (start_parameters.setdefault('db_create_status', "default") == "default"):
+ if start_parameters.setdefault("db_create_status", "default") == "default":
self.update_factor_key()
flags = os.O_WRONLY | os.O_TRUNC
- db_create_status_item = {'db_create_status': "creating"}
+ db_create_status_item = {"db_create_status": "creating"}
start_parameters.update(db_create_status_item)
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w"
+ ) as load_fp:
json.dump(start_parameters, load_fp)
self.execute_sql_file(self.get_database_file())
- db_create_status_item = {'db_create_status': "done"}
+ db_create_status_item = {"db_create_status": "done"}
start_parameters.update(db_create_status_item)
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w"
+ ) as load_fp:
json.dump(start_parameters, load_fp)
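create_db tracks its progress in CANTIAN_START_STATUS_FILE ("default" -> "creating" -> "done"), so a re-run only executes the creation SQL when the flag is still at its default. A minimal sketch of that resume pattern, with a hypothetical status path and step name:

    import json
    import os
    import stat

    STATUS_FILE = "/tmp/start_status.json"  # illustrative path

    def save_status(status):
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        modes = stat.S_IWUSR | stat.S_IRUSR  # 0600, like the installer's files
        with os.fdopen(os.open(STATUS_FILE, flags, modes), "w") as fp:
            json.dump(status, fp)

    def run_once(step, action):
        status = {}
        if os.path.exists(STATUS_FILE):
            with open(STATUS_FILE, "r") as fp:
                status = json.load(fp)
        if status.setdefault(step, "default") != "default":
            return  # "creating" or "done": never repeat the step
        status[step] = "creating"
        save_status(status)
        action()
        status[step] = "done"
        save_status(status)

    run_once("db_create_status", lambda: print("creating database ..."))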
def check_db_status(self):
@@ -2866,7 +3412,9 @@ class Installer:
continue
if "1 rows fetched" not in res:
continue
- db_status = re.split(r"\s+", re.split(r"\n+", res.strip())[-2].strip())[1].strip()
+ db_status = re.split(r"\s+", re.split(r"\n+", res.strip())[-2].strip())[
+ 1
+ ].strip()
LOGGER.info("ctsql db status: %s" % db_status)
if db_status == "OPEN":
LOGGER.info("Cantiand start success, db status: %s" % db_status)
@@ -2891,31 +3439,38 @@ class Installer:
create_database_sql = os.path.join(sql_file_path, file_name)
if g_opts.use_dbstor:
file_name = "create_dbstor_database.sample.sql"
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]:
+ if g_opts.running_mode in [
+ CANTIAND_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
file_name = "create_dbstor_cluster_database.sample.sql"
create_database_sql = os.path.join(sql_file_path, file_name)
file_name_0 = "create_dbstor_cluster_database.lun.sql"
file_name_1 = "create_dbstor_cluster_database.sample.sql"
file_name_2 = "create_dbstor_database.sample.sql"
- if g_opts.db_type in ['0', '1', '2']:
+ if g_opts.db_type in ["0", "1", "2"]:
create_database_sql_dic = {}
- create_database_sql_dic['0'] = file_name_0
- create_database_sql_dic['1'] = file_name_1
- create_database_sql_dic['2'] = file_name_2
- create_database_sql = os.path.join(sql_file_path, create_database_sql_dic.get(g_opts.db_type))
+ create_database_sql_dic["0"] = file_name_0
+ create_database_sql_dic["1"] = file_name_1
+ create_database_sql_dic["2"] = file_name_2
+ create_database_sql = os.path.join(
+ sql_file_path, create_database_sql_dic.get(g_opts.db_type)
+ )
else:
create_database_sql = os.path.join(sql_file_path, file_name)
file_name_0 = "create_cluster_database.lun.sql"
file_name_1 = "create_cluster_database.sample.sql"
file_name_2 = "create_database.sample.sql"
- if g_opts.db_type in ['0', '1', '2']:
+ if g_opts.db_type in ["0", "1", "2"]:
create_database_sql_dic = {}
- create_database_sql_dic['0'] = file_name_0
- create_database_sql_dic['1'] = file_name_1
- create_database_sql_dic['2'] = file_name_2
- create_database_sql = os.path.join(sql_file_path, create_database_sql_dic.get(g_opts.db_type))
-
- db_data_path = os.path.join(self.data, "data").replace('/', '\/')
+ create_database_sql_dic["0"] = file_name_0
+ create_database_sql_dic["1"] = file_name_1
+ create_database_sql_dic["2"] = file_name_2
+ create_database_sql = os.path.join(
+ sql_file_path, create_database_sql_dic.get(g_opts.db_type)
+ )
+
+ db_data_path = os.path.join(self.data, "data").replace("/", "\/")
self.set_sql_redo_size_and_num(db_data_path, create_database_sql)
if g_opts.use_gss:
self._sed_file("dbfiles1", "+vg1", create_database_sql)
@@ -2929,7 +3484,7 @@ class Installer:
return create_database_sql
def _sed_file(self, prefix, replace, file_name):
- fix_sql_file_cmd = ("sed -i 's/%s/%s/g' %s" % (prefix, replace, file_name))
+ fix_sql_file_cmd = "sed -i 's/%s/%s/g' %s" % (prefix, replace, file_name)
ret_code, _, _ = _exec_popen(fix_sql_file_cmd)
if ret_code:
raise Exception("sed %s failed, replace %s" % (file_name, replace))
@@ -2938,8 +3493,10 @@ class Installer:
cmd = "chown -hR %s:%s %s; " % (self.user, self.group, self.ssl_path)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chown to %s:%s return: %s%s%s"
- % (self.user, self.group, str(ret_code), os.linesep, stderr))
+ raise Exception(
+ "chown to %s:%s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
def get_ctencrypt_keys(self, skip_execute_sql=False):
"""Set the config about _FACTOR_KEY and LOCAL_KEY."""
@@ -2948,8 +3505,10 @@ class Installer:
cmd = "%s/bin/ctencrypt -g" % self.install_path
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- raise OSError("Failed to generate encrypted keys. Error: %s"
- % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to generate encrypted keys. Error: %s"
+ % (stdout + os.linesep + stderr)
+ )
# Example of output:
# e.g. 'Key: XXXXXXXXXXXXXXXXXXXXXXX'
@@ -2981,8 +3540,10 @@ class Installer:
cmd = "%s/bin/ctencrypt -g -o '%s' " % (self.install_path, f_factor1)
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- raise OSError("Failed to generate encrypted keys. Error: %s"
- % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to generate encrypted keys. Error: %s"
+ % (stdout + os.linesep + stderr)
+ )
# Example of output:
# e.g. 'Key: XXXXXXXXXXXXXXXXXXXXXXX'
@@ -3005,18 +3566,25 @@ class Installer:
LOGGER.info("Generate encrypted keys successfully.")
return key_, work_key
- def encrypt_ssl_key_passwd(self, key_, work_key, ssl_passwd, skip_execute_sql=False):
+ def encrypt_ssl_key_passwd(
+ self, key_, work_key, ssl_passwd, skip_execute_sql=False
+ ):
"""Encrypt ssl key password with _FACTOR_KEY and LOCAL_KEY."""
LOGGER.info("Encrypt ssl key password.")
- cmd = ("%s/bin/ctencrypt -e AES256 -f %s -k %s"
- % (self.install_path, key_, work_key))
+ cmd = "%s/bin/ctencrypt -e AES256 -f %s -k %s" % (
+ self.install_path,
+ key_,
+ work_key,
+ )
values = [ssl_passwd, ssl_passwd]
ret_code, stdout, stderr = _exec_popen(cmd, values)
if ret_code:
- raise OSError("Failed to encrypt ssl key password. Error: %s"
- % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to encrypt ssl key password. Error: %s"
+ % (stdout + os.linesep + stderr)
+ )
# Example of output:
# Please enter password to encrypt:
@@ -3038,8 +3606,7 @@ class Installer:
# Don't set SSL_CA and CTSQL_SSL_CA.
# Avoid the need to copy files, env and kernel parameter
# from the primary dn when installing the backup dn.
- cantian_conf_file = os.path.join(self.data, "cfg",
- self.CANTIAND_CONF_FILE)
+ cantian_conf_file = os.path.join(self.data, "cfg", self.CANTIAND_CONF_FILE)
ssl_map = {
"SSL_CERT": os.path.join(self.ssl_path, "server.crt"),
"SSL_KEY": os.path.join(self.ssl_path, "server.key"),
@@ -3069,21 +3636,26 @@ class Installer:
try:
flags = os.O_RDWR
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.user_profile, flags, modes), 'a') as _file:
- _file.write("export CTSQL_SSL_CERT=\"%s\""
- % os.path.join(self.ssl_path, "client.crt"))
+ with os.fdopen(os.open(self.user_profile, flags, modes), "a") as _file:
+ _file.write(
+ 'export CTSQL_SSL_CERT="%s"'
+ % os.path.join(self.ssl_path, "client.crt")
+ )
_file.write(os.linesep)
- _file.write("export CTSQL_SSL_KEY=\"%s\""
- % os.path.join(self.ssl_path, "client.key"))
+ _file.write(
+ 'export CTSQL_SSL_KEY="%s"'
+ % os.path.join(self.ssl_path, "client.key")
+ )
_file.write(os.linesep)
- _file.write("export CTSQL_SSL_MODE=\"required\"")
+ _file.write('export CTSQL_SSL_MODE="required"')
_file.write(os.linesep)
- _file.write("export CTSQL_SSL_KEY_PASSWD=\"%s\"" % cipher)
+ _file.write('export CTSQL_SSL_KEY_PASSWD="%s"' % cipher)
_file.write(os.linesep)
_file.flush()
except IOError as ex:
- raise IOError("Failed Set user environment variables about ssl: %s"
- % str(ex)) from ex
+ raise IOError(
+ "Failed Set user environment variables about ssl: %s" % str(ex)
+ ) from ex
os.environ["CTSQL_SSL_CERT"] = os.path.join(self.ssl_path, "client.crt")
os.environ["CTSQL_SSL_KEY"] = os.path.join(self.ssl_path, "client.key")
@@ -3099,22 +3671,32 @@ class Installer:
if not g_opts.db_passwd:
# connect database by sysdba
- cmd = ("%s/bin/shutdowndb.sh -h %s -p %s -w -m %s -D %s -T %d"
- % (self.install_path, host_ip, self.lsnr_port,
- "immediate", self.data, timeout))
+ cmd = "%s/bin/shutdowndb.sh -h %s -p %s -w -m %s -D %s -T %d" % (
+ self.install_path,
+ host_ip,
+ self.lsnr_port,
+ "immediate",
+ self.data,
+ timeout,
+ )
ret_code, _, stderr = _exec_popen(cmd)
else:
# connect database by username and password
- cmd = ("%s/bin/shutdowndb.sh -h"
- " %s -p %s -U %s -m %s -W -D %s -T %d" %
- (self.install_path,
- host_ip, self.lsnr_port, g_opts.db_user, "immediate",
- self.data, timeout))
+ cmd = "%s/bin/shutdowndb.sh -h" " %s -p %s -U %s -m %s -W -D %s -T %d" % (
+ self.install_path,
+ host_ip,
+ self.lsnr_port,
+ g_opts.db_user,
+ "immediate",
+ self.data,
+ timeout,
+ )
ret_code, _, stderr = _exec_popen(cmd, [g_opts.db_passwd])
if ret_code:
- raise Exception("Failed to stop database. Error: %s"
- % (stderr + os.linesep + stderr))
+ raise Exception(
+ "Failed to stop database. Error: %s" % (stderr + os.linesep + stderr)
+ )
LOGGER.info("stop cantian instance successfully.")
def chmod_install_sqlfile(self):
@@ -3124,15 +3706,22 @@ class Installer:
output: NA
"""
try:
- str_cmd = ("find '%s'/admin -type f | xargs chmod %s "
- % (self.install_path, CommonValue.MIN_FILE_MODE))
+ str_cmd = "find '%s'/admin -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.MIN_FILE_MODE,
+ )
ret_code, _, _ = _exec_popen(str_cmd)
if ret_code:
print_str_1 = CommonPrint()
- print_str_1.common_log("Change file permission to %s failed."
- " Please chmod %s filein directory %s/admin manually."
- % (CommonValue.MIN_FILE_MODE,
- CommonValue.MIN_FILE_MODE, self.install_path))
+ print_str_1.common_log(
+ "Change file permission to %s failed."
+ " Please chmod %s filein directory %s/admin manually."
+ % (
+ CommonValue.MIN_FILE_MODE,
+ CommonValue.MIN_FILE_MODE,
+ self.install_path,
+ )
+ )
except Exception as error:
LOGGER.error(str(error))
raise Exception(str(error))
@@ -3187,15 +3776,16 @@ class Installer:
cantiand_ini = {}
modes = stat.S_IWUSR | stat.S_IRUSR
flags = os.O_RDONLY
- with os.fdopen(os.open(CANTIAND_INI_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CANTIAND_INI_FILE, flags, modes), "r") as fp:
for line in fp:
if line == "\n":
continue
(key, val) = line.split(" = ")
- val = val.replace('\n', '')
+ val = val.replace("\n", "")
cantiand_ini[key] = val
- self.enable_sysdba_login = Installer.check_pare_bool_value("ENABLE_SYSDBA_LOGIN",
- cantiand_ini.get("ENABLE_SYSDBA_LOGIN", "FALSE"))
+ self.enable_sysdba_login = Installer.check_pare_bool_value(
+ "ENABLE_SYSDBA_LOGIN", cantiand_ini.get("ENABLE_SYSDBA_LOGIN", "FALSE")
+ )
def install_start(self):
self.parse_cantiand_ini()
@@ -3206,15 +3796,24 @@ class Installer:
def check_parameter_mysql(self):
if g_opts.mysql_config_file_path == "unset":
- g_opts.mysql_config_file_path = os.path.join(MYSQL_CODE_DIR, "scripts/my.cnf")
- LOGGER.info("no mysql config file assigned, set to %s" % g_opts.mysql_config_file_path)
+ g_opts.mysql_config_file_path = os.path.join(
+ MYSQL_CODE_DIR, "scripts/my.cnf"
+ )
+ LOGGER.info(
+ "no mysql config file assigned, set to %s"
+ % g_opts.mysql_config_file_path
+ )
real_path = os.path.realpath(g_opts.mysql_config_file_path)
if not os.path.isfile(real_path):
- err_msg = "mysql config file {} not existed or it is not a file".format(real_path)
+ err_msg = "mysql config file {} not existed or it is not a file".format(
+ real_path
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
if not self.is_readable(real_path, self.user):
- err_msg = "mysql config file {} is not readable by {}".format(real_path, self.user)
+ err_msg = "mysql config file {} is not readable by {}".format(
+ real_path, self.user
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
g_opts.mysql_config_file_path = real_path
@@ -3222,7 +3821,10 @@ class Installer:
cmd += "chmod {} {}".format(CommonValue.MAX_FILE_MODE, real_path)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- err_msg = "Can not set mysql config mode, command: %s, output: %s" % (cmd, stderr)
+ err_msg = "Can not set mysql config mode, command: %s, output: %s" % (
+ cmd,
+ stderr,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -3269,10 +3871,10 @@ class Installer:
"""
LOGGER.info("Preparing path [%s]." % one_path)
owner_path = one_path
- if (os.path.exists(one_path)):
- if (check_empty):
+ if os.path.exists(one_path):
+ if check_empty:
file_list = os.listdir(one_path)
- if (len(file_list) != 0):
+ if len(file_list) != 0:
err_msg = "Database path %s should be empty." % one_path
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -3290,8 +3892,7 @@ class Installer:
# will cause an error
if owner_path != one_path:
cmd = "chown -hR %s:%s %s; " % (self.user, self.group, owner_path)
- cmd += "chmod -R %s %s" % (CommonValue.KEY_DIRECTORY_MODE,
- owner_path)
+ cmd += "chmod -R %s %s" % (CommonValue.KEY_DIRECTORY_MODE, owner_path)
else:
cmd = "chown %s:%s %s; " % (self.user, self.group, owner_path)
cmd += "chmod %s %s" % (CommonValue.KEY_DIRECTORY_MODE, owner_path)
@@ -3306,7 +3907,11 @@ class Installer:
LOGGER.info("check [%s] user permission" % one_path)
permission_ok, stderr = self.check_permission(one_path)
if not permission_ok:
- err_msg = "Failed to check user [%s] path [%s] permission. Error: %s" % (self.user, one_path, stderr)
+ err_msg = "Failed to check user [%s] path [%s] permission. Error: %s" % (
+ self.user,
+ one_path,
+ stderr,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
@@ -3319,52 +3924,71 @@ class Installer:
LOGGER.info("Preparing mysql bin dir...")
self.clean_dir(MYSQL_BIN_DIR)
self.prepare_given_path(MYSQL_BIN_DIR, True)
- cmd = "cp -arf {} {};".format(os.path.join(MYSQL_CODE_DIR, "mysql_bin/mysql/*"), MYSQL_BIN_DIR)
+ cmd = "cp -arf {} {};".format(
+ os.path.join(MYSQL_CODE_DIR, "mysql_bin/mysql/*"), MYSQL_BIN_DIR
+ )
cmd += "cp %s/cfg/osd.cfg %s/bin/osd.cfg;" % (self.data, MYSQL_BIN_DIR)
cmd += "chown -hR {}:{} {};".format(self.user, self.group, MYSQL_BIN_DIR)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- LOGGER.error("Can not copy mysql bin, command: %s, output: %s" % (cmd, stderr))
- raise Exception("Can not copy mysql bin, command: %s, output: %s" % (cmd, stderr))
+ LOGGER.error(
+ "Can not copy mysql bin, command: %s, output: %s" % (cmd, stderr)
+ )
+ raise Exception(
+ "Can not copy mysql bin, command: %s, output: %s" % (cmd, stderr)
+ )
LOGGER.info("end mysql bin dir...")
+
def set_mysql_env(self):
LOGGER.info("Preparing mysql running env...")
- if 'LD_LIBRARY_PATH' in os.environ:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s:%s" % (
+ if "LD_LIBRARY_PATH" in os.environ:
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s:%s" % (
os.path.join(MYSQL_BIN_DIR, "lib"),
os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
- os.environ['LD_LIBRARY_PATH']))
+ os.environ["LD_LIBRARY_PATH"],
+ )
else:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s" % (
- os.path.join(MYSQL_BIN_DIR, "lib"), os.path.join(
- MYSQL_CODE_DIR, "cantian_lib")))
- cmd = "ldconfig -N %s %s" % (os.path.join(MYSQL_CODE_DIR, "cantian_lib"), os.path.join(MYSQL_BIN_DIR, "lib"))
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s" % (
+ os.path.join(MYSQL_BIN_DIR, "lib"),
+ os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
+ )
+ cmd = "ldconfig -N %s %s" % (
+ os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
+ os.path.join(MYSQL_BIN_DIR, "lib"),
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- LOGGER.error("Can not link mysql lib, command: %s, output: %s" % (cmd, stderr))
- raise Exception("Can not link mysql lib, command: %s, output: %s" % (cmd, stderr))
+ LOGGER.error(
+ "Can not link mysql lib, command: %s, output: %s" % (cmd, stderr)
+ )
+ raise Exception(
+ "Can not link mysql lib, command: %s, output: %s" % (cmd, stderr)
+ )
def export_mysql_env(self):
try:
flags = os.O_RDWR
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.user_profile, flags, modes), 'a') as _file:
- _file.write("export MYSQL_BIN_DIR=\"%s\"" % MYSQL_BIN_DIR)
+ with os.fdopen(os.open(self.user_profile, flags, modes), "a") as _file:
+ _file.write('export MYSQL_BIN_DIR="%s"' % MYSQL_BIN_DIR)
_file.write(os.linesep)
- _file.write("export MYSQL_CODE_DIR=\"%s\"" % MYSQL_CODE_DIR)
+ _file.write('export MYSQL_CODE_DIR="%s"' % MYSQL_CODE_DIR)
_file.write(os.linesep)
- _file.write("export MYSQL_DATA_DIR=\"%s\"" % MYSQL_DATA_DIR)
+ _file.write('export MYSQL_DATA_DIR="%s"' % MYSQL_DATA_DIR)
_file.write(os.linesep)
- _file.write("export MYSQL_LOG_FILE=\"%s\"" % MYSQL_LOG_FILE)
+ _file.write('export MYSQL_LOG_FILE="%s"' % MYSQL_LOG_FILE)
_file.write(os.linesep)
_file.flush()
except IOError as ex:
self.failed_pos = self.SET_ENV_FAILED
log_exit("Can not set user environment variables: %s" % str(ex))
-
+
def prepare_mysql_for_single(self):
- if g_opts.running_mode.lower() not in VALID_SINGLE_MYSQL_RUNNING_MODE or int(g_opts.cantian_in_container)==1:
+ if (
+ g_opts.running_mode.lower() not in VALID_SINGLE_MYSQL_RUNNING_MODE
+ or int(g_opts.cantian_in_container) == 1
+ ):
return
LOGGER.info("prepare_mysql_for_single")
self.check_parameter_mysql()
@@ -3375,20 +3999,20 @@ class Installer:
def check_archive_dir():
- if g_opts.db_type not in ['0', '1', '2']:
+ if g_opts.db_type not in ["0", "1", "2"]:
err_msg = "Invalid db_type : %s." % g_opts.db_type
LOGGER.error(err_msg)
raise Exception(err_msg)
- if g_opts.db_type == '0' or g_opts.install_type == "reserve":
+ if g_opts.db_type == "0" or g_opts.install_type == "reserve":
return
if g_opts.node_id == 1:
return
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'r') as load_fp:
+ with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), "r") as load_fp:
start_parameters = json.load(load_fp)
- if start_parameters.setdefault('db_create_status', "default") == "done":
+ if start_parameters.setdefault("db_create_status", "default") == "done":
return
if DEPLOY_MODE == "dss":
return
@@ -3398,9 +4022,13 @@ def check_archive_dir():
if os.path.exists(archive_dir):
files = os.listdir(archive_dir)
for file in files:
- if (file[-4:] == ".arc" and file[:4] == "arch") or ("arch_file.tmp" in file):
- err_msg = "archive dir %s is not empty, history archive file or archive tmp file : %s." % (
- archive_dir, file)
+ if (file[-4:] == ".arc" and file[:4] == "arch") or (
+ "arch_file.tmp" in file
+ ):
+ err_msg = (
+ "archive dir %s is not empty, history archive file or archive tmp file : %s."
+ % (archive_dir, file)
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
else:
@@ -3415,11 +4043,17 @@ def check_archive_dir():
if "the archive dir does not exist" in str(output):
log("INFO: %s" % output.strip())
else:
- err_msg = "Failed to execute command '%s', error: %s" % (arch_query_cmd, stderr)
+ err_msg = "Failed to execute command '%s', error: %s" % (
+ arch_query_cmd,
+ stderr,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
else:
- if any("arch" in line and (".arc" in line or "arch_file.tmp" in line) for line in output.splitlines()):
+ if any(
+ "arch" in line and (".arc" in line or "arch_file.tmp" in line)
+ for line in output.splitlines()
+ ):
err_msg = "Archive files found in dbstor: %s" % output
LOGGER.error(err_msg)
raise Exception(err_msg)
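Both branches apply the same residue test: a completed archive segment matches arch*.arc, and an interrupted archive leaves an arch_file.tmp fragment. The predicate on its own, with hypothetical file names:

    def is_archive_residue(name):
        # completed segments: arch<...>.arc; in-flight ones: *arch_file.tmp*
        return (name[-4:] == ".arc" and name[:4] == "arch") or ("arch_file.tmp" in name)

    for f in ["arch_1_100.arc", "node0_arch_file.tmp", "redo01.log"]:
        print(f, is_archive_residue(f))  # True, True, False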
@@ -3427,8 +4061,12 @@ def check_archive_dir():
class CanTian(object):
- g_opts.os_user, g_opts.os_group = get_value("deploy_user"), get_value("deploy_group")
- g_opts.install_type = get_value('install_type') if get_value('install_type') else "0"
+ g_opts.os_user, g_opts.os_group = get_value("deploy_user"), get_value(
+ "deploy_group"
+ )
+ g_opts.install_type = (
+ get_value("install_type") if get_value("install_type") else "0"
+ )
def cantian_pre_install(self):
check_platform()
@@ -3441,7 +4079,10 @@ class CanTian(object):
try:
installer = Installer(g_opts.os_user, g_opts.os_group)
installer.install()
- LOGGER.info("Install successfully, for more detail information see %s." % g_opts.log_file)
+ LOGGER.info(
+ "Install successfully, for more detail information see %s."
+ % g_opts.log_file
+ )
except Exception as error:
LOGGER.error("Install failed: " + str(error))
raise Exception(str(error)) from error
@@ -3450,9 +4091,11 @@ class CanTian(object):
try:
flags = os.O_RDWR | os.O_CREAT
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w+') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w+"
+ ) as load_fp:
start_parameters = json.load(load_fp)
- start_status_item = {'start_status': "starting"}
+ start_status_item = {"start_status": "starting"}
start_parameters.update(start_status_item)
load_fp.seek(0)
load_fp.truncate()
@@ -3462,14 +4105,19 @@ class CanTian(object):
check_archive_dir()
installer = Installer(g_opts.os_user, g_opts.os_group)
installer.install_start()
- LOGGER.info("Start successfully, for more detail information see %s." % g_opts.log_file)
+ LOGGER.info(
+ "Start successfully, for more detail information see %s."
+ % g_opts.log_file
+ )
flags = os.O_RDWR | os.O_CREAT
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w+') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w+"
+ ) as load_fp:
start_parameters = json.load(load_fp)
- start_status_item = {'start_status': "started"}
+ start_status_item = {"start_status": "started"}
start_parameters.update(start_status_item)
- ever_started_item = {'ever_started': True}
+ ever_started_item = {"ever_started": True}
start_parameters.update(ever_started_item)
load_fp.seek(0)
load_fp.truncate()
@@ -3477,35 +4125,57 @@ class CanTian(object):
sep_mark = os.path.sep
cmd = "pidof cantiand"
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST,
- CANTIAND_WITH_MYSQL_IN_CLUSTER]:
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
cmd = "pidof mysqld"
ret_code, cantiand_pid, stderr = _exec_popen(cmd)
if ret_code:
- LOGGER.error("can not get pid of cantiand or mysqld, command: %s, err: %s" % (cmd, stderr))
- raise Exception("can not get pid of cantiand or mysqld, command: %s, err: %s" % (cmd, stderr))
+ LOGGER.error(
+ "can not get pid of cantiand or mysqld, command: %s, err: %s"
+ % (cmd, stderr)
+ )
+ raise Exception(
+ "can not get pid of cantiand or mysqld, command: %s, err: %s"
+ % (cmd, stderr)
+ )
cantiand_pids = cantiand_pid.strip().split()
if len(cantiand_pids) > 1:
- error_message = (f"Detected multiple cantiand/mysqld processes ({', '.join(cantiand_pids)}). "
- f"Please manually clean up the abnormal processes and retry.")
+ error_message = (
+ f"Detected multiple cantiand/mysqld processes ({', '.join(cantiand_pids)}). "
+ f"Please manually clean up the abnormal processes and retry."
+ )
LOGGER.error(error_message)
raise Exception(error_message)
cantiand_pid = cantiand_pids[0]
- coredump_filter_path = os.path.join(sep_mark, "proc", str(cantiand_pid), "coredump_filter")
+ coredump_filter_path = os.path.join(
+ sep_mark, "proc", str(cantiand_pid), "coredump_filter"
+ )
if cantiand_pid:
cmd = f"echo 0x6f > {coredump_filter_path}"
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- LOGGER.error("can not set coredump_filter, command: %s, err: %s" % (cmd, stderr))
- raise Exception("can not set coredump_filter, command: %s, err: %s" % (cmd, stderr))
+ LOGGER.error(
+ "can not set coredump_filter, command: %s, err: %s"
+ % (cmd, stderr)
+ )
+ raise Exception(
+ "can not set coredump_filter, command: %s, err: %s"
+ % (cmd, stderr)
+ )
LOGGER.info("Set coredump_filter successfully")
except Exception as error:
LOGGER.info("Start failed: " + str(error))
- LOGGER.info("Please refer to install log \"%s\" for more detailed information." % g_opts.log_file)
+ LOGGER.info(
+ 'Please refer to install log "%s" for more detailed information.'
+ % g_opts.log_file
+ )
raise Exception(str(error)) from error
def post_check(self):
@@ -3514,9 +4184,9 @@ class CanTian(object):
installer.decrypt_db_passwd()
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(JS_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(JS_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- installer.install_path = json_data['R_INSTALL_PATH'].strip()
+ installer.install_path = json_data["R_INSTALL_PATH"].strip()
installer.check_db_status()
LOGGER.info("Post upgrade check success.")
diff --git a/pkg/deploy/action/cantian/cantian_post_upgrade.py b/pkg/deploy/action/cantian/cantian_post_upgrade.py
index c2793ba98a975f370a20b98de966a3934458c75e..5d044003fc5407eeb57aac1b2cc1b528d43372d6 100644
--- a/pkg/deploy/action/cantian/cantian_post_upgrade.py
+++ b/pkg/deploy/action/cantian/cantian_post_upgrade.py
@@ -10,4 +10,4 @@ if __name__ == "__main__":
exit(str(err))
except Exception as err:
exit(str(err))
- exit(0)
\ No newline at end of file
+ exit(0)
diff --git a/pkg/deploy/action/cantian/cantian_start.py b/pkg/deploy/action/cantian/cantian_start.py
index 5db575cc74de14293f54ed8b5ff4d2a59445746e..de6b48ed60df845658af9ff5a1bb1de7cdcabb46 100644
--- a/pkg/deploy/action/cantian/cantian_start.py
+++ b/pkg/deploy/action/cantian/cantian_start.py
@@ -13,4 +13,4 @@ if __name__ == "__main__":
exit(str(err))
except Exception as err:
exit(str(err))
- exit(0)
\ No newline at end of file
+ exit(0)
diff --git a/pkg/deploy/action/cantian/cantian_uninstall.py b/pkg/deploy/action/cantian/cantian_uninstall.py
index 87b04bf8ddefe9363ca6cc99c2b75c0b50330b45..acad8fdd30793c8e3d0a33f1aa4cc69e3dd4550f 100644
--- a/pkg/deploy/action/cantian/cantian_uninstall.py
+++ b/pkg/deploy/action/cantian/cantian_uninstall.py
@@ -42,9 +42,13 @@ PKG_DIR = os.path.abspath(os.path.join(INSTALL_SCPRIT_DIR, "../.."))
JS_CONF_FILE = os.path.join(PKG_DIR, "action", "cantian", "install_config.json")
UNINSTALL_PATH = os.path.join(PKG_DIR, "action", "cantian")
-CANTIAN_UNINSTALL_CONF_FILE = os.path.join(PKG_DIR, "action", "cantian", "cantian_uninstall_config.json")
+CANTIAN_UNINSTALL_CONF_FILE = os.path.join(
+ PKG_DIR, "action", "cantian", "cantian_uninstall_config.json"
+)
CANTIAN_CONF_FILE = os.path.join("/opt/cantian/cantian", "cfg", "cantian_config.json")
-CANTIAN_START_STATUS_FILE = os.path.join("/opt/cantian/cantian", "cfg", "start_status.json")
+CANTIAN_START_STATUS_FILE = os.path.join(
+ "/opt/cantian/cantian", "cfg", "start_status.json"
+)
CANTIAN_UNINSTALL_LOG_FILE = "/opt/cantian/log/cantian/cantian_deploy.log"
CONFIG_PARAMS_FILE = os.path.join(PKG_DIR, "config", "deploy_param.json")
FORCE_UNINSTALL = None
@@ -52,7 +56,12 @@ CHECK_MAX_TIMES = 60
CANTIAND_WITH_MYSQL = "cantiand_with_mysql"
CANTIAND_WITH_MYSQL_ST = "cantiand_with_mysql_st"
CANTIAND_WITH_MYSQL_IN_CLUSTER = "cantiand_with_mysql_in_cluster"
-VALID_SINGLE_MYSQL_RUNNING_MODE = {CANTIAND_WITH_MYSQL_IN_CLUSTER, CANTIAND_WITH_MYSQL_ST, CANTIAND_WITH_MYSQL}
+VALID_SINGLE_MYSQL_RUNNING_MODE = {
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL,
+}
+
def _exec_popen(_cmd, values=None):
"""
@@ -63,8 +72,13 @@ def _exec_popen(_cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
pobj.stdin.write(_cmd.encode())
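_exec_popen avoids shell=True by spawning a bare bash and writing the command text to its stdin; any extra values (passwords, confirmations) are appended as further stdin lines, so they never show up in a process listing. A self-contained sketch of the same pattern (the real helper also handles Python 2 and trims trailing newlines):

    import subprocess

    def exec_via_bash(cmd, values=None):
        pobj = subprocess.Popen(
            ["bash"], shell=False, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        )
        payload = cmd + "\n"
        for value in values or []:
            payload += value + "\n"  # consumed by the command's own prompts
        stdout, stderr = pobj.communicate(payload.encode())
        return pobj.returncode, stdout.decode().strip(), stderr.decode().strip()

    code, out, _ = exec_via_bash("read -r x; echo got:$x", ["secret"])
    print(code, out)  # 0 got:secret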
@@ -105,11 +119,11 @@ def cantian_check_share_logic_ip_isvalid(node_ip, deploy_mode):
input : ip
output: NA
"""
-
+
def ping_execute(p_cmd):
cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (p_cmd, node_ip)
ret_code, stdout, _ = _exec_popen(cmd)
- if ret_code or stdout != '3':
+ if ret_code or stdout != "3":
return False
return True
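Reachability here is decided purely by counting replies: three echo requests are sent and the pipeline must print exactly 3 lines containing "ttl". The same check as a standalone function (binary name and host are illustrative):

    import subprocess

    def is_reachable(ping_bin, node_ip):
        cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (ping_bin, node_ip)
        proc = subprocess.run(["bash", "-c", cmd],
                              capture_output=True, text=True)
        # each successful echo reply prints one line containing "ttl"
        return proc.returncode == 0 and proc.stdout.strip() == "3"

    print(is_reachable("ping", "127.0.0.1"))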
@@ -198,21 +212,27 @@ def parse_parameter():
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(JS_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(JS_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
g_opts.clean_data_dir_on = 0 # -F
- g_opts.clean_data_dir = json_data['UNINSTALL_D_LOCATION_DATABASE_AREA'].strip() # -D
+ g_opts.clean_data_dir = json_data[
+ "UNINSTALL_D_LOCATION_DATABASE_AREA"
+ ].strip() # -D
if os.getuid() != 0: # -g
- g_opts.install_user_privilege = json_data['UNINSTALL_g_RUN_UNINSTALL_SCRIPT'].strip()
+ g_opts.install_user_privilege = json_data[
+ "UNINSTALL_g_RUN_UNINSTALL_SCRIPT"
+ ].strip()
g_opts.use_gss = True # -s
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), 'r') as config_fp:
+ with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), "r") as config_fp:
json_data = json.load(config_fp)
- g_opts.namespace = json_data.get('cluster_name', 'test1').strip()
- g_opts.node_id = int(json_data.get('node_id'))
- metadata_str = "metadata_" + json_data.get('storage_metadata_fs', '').strip()
+ g_opts.namespace = json_data.get("cluster_name", "test1").strip()
+ g_opts.node_id = int(json_data.get("node_id"))
+ metadata_str = (
+ "metadata_" + json_data.get("storage_metadata_fs", "").strip()
+ )
node_str = "node" + str(g_opts.node_id)
global MYSQL_DATA_DIR
MYSQL_DATA_DIR = os.path.join("/mnt/dbdata/remote", metadata_str, node_str)
@@ -252,10 +272,14 @@ def check_parameter():
if g_opts.install_user_privilege != "withoutroot":
print_str = CommonPrint()
- print_str.common_log("Error: User has no root privilege, "
- "do uninstall, need specify parameter '-g withoutroot'.")
- raise ValueError("Error: User has no root privilege, "
- "do uninstall, need specify parameter '-g withoutroot'.")
+ print_str.common_log(
+ "Error: User has no root privilege, "
+ "do uninstall, need specify parameter '-g withoutroot'."
+ )
+ raise ValueError(
+ "Error: User has no root privilege, "
+ "do uninstall, need specify parameter '-g withoutroot'."
+ )
else:
print_str = CommonPrint()
print_str.common_log("Error:Check os failed:current os is not linux")
@@ -264,25 +288,28 @@ def check_parameter():
if g_opts.clean_data_dir_on == 1:
if g_opts.clean_data_dir:
print_str = CommonPrint()
- print_str.common_log("Error: Parameter input error: "
- "you can not use -D without using -F")
- raise ValueError("Error: Parameter input error: "
- "you can not use -D without using -F")
+ print_str.common_log(
+ "Error: Parameter input error: " "you can not use -D without using -F"
+ )
+ raise ValueError(
+ "Error: Parameter input error: " "you can not use -D without using -F"
+ )
if g_opts.clean_data_dir:
g_opts.clean_data_dir = os.path.realpath(
- os.path.normpath(g_opts.clean_data_dir))
+ os.path.normpath(g_opts.clean_data_dir)
+ )
DefaultValue.check_invalid_path(g_opts.clean_data_dir)
def check_log_path():
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_UNINSTALL_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CANTIAN_UNINSTALL_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- if json_data.get('LOG_FILE', '').strip() == "":
+ if json_data.get("LOG_FILE", "").strip() == "":
g_opts.log_file = CANTIAN_UNINSTALL_LOG_FILE
else:
- g_opts.log_file = json_data.get('LOG_FILE', '').strip()
+ g_opts.log_file = json_data.get("LOG_FILE", "").strip()
if not g_opts.log_file:
g_opts.log_file = CANTIAN_UNINSTALL_LOG_FILE
@@ -322,9 +349,9 @@ def get_install_path():
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(JS_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(JS_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- g_opts.install_path_l = json_data['R_INSTALL_PATH'].strip()
+ g_opts.install_path_l = json_data["R_INSTALL_PATH"].strip()
# Must be exist
if not os.path.exists(g_opts.install_path_l):
err_msg = "Failed to get install path."
@@ -338,7 +365,7 @@ def get_deploy_user():
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), 'r') as fp1:
+ with os.fdopen(os.open(CONFIG_PARAMS_FILE, flags, modes), "r") as fp1:
json_data_deploy = json.load(fp1)
deploy_user = get_value("deploy_user")
@@ -354,7 +381,8 @@ def get_user_environment_file():
LOGGER.info("Getting user environment variables file path...")
home_path = g_opts.user_info.pw_dir
g_opts.user_env_path = os.path.realpath(
- os.path.normpath(os.path.join(home_path, ".bashrc")))
+ os.path.normpath(os.path.join(home_path, ".bashrc"))
+ )
if not os.path.isfile(os.path.realpath(g_opts.user_env_path)):
err_msg = "Can't get the environment variables file."
LOGGER.error(err_msg)
@@ -371,7 +399,7 @@ def find_before_slice(slice_, str_):
find '#' in the head of line
"""
place = str_.find(slice_)
- return str_.find('#', 0, place)
+ return str_.find("#", 0, place)
####################################################################
@@ -400,12 +428,12 @@ def check_environment_install_path():
line = tmp_f.readline()
while line:
# Obtain 'export CTDB_HOME'
- if line.find('export CTDB_HOME') != -1:
+ if line.find("export CTDB_HOME") != -1:
# Determine whether there is "#" before CTDB_HOME, the
# function returns a value of -1, indicating that it is
# not found, CTDB_HOME is valid.
- if find_before_slice(line, 'CTDB_HOME') == -1:
- install_env_dic_l = line.split('=')
+ if find_before_slice(line, "CTDB_HOME") == -1:
+ install_env_dic_l = line.split("=")
install_env_temp_l = install_env_dic_l[1].rstrip()
install_env_l = os.path.normpath(install_env_temp_l)
install_env_l = os.path.realpath(install_env_l[1:-1])
@@ -415,11 +443,14 @@ def check_environment_install_path():
return 0
line = tmp_f.readline()
tmp_f.close()
- err_msg = "Check install path in user environment variables failed:\
- can not find install path in user: %s environment variables" % g_opts.user_info.pw_name
+ err_msg = (
+ "Check install path in user environment variables failed:\
+ can not find install path in user: %s environment variables"
+ % g_opts.user_info.pw_name
+ )
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
- raise Exception(err_msg)
+ raise Exception(err_msg)
LOGGER.info("End check install path in user environment variables")
@@ -455,8 +486,8 @@ def get_gsdata_path_env():
# Determine whether there is "#" before CTDB_DATA, the
# function returns a value of -1, indicating that it is
# not found, CTDB_DATA is valid.
- if find_before_slice('export CTDB_DATA', line) == -1:
- gsdata_path_dic_temp = line.split('=')
+ if find_before_slice("export CTDB_DATA", line) == -1:
+ gsdata_path_dic_temp = line.split("=")
gsdata_path_temp = gsdata_path_dic_temp[1].rstrip()
gsdata_path = os.path.normpath(gsdata_path_temp)
g_opts.gs_data_path = os.path.realpath(gsdata_path[1:-1])
@@ -474,15 +505,16 @@ def get_gsdata_path_env():
return 0
# deal with the CTDB_HOME with """
# Obtain 'export CTDB_DATA'
- elif line.find('export CTDB_DATA') != -1:
+ elif line.find("export CTDB_DATA") != -1:
# Determine whether there is "#" before CTDB_DATA, the
# function returns a value of -1, indicating that it is
# not found, CTDB_DATA is valid.
- if find_before_slice('export CTDB_DATA', line) == -1:
- gsdata_path_dic_temp = line.split('=')
+ if find_before_slice("export CTDB_DATA", line) == -1:
+ gsdata_path_dic_temp = line.split("=")
gsdata_path_temp = gsdata_path_dic_temp[1].rstrip()
g_opts.gs_data_path = os.path.realpath(
- os.path.normpath(gsdata_path_temp))
+ os.path.normpath(gsdata_path_temp)
+ )
if not os.path.exists(g_opts.gs_data_path):
f.close()
err_msg = "Get data directory in user environment variables \
@@ -513,12 +545,14 @@ def check_data_dir():
"""
LOGGER.info("Begin check data dir...")
if g_opts.clean_data_dir:
- if os.path.exists(g_opts.clean_data_dir) \
- and os.path.isdir(g_opts.clean_data_dir) \
- and g_opts.clean_data_dir == g_opts.gs_data_path:
- LOGGER.info("path: \"%s\" is correct" % g_opts.clean_data_dir)
+ if (
+ os.path.exists(g_opts.clean_data_dir)
+ and os.path.isdir(g_opts.clean_data_dir)
+ and g_opts.clean_data_dir == g_opts.gs_data_path
+ ):
+ LOGGER.info('path: "%s" is correct' % g_opts.clean_data_dir)
else:
- err_msg = "path: \"%s\" is incorrect" % g_opts.clean_data_dir
+ err_msg = 'path: "%s" is incorrect' % g_opts.clean_data_dir
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
@@ -537,11 +571,11 @@ def check_uninstall_pos():
output: NA
"""
LOGGER.info("Checking uninstall.py position...")
- bin_path = g_opts.install_path_l + os.sep + 'bin'
- addons_path = g_opts.install_path_l + os.sep + 'add-ons'
- admin_path = g_opts.install_path_l + os.sep + 'admin'
- lib_path = g_opts.install_path_l + os.sep + 'lib'
- pkg_file = g_opts.install_path_l + os.sep + 'package.xml'
+ bin_path = g_opts.install_path_l + os.sep + "bin"
+ addons_path = g_opts.install_path_l + os.sep + "add-ons"
+ admin_path = g_opts.install_path_l + os.sep + "admin"
+ lib_path = g_opts.install_path_l + os.sep + "lib"
+ pkg_file = g_opts.install_path_l + os.sep + "package.xml"
# Check if the install path exists
if not os.path.exists(g_opts.install_path_l):
@@ -607,11 +641,15 @@ def clean_install_path():
if os.path.exists(g_opts.install_path_l):
shutil.rmtree(g_opts.install_path_l)
except OSError as error:
- LOGGER.error("Clean install path failed:can not delete install path "
- "%s\nPlease manually delete it." % str(error))
+ LOGGER.error(
+ "Clean install path failed:can not delete install path "
+ "%s\nPlease manually delete it." % str(error)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("Clean install path failed:can not delete install path "
- "%s\nPlease manually delete it." % str(error))
+ raise Exception(
+ "Clean install path failed:can not delete install path "
+ "%s\nPlease manually delete it." % str(error)
+ )
LOGGER.info("Clean install path success")
LOGGER.info("End clean Install path")
@@ -620,6 +658,7 @@ def clean_install_path():
# Clear environment variables
###########################################################################
+
# Resolution path
def gen_reg_string(text):
"""
@@ -633,7 +672,7 @@ def gen_reg_string(text):
in_s_list = in_s_str.split(os.sep)
reg_string = ""
for i in in_s_list:
- if (i == ""):
+ if i == "":
continue
else:
reg_string += r"\/" + i
@@ -653,12 +692,14 @@ def clean_environment():
# Clear environment variable CTDB_DATA
data_cmd = r"/^\s*export\s*CTDB_DATA=\".*\"$/d"
# Clear environment variable PATH about database
- path_cmd = (r"/^\s*export\s*PATH=\"%s\/bin\":\$PATH$/d"
- % gen_reg_string(g_opts.install_path_l))
+ path_cmd = r"/^\s*export\s*PATH=\"%s\/bin\":\$PATH$/d" % gen_reg_string(
+ g_opts.install_path_l
+ )
# Clear environment variable LD_LIBRARY_PATH about database
- lib_cmd = (r"/^\s*export\s*LD_LIBRARY_PATH=\"%s\/lib\":\"%s\/add-ons\".*$/d"
- % (gen_reg_string(g_opts.install_path_l),
- gen_reg_string(g_opts.install_path_l)))
+ lib_cmd = r"/^\s*export\s*LD_LIBRARY_PATH=\"%s\/lib\":\"%s\/add-ons\".*$/d" % (
+ gen_reg_string(g_opts.install_path_l),
+ gen_reg_string(g_opts.install_path_l),
+ )
# Clear environment variable CTDB_HOME
home_cmd = r"/^\s*export\s*CTDB_HOME=\".*\"$/d"
# Clear environment variable CANTIANLOG
@@ -670,13 +711,22 @@ def clean_environment():
mode_cmd = r"/^\s*export\s*CTSQL_SSL_MODE=.*$/d"
cipher_cmd = r"/^\s*export\s*CTSQL_SSL_KEY_PASSWD=.*$/d"
- cmds = [path_cmd, lib_cmd, home_cmd, cantianlog_cmd,
- ca_cmd, cert_cmd, key_cmd, mode_cmd, cipher_cmd]
+ cmds = [
+ path_cmd,
+ lib_cmd,
+ home_cmd,
+ cantianlog_cmd,
+ ca_cmd,
+ cert_cmd,
+ key_cmd,
+ mode_cmd,
+ cipher_cmd,
+ ]
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(JS_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(JS_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- g_opts.running_mode = json_data['M_RUNING_MODE'].strip()
+ g_opts.running_mode = json_data["M_RUNING_MODE"].strip()
if g_opts.running_mode.lower() in VALID_SINGLE_MYSQL_RUNNING_MODE:
mysql_bin_cmd = r"/^\s*export\s*MYSQL_BIN_DIR=\".*\"$/d"
mysql_code_cmd = r"/^\s*export\s*MYSQL_CODE_DIR=\".*\"$/d"
@@ -698,6 +748,7 @@ def clean_environment():
raise Exception(err_msg)
LOGGER.info("End clean user environment variables...")
+
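Each entry in cmds is a sed delete expression; running them with sed -i strips the matching export lines from the user's profile in place. A small demonstration against a throwaway file (paths are illustrative; assumes GNU sed):

    import subprocess

    demo_profile = "/tmp/demo_bashrc"  # illustrative
    with open(demo_profile, "w") as fp:
        fp.write('export CTDB_HOME="/opt/cantian"\n')
        fp.write('alias ll="ls -l"\n')

    home_cmd = r"/^\s*export\s*CTDB_HOME=\".*\"$/d"  # same expression as above
    subprocess.run(["sed", "-i", home_cmd, demo_profile], check=True)

    print(open(demo_profile).read())  # only the alias line survives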
def read_ifile(ifile, keyword):
if not os.path.isfile(ifile):
LOGGER.error("The value of IFILE '{}' is not exists.".format(ifile))
@@ -705,7 +756,7 @@ def read_ifile(ifile, keyword):
raise Exception("The value of IFILE '{}' is not exists.".format(ifile))
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(ifile, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(ifile, flags, modes), "r") as fp:
for line in fp:
items = line.split("=", 1)
if len(items) == 2 and items[0].strip() == keyword:
@@ -735,7 +786,7 @@ def read_cantiand_cfg(keyword):
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(cantiand_cfg_file, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(cantiand_cfg_file, flags, modes), "r") as fp:
for line in fp:
items = line.split("=", 1)
if len(items) != 2:
@@ -757,20 +808,25 @@ def get_instance_id():
"""
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- _cmd = ("ps ux | grep -v grep | grep cantiand "
- "| grep -w '\-D %s' |awk '{print $2}'") % g_opts.gs_data_path
- with os.fdopen(os.open(CANTIAN_CONF_FILE, flags, modes), 'r') as fp:
- json_data = json.load(fp)
- g_opts.running_mode = json_data['RUNNING_MODE'].strip()
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_IN_CLUSTER, CANTIAND_WITH_MYSQL_ST]:
- _cmd = ("ps ux | grep -v grep | grep mysqld |awk '{print $2}'")
+ _cmd = (
+ "ps ux | grep -v grep | grep cantiand " "| grep -w '\-D %s' |awk '{print $2}'"
+ ) % g_opts.gs_data_path
+ with os.fdopen(os.open(CANTIAN_CONF_FILE, flags, modes), "r") as fp:
+ json_data = json.load(fp)
+ g_opts.running_mode = json_data["RUNNING_MODE"].strip()
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_ST,
+ ]:
+ _cmd = "ps ux | grep -v grep | grep mysqld |awk '{print $2}'"
status, output, _ = _exec_popen(_cmd)
if status:
- LOGGER.error("Failed to execute cmd: %s. Error:%s." % (str(_cmd),
- str(output)))
+ LOGGER.error("Failed to execute cmd: %s. Error:%s." % (str(_cmd), str(output)))
if FORCE_UNINSTALL != "force":
- raise Exception("Failed to execute cmd: %s. Error:%s." % (str(_cmd),
- str(output)))
+ raise Exception(
+ "Failed to execute cmd: %s. Error:%s." % (str(_cmd), str(output))
+ )
# process exists
return output
@@ -782,18 +838,26 @@ def get_error_id(error_name):
output: NA
"""
if error_name == "installdb.sh":
- _cmd = ("ps ux | grep -v grep | grep installdb.sh | grep P | grep cantiand "
- "| awk '{print $2}'")
+ _cmd = (
+ "ps ux | grep -v grep | grep installdb.sh | grep P | grep cantiand "
+ "| awk '{print $2}'"
+ )
elif error_name == "cantian_start.py":
- _cmd = ("ps ux | grep -v grep | grep python | grep cantian_start.py "
- "| awk '{print $2}'")
+ _cmd = (
+ "ps ux | grep -v grep | grep python | grep cantian_start.py "
+ "| awk '{print $2}'"
+ )
status, output, error = _exec_popen(_cmd)
if status:
- LOGGER.error("Failed to execute cmd: %s. Output:%s. Error:%s" % (str(_cmd),
- str(output), str(error)))
+ LOGGER.error(
+ "Failed to execute cmd: %s. Output:%s. Error:%s"
+ % (str(_cmd), str(output), str(error))
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("Failed to execute cmd: %s. Output:%s. Error:%s" % (str(_cmd),
- str(output), str(error)))
+ raise Exception(
+ "Failed to execute cmd: %s. Output:%s. Error:%s"
+ % (str(_cmd), str(output), str(error))
+ )
# process exists
return output
@@ -808,11 +872,17 @@ def kill_instance(instance_pid):
LOGGER.info("kill process cmd: %s" % kill_cmd_tmp)
ret_code_1, stdout, stderr = _exec_popen(kill_cmd_tmp)
if ret_code_1 and "No such process" not in stderr:
- LOGGER.error("kill process %s failed."
- "ret_code : %s, stdout : %s, stderr : %s" % (instance_pid, ret_code_1, stdout, stderr))
+ LOGGER.error(
+ "kill process %s failed."
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (instance_pid, ret_code_1, stdout, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("kill process %s failed."
- "ret_code : %s, stdout : %s, stderr : %s" % (instance_pid, ret_code_1, stdout, stderr))
+ raise Exception(
+ "kill process %s failed."
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (instance_pid, ret_code_1, stdout, stderr)
+ )
check_process_status("cantiand")
LOGGER.info("Kill cantiand instance succeed")
@@ -832,9 +902,15 @@ def kill_extra_process():
LOGGER.info("kill process cmd: %s" % kill_cmd_tmp)
ret_code, output, error = _exec_popen(kill_cmd_tmp)
if ret_code and "No such process" not in error:
- LOGGER.error("kill extra process failed. Output:%s. Error:%s." % (str(output), str(error)))
+ LOGGER.error(
+ "kill extra process failed. Output:%s. Error:%s."
+ % (str(output), str(error))
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("kill extra process failed. Output:%s. Error:%s." % (str(output), str(error)))
+ raise Exception(
+ "kill extra process failed. Output:%s. Error:%s."
+ % (str(output), str(error))
+ )
check_process_status(extra_process)
LOGGER.info("Kill %s succeed" % extra_process)
@@ -865,18 +941,28 @@ def check_process_status(process_name):
def kill_process(process_name):
- kill_cmd_1 = (r"proc_pid_list=`ps ux | grep %s | grep -v grep"
- r"|awk '{print $2}'` && " % process_name)
- kill_cmd_1 += (r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
- r"$proc_pid_list | xargs kill -9; exit 0; fi)")
+ kill_cmd_1 = (
+ r"proc_pid_list=`ps ux | grep %s | grep -v grep"
+ r"|awk '{print $2}'` && " % process_name
+ )
+ kill_cmd_1 += (
+ r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
+ r"$proc_pid_list | xargs kill -9; exit 0; fi)"
+ )
LOGGER.info("kill process cmd: %s" % kill_cmd_1)
ret_code_2, stdout, stderr = _exec_popen(kill_cmd_1)
if ret_code_2:
- LOGGER.error("kill process %s faild."
- "ret_code : %s, stdout : %s, stderr : %s" % (process_name, ret_code_2, stdout, stderr))
+ LOGGER.error(
+ "kill process %s faild."
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (process_name, ret_code_2, stdout, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("kill process %s faild."
- "ret_code : %s, stdout : %s, stderr : %s" % (process_name, ret_code_2, stdout, stderr))
+ raise Exception(
+ "kill process %s faild."
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (process_name, ret_code_2, stdout, stderr)
+ )
def stop_instance():
@@ -902,7 +988,7 @@ def stop_instance():
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
- host_ip = lsnr_addr.split(',')[0]
+ host_ip = lsnr_addr.split(",")[0]
# if the cantian process not exists, and disable sysdba user
# tell user the user name and password input interactive are
@@ -910,11 +996,14 @@ def stop_instance():
instance_pid = get_instance_id()
# specify -P parameter, db password is supported
if not instance_pid and g_opts.db_passwd:
- log("Notice: Instance '%s' has been stopped." %
- g_opts.gs_data_path)
- log(("Notice: The Database username and password"
- " that are interactive entered "
- "will not be verified correct and used."))
+ log("Notice: Instance '%s' has been stopped." % g_opts.gs_data_path)
+ log(
+ "Notice: The database username and password"
+ " entered interactively "
+ "will not be verified or used."
+ )
kill_process("mysqld")
if g_opts.use_gss:
kill_process("gssd")
@@ -936,24 +1025,34 @@ def stop_instance():
install_path_l = g_opts.install_path_l
gs_data_path = g_opts.gs_data_path
tmp_cmd = "%s/bin/shutdowndb.sh -h %s -p %s -w -m immediate -D %s" % (
- install_path_l, host_ip, lsnr_port, gs_data_path)
+ install_path_l,
+ host_ip,
+ lsnr_port,
+ gs_data_path,
+ )
else:
- tmp_cmd = ("echo '%s' | %s/bin/shutdowndb.sh"
- " -h %s -p %s -U %s -m immediate -W -D %s") % (
- g_opts.db_passwd,
- g_opts.install_path_l,
- host_ip,
- lsnr_port,
- g_opts.db_user,
- g_opts.gs_data_path)
+ tmp_cmd = (
+ "echo '%s' | %s/bin/shutdowndb.sh"
+ " -h %s -p %s -U %s -m immediate -W -D %s"
+ ) % (
+ g_opts.db_passwd,
+ g_opts.install_path_l,
+ host_ip,
+ lsnr_port,
+ g_opts.db_user,
+ g_opts.gs_data_path,
+ )
return_code_3, stdout_2, stderr_2 = _exec_popen(tmp_cmd)
if return_code_3:
g_opts.db_passwd = ""
stdout_2 = get_error_msg(stdout_2, stderr_2)
if (not g_opts.db_passwd) and stdout_2.find(
- "login as sysdba is prohibited") >= 0:
- stdout_2 += ("\nsysdba login is disabled, please specify -P "
- "parameter to input password, refer to --help.")
+ "login as sysdba is prohibited"
+ ) >= 0:
+ stdout_2 += (
+ "\nsysdba login is disabled, please specify -P "
+ "parameter to input password, refer to --help."
+ )
err_msg = "stop cantian instance failed. Error: %s" % stdout_2
LOGGER.error(err_msg)
@@ -980,20 +1079,20 @@ def get_error_msg(outmsg, errmsg):
def clean_archive_dir(json_data_deploy):
- db_type = json_data_deploy.get('db_type', '')
- deploy_mode = json_data_deploy.get('deploy_mode', '')
- archive_fs = json_data_deploy.get('storage_archive_fs', '')
+ db_type = json_data_deploy.get("db_type", "")
+ deploy_mode = json_data_deploy.get("deploy_mode", "")
+ archive_fs = json_data_deploy.get("storage_archive_fs", "")
uninstall_type = sys.argv[1]
- if db_type == '' or db_type == '0' or uninstall_type == "reserve":
+ if db_type == "" or db_type == "0" or uninstall_type == "reserve":
return
- node_id = json_data_deploy.get('node_id', '').strip()
+ node_id = json_data_deploy.get("node_id", "").strip()
if node_id == "":
err_msg = "node_id is not found."
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
- archive_logic_ip = json_data_deploy.get('archive_logic_ip', '').strip()
- storage_archive_fs = json_data_deploy.get('storage_archive_fs', '').strip()
+ archive_logic_ip = json_data_deploy.get("archive_logic_ip", "").strip()
+ storage_archive_fs = json_data_deploy.get("storage_archive_fs", "").strip()
archive_dir = os.path.join("/mnt/dbdata/remote", "archive_" + storage_archive_fs)
cantian_check_share_logic_ip_isvalid(archive_logic_ip, deploy_mode)
@@ -1001,26 +1100,38 @@ def clean_archive_dir(json_data_deploy):
cmd = "timeout 10 ls %s" % archive_dir
ret_code, _, stderr = _exec_popen(cmd)
if node_id == "0" and (ret_code == 0 or FORCE_UNINSTALL != "force"):
- cmd_str = "rm -rf %s/arch*.arc %s/*arch_file.tmp" % (archive_dir, archive_dir)
+ cmd_str = "rm -rf %s/arch*.arc %s/*arch_file.tmp" % (
+ archive_dir,
+ archive_dir,
+ )
ret_code, _, stderr = _exec_popen(cmd_str)
if ret_code:
LOGGER.error(
- "can not clean the archive dir %s, command: %s, output: %s" % (archive_dir, cmd_str, stderr))
+ "can not clean the archive dir %s, command: %s, output: %s"
+ % (archive_dir, cmd_str, stderr)
+ )
if FORCE_UNINSTALL != "force":
raise Exception(
- "can not clean the archive dir %s, command: %s, output: %s" % (archive_dir, cmd_str, stderr))
+ "can not clean the archive dir %s, command: %s, output: %s"
+ % (archive_dir, cmd_str, stderr)
+ )
LOGGER.info("cleaned archive files.")
else:
- arch_clean_cmd = f"dbstor --delete-file --fs-name={archive_fs} --file-name=archive"
+ arch_clean_cmd = (
+ f"dbstor --delete-file --fs-name={archive_fs} --file-name=archive"
+ )
try:
ret_code, output, stderr = _exec_popen(arch_clean_cmd)
if ret_code:
- LOGGER.info(f"Failed to execute command '{arch_clean_cmd}', error: {stderr}")
+ LOGGER.info(
+ f"Failed to execute command '{arch_clean_cmd}', error: {stderr}"
+ )
else:
LOGGER.info("Cleaned archive files using dbstor --delete-file.")
except Exception as e:
- LOGGER.info(f"Exception occurred while executing command '{arch_clean_cmd}': {str(e)}")
-
+ LOGGER.info(
+ f"Exception occurred while executing command '{arch_clean_cmd}': {str(e)}"
+ )
class CanTian(object):
@@ -1062,22 +1173,29 @@ class CanTian(object):
flags = os.O_WRONLY | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXOTH | stat.S_IWOTH
- with os.fdopen(os.open(CANTIAN_UNINSTALL_CONF_FILE, flags, modes), 'w') as fp:
+ with os.fdopen(
+ os.open(CANTIAN_UNINSTALL_CONF_FILE, flags, modes), "w"
+ ) as fp:
json.dump(cantian_uninstall_config_data, fp)
LOGGER.info("Uninstall cantiand finish ")
flags = os.O_RDWR | os.O_CREAT
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w+') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w+"
+ ) as load_fp:
start_parameters = json.load(load_fp)
- start_status_item = {'start_status': "default"}
+ start_status_item = {"start_status": "default"}
start_parameters.update(start_status_item)
load_fp.seek(0)
load_fp.truncate()
json.dump(start_parameters, load_fp)
except Exception as error:
LOGGER.info("Stop failed: " + str(error))
- LOGGER.info("Please refer to uninstall log \"%s\" for more detailed information." % g_opts.log_file)
+ LOGGER.info(
+ 'Please refer to uninstall log "%s" for more detailed information.'
+ % g_opts.log_file
+ )
raise ValueError(str(error)) from error
def cantian_uninstall(self):
@@ -1088,16 +1206,16 @@ class CanTian(object):
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(CANTIAN_UNINSTALL_CONF_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CANTIAN_UNINSTALL_CONF_FILE, flags, modes), "r") as fp:
json_data = json.load(fp)
- g_opts.clean_data_dir_on = json_data.get('CLEAN_DATA_DIR_ON', '')
- if json_data.get('USER_ENV_PATH', '').strip() == "":
+ g_opts.clean_data_dir_on = json_data.get("CLEAN_DATA_DIR_ON", "")
+ if json_data.get("USER_ENV_PATH", "").strip() == "":
g_opts.user_env_path = os.path.join("/home", user, ".bashrc")
else:
- g_opts.user_env_path = json_data.get('USER_ENV_PATH', '').strip()
- g_opts.install_path_l = json_data.get('INSTALL_PATH_L', '').strip()
- stat.S_IRUSR = json_data.get('S_IRUSR', '')
+ g_opts.user_env_path = json_data.get("USER_ENV_PATH", "").strip()
+ g_opts.install_path_l = json_data.get("INSTALL_PATH_L", "").strip()
+ stat.S_IRUSR = json_data.get("S_IRUSR", "")
g_opts.gs_data_path = "/mnt/dbdata/local/cantian/tmp/data"
@@ -1109,15 +1227,19 @@ class CanTian(object):
clean_archive_dir(json_data_deploy)
LOGGER.info("uninstall step 4")
- start_parameters = {'start_status': 'default', 'db_create_status': 'default'}
+ start_parameters = {"start_status": "default", "db_create_status": "default"}
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(CANTIAN_START_STATUS_FILE, flags, modes), 'w') as load_fp:
+ with os.fdopen(
+ os.open(CANTIAN_START_STATUS_FILE, flags, modes), "w"
+ ) as load_fp:
json.dump(start_parameters, load_fp)
LOGGER.info("uninstall step 5")
clean_install_path()
LOGGER.info("uninstall step 6")
- log("Cantiand was successfully removed from your computer, "
- "for more message please see %s." % g_opts.log_file)
+ log(
+ "Cantiand was successfully removed from your computer, "
+ "for more message please see %s." % g_opts.log_file
+ )
os.chmod(g_opts.log_file, stat.S_IWUSR + stat.S_IRUSR + stat.S_IRGRP)
@@ -1125,13 +1247,19 @@ class CanTian(object):
g_opts.tmp_fp.flush()
g_opts.tmp_fp.close()
- ret_code, cantiand_pid, stderr = _exec_popen('exit')
+ ret_code, cantiand_pid, stderr = _exec_popen("exit")
if ret_code:
- LOGGER.error("can not logout, command: exit"
- " ret_code : %s, stdout : %s, stderr : %s" % (ret_code, cantiand_pid, stderr))
+ LOGGER.error(
+ "can not logout, command: exit"
+ " ret_code : %s, stdout : %s, stderr : %s"
+ % (ret_code, cantiand_pid, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("can not logout, command: exit"
- " ret_code : %s, stdout : %s, stderr : %s" % (ret_code, cantiand_pid, stderr))
+ raise Exception(
+ "can not logout, command: exit"
+ " ret_code : %s, stdout : %s, stderr : %s"
+ % (ret_code, cantiand_pid, stderr)
+ )
def cantian_check_status(self):
g_opts.gs_data_path = "/mnt/dbdata/local/cantian/tmp/data"
diff --git a/pkg/deploy/action/cantian/exception.py b/pkg/deploy/action/cantian/exception.py
index c7691ab3c8f6928cf871b4ac48634fbee1684a1b..1021e1eaa5f02208bcac06b394ad1facbe16c1a6 100644
--- a/pkg/deploy/action/cantian/exception.py
+++ b/pkg/deploy/action/cantian/exception.py
@@ -1,2 +1,2 @@
class NormalException(Exception):
- pass
\ No newline at end of file
+ pass
diff --git a/pkg/deploy/action/cantian/get_config_info.py b/pkg/deploy/action/cantian/get_config_info.py
index 1f836d9be09463f2a5b57fb279270e3c5bd7f47b..e843cbd5efcd8323fb81e955319e28374161641a 100644
--- a/pkg/deploy/action/cantian/get_config_info.py
+++ b/pkg/deploy/action/cantian/get_config_info.py
@@ -8,19 +8,46 @@ INSTALL_SCPRIT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_DIR = os.path.abspath(os.path.join(INSTALL_SCPRIT_DIR, "../.."))
CONFIG_PARAMS_FILE = os.path.join(PKG_DIR, "config", "deploy_param.json")
-CANTIAN_CONFIG_PARAMS_FILE = os.path.join(PKG_DIR, "action", "cantian", "cantian_config.json")
-CANTIAN_CONFIG_PARAMS_FILE_BACKUP = "/opt/cantian/backup/files/cantian/cantian_config.json"
+CANTIAN_CONFIG_PARAMS_FILE = os.path.join(
+ PKG_DIR, "action", "cantian", "cantian_config.json"
+)
+CANTIAN_CONFIG_PARAMS_FILE_BACKUP = (
+ "/opt/cantian/backup/files/cantian/cantian_config.json"
+)
NUMA_CONFIG_FILE = "/opt/cantian/cantian/cfg/cpu_config.json"
-CANTIAN_START_STATUS_FILE = os.path.join("/opt/cantian/cantian", "cfg", "start_status.json")
-CANTIAN_START_CONFIG_FILE = os.path.join(PKG_DIR, "config", "container_conf", "init_conf", "start_config.json")
-CANTIAN_MEM_SPEC_FILE = os.path.join(PKG_DIR, "config", "container_conf", "init_conf", "mem_spec")
+CANTIAN_START_STATUS_FILE = os.path.join(
+ "/opt/cantian/cantian", "cfg", "start_status.json"
+)
+CANTIAN_START_CONFIG_FILE = os.path.join(
+ PKG_DIR, "config", "container_conf", "init_conf", "start_config.json"
+)
+CANTIAN_MEM_SPEC_FILE = os.path.join(
+ PKG_DIR, "config", "container_conf", "init_conf", "mem_spec"
+)
ENV_FILE = os.path.join(PKG_DIR, "action", "env.sh")
info = {}
-kernel_params_list = ['SHM_CPU_GROUP_INFO', 'LARGE_POOL_SIZE', 'CR_POOL_COUNT', 'CR_POOL_SIZE',
- 'TEMP_POOL_NUM', 'BUF_POOL_NUM', 'LOG_BUFFER_SIZE', 'LOG_BUFFER_COUNT',
- 'SHARED_POOL_SIZE', 'DATA_BUFFER_SIZE', 'TEMP_BUFFER_SIZE', 'SESSIONS',
- 'SHM_MEMORY_REDUCTION_RATIO', "VARIANT_MEMORY_AREA_SIZE", "DTC_RCY_PARAL_BUF_LIST_SIZE"]
-mysql_kernel_params_list = ['max_connections', 'table_open_cache', 'table_open_cache_instances']
+kernel_params_list = [
+ "SHM_CPU_GROUP_INFO",
+ "LARGE_POOL_SIZE",
+ "CR_POOL_COUNT",
+ "CR_POOL_SIZE",
+ "TEMP_POOL_NUM",
+ "BUF_POOL_NUM",
+ "LOG_BUFFER_SIZE",
+ "LOG_BUFFER_COUNT",
+ "SHARED_POOL_SIZE",
+ "DATA_BUFFER_SIZE",
+ "TEMP_BUFFER_SIZE",
+ "SESSIONS",
+ "SHM_MEMORY_REDUCTION_RATIO",
+ "VARIANT_MEMORY_AREA_SIZE",
+ "DTC_RCY_PARAL_BUF_LIST_SIZE",
+]
+mysql_kernel_params_list = [
+ "max_connections",
+ "table_open_cache",
+ "table_open_cache_instances",
+]
numa_params_list = ["CANTIAN_NUMA_CPU_INFO", "MYSQL_NUMA_CPU_INFO"]
MEM_SPEC = {
"0": {
@@ -35,7 +62,7 @@ MEM_SPEC = {
"CR_POOL_SIZE": "256M",
"max_connections": "128",
"table_open_cache": "5120",
- "table_open_cache_instances": "4"
+ "table_open_cache_instances": "4",
},
"1": {
"SESSIONS": "1024",
@@ -49,7 +76,7 @@ MEM_SPEC = {
"CR_POOL_SIZE": "512M",
"max_connections": "1024",
"table_open_cache": "10240",
- "table_open_cache_instances": "8"
+ "table_open_cache_instances": "8",
},
"2": {
"SESSIONS": "2048",
@@ -63,7 +90,7 @@ MEM_SPEC = {
"CR_POOL_SIZE": "1G",
"max_connections": "2048",
"table_open_cache": "20480",
- "table_open_cache_instances": "16"
+ "table_open_cache_instances": "16",
},
"3": {
"SESSIONS": "4096",
@@ -77,8 +104,8 @@ MEM_SPEC = {
"CR_POOL_SIZE": "2G",
"max_connections": "4096",
"table_open_cache": "40960",
- "table_open_cache_instances": "32"
- }
+ "table_open_cache_instances": "32",
+ },
}
@@ -116,13 +143,13 @@ with open(ENV_FILE, "r", encoding="utf-8") as f:
def get_value(param):
if param == "mysql_user":
- return info.get('deploy_user').split(':')[0]
+ return info.get("deploy_user").split(":")[0]
if param == "mysql_group":
- return info.get('deploy_user').split(':')[1]
- if param == 'cantian_in_container':
- return info.get('cantian_in_container', '0')
- if param == 'SYS_PASSWORD':
- return info_cantian.get('SYS_PASSWORD', "")
+ return info.get("deploy_user").split(":")[1]
+ if param == "cantian_in_container":
+ return info.get("cantian_in_container", "0")
+ if param == "SYS_PASSWORD":
+ return info_cantian.get("SYS_PASSWORD", "")
if param == "deploy_user":
for line in env_config:
if line.startswith("cantian_user"):
@@ -131,12 +158,12 @@ def get_value(param):
for line in env_config:
if line.startswith("cantian_group"):
return line.split("=")[1].strip("\n").strip('"')
- if param == 'CANTIAN_START_STATUS':
- return info_cantian_start.get('start_status', "")
- if param == 'CANTIAN_DB_CREATE_STATUS':
- return info_cantian_start.get('db_create_status', "")
- if param == 'CANTIAN_EVER_START':
- return info_cantian_start.get('ever_started', "")
+ if param == "CANTIAN_START_STATUS":
+ return info_cantian_start.get("start_status", "")
+ if param == "CANTIAN_DB_CREATE_STATUS":
+ return info_cantian_start.get("db_create_status", "")
+ if param == "CANTIAN_EVER_START":
+ return info_cantian_start.get("ever_started", "")
if param in numa_params_list:
return numa_config.get(param, "")
if param in kernel_params_list:
@@ -144,9 +171,11 @@ def get_value(param):
if param in mysql_kernel_params_list:
return info_cantian_config.get(param, "")
if param == "deploy_mode":
- if info.get('deploy_mode', ""):
- return info.get('deploy_mode')
- return "dbstor" if info.get('deploy_policy', "") in ["ModeB", "ModeC"] else "file"
+ if info.get("deploy_mode", ""):
+ return info.get("deploy_mode")
+ return (
+ "dbstor" if info.get("deploy_policy", "") in ["ModeB", "ModeC"] else "file"
+ )
return info.get(param, "")
@@ -155,4 +184,3 @@ if __name__ == "__main__":
_param = sys.argv[1]
res = get_value(_param)
print(res)
-
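Review note: a minimal, self-contained sketch of the get_value() dispatch refactored above, using an inline dict in place of the real deploy_param.json (the user and policy values here are made up for illustration):

    # Illustrative stand-in for deploy_param.json; keys mirror the script's.
    info = {"deploy_user": "cantian:cantiangroup", "deploy_policy": "ModeB"}

    def get_value(param):
        if param == "mysql_user":
            return info.get("deploy_user").split(":")[0]
        if param == "mysql_group":
            return info.get("deploy_user").split(":")[1]
        if param == "deploy_mode":
            if info.get("deploy_mode", ""):
                return info.get("deploy_mode")
            return "dbstor" if info.get("deploy_policy", "") in ["ModeB", "ModeC"] else "file"
        return info.get(param, "")

    assert get_value("mysql_user") == "cantian"
    assert get_value("deploy_mode") == "dbstor"  # ModeB/ModeC fall back to dbstor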
diff --git a/pkg/deploy/action/cantian/log.py b/pkg/deploy/action/cantian/log.py
index c6de9e4d67bd47dcea257bce8fbb0f5f34cb12f0..f67da3c573107731e98604ce14b0e7bb12bd95c3 100644
--- a/pkg/deploy/action/cantian/log.py
+++ b/pkg/deploy/action/cantian/log.py
@@ -23,9 +23,9 @@ CONSOLE_CONF = {
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
"logging_context_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s"
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
}
}
@@ -48,14 +48,30 @@ def _get_log_file_path(project):
os.makedirs(logger_dir)
return os.path.join(logger_dir, "{}.log".format(project))
- return ''
+ return ""
SENSITIVE_STR = [
- 'Password', 'passWord', 'PASSWORD', 'password', 'Pswd',
- 'PSWD', 'pwd', 'signature', 'HmacSHA256', 'newPasswd',
- 'private', 'certfile', 'secret', 'token', 'Token', 'pswd',
- 'passwd', 'mysql -u', 'session', 'cookie'
+ "Password",
+ "passWord",
+ "PASSWORD",
+ "password",
+ "Pswd",
+ "PSWD",
+ "pwd",
+ "signature",
+ "HmacSHA256",
+ "newPasswd",
+ "private",
+ "certfile",
+ "secret",
+ "token",
+ "Token",
+ "pswd",
+ "passwd",
+ "mysql -u",
+ "session",
+ "cookie",
]
@@ -83,7 +99,7 @@ def setup(project_name):
stream_handler.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")
+ datefmt=log_config.get("log_date_format"),
)
)
log_root.addHandler(stream_handler)
@@ -91,12 +107,14 @@ def setup(project_name):
log_path = _get_log_file_path(project_name)
if log_path:
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
file_log.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")
+ datefmt=log_config.get("log_date_format"),
)
)
log_root.addHandler(file_log)
@@ -113,4 +131,4 @@ def setup(project_name):
LOGGER = setup(os.path.basename(LOG_FILE).split(".")[0])
log_directory = log_config.get("log_dir")
os.chmod(log_directory, 0o750)
-os.chmod(f'{str(Path(log_directory, os.path.basename(LOG_FILE)))}', 0o640)
+os.chmod(f"{str(Path(log_directory, os.path.basename(LOG_FILE)))}", 0o640)
diff --git a/pkg/deploy/action/cantian/options.py b/pkg/deploy/action/cantian/options.py
index 66344a59ab4be22627ef894396464d7eed7c65b1..82daabadaebd953154576ff641742a14947f5148 100644
--- a/pkg/deploy/action/cantian/options.py
+++ b/pkg/deploy/action/cantian/options.py
@@ -73,4 +73,4 @@ class Options(object):
self.cert_encrypt_pwd = ""
- self.storage_dbstor_fs = ""
\ No newline at end of file
+ self.storage_dbstor_fs = ""
diff --git a/pkg/deploy/action/cantian_common/crypte_adapter.py b/pkg/deploy/action/cantian_common/crypte_adapter.py
index f4c5858a42352df7c1006a2c01c1c11b4415f708..3deabebd077bbcf939efbf58567a22d20bb08d37 100644
--- a/pkg/deploy/action/cantian_common/crypte_adapter.py
+++ b/pkg/deploy/action/cantian_common/crypte_adapter.py
@@ -31,12 +31,18 @@ class KmcResolve(object):
if mode == "decrypted":
ret_pwd = kmc_adapter.decrypt(plain_text)
except Exception as error:
- raise Exception("Failed to %s password of user [sys]. Error: %s" % (mode, str(error))) from error
+ raise Exception(
+ "Failed to %s password of user [sys]. Error: %s" % (mode, str(error))
+ ) from error
finally:
kmc_adapter.finalize()
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
return ret_pwd
def encrypted(self, pwd):
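Review note: the LD_LIBRARY_PATH scrub in the finally block above is easy to verify in isolation; a runnable sketch with a fabricated path value:

    import os

    # Fabricated value; only the filtering logic matches the code above.
    os.environ["LD_LIBRARY_PATH"] = "/usr/lib:/opt/cantian/dbstor/lib:/opt/x/lib"
    split_env = os.environ["LD_LIBRARY_PATH"].split(":")
    filtered_env = [
        single_env for single_env in split_env
        if "/opt/cantian/dbstor/lib" not in single_env
    ]
    os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
    assert os.environ["LD_LIBRARY_PATH"] == "/usr/lib:/opt/x/lib"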
diff --git a/pkg/deploy/action/cantian_common/exec_sql.py b/pkg/deploy/action/cantian_common/exec_sql.py
index f9b306b38b827ef493df394c1d7e921b9adaaf8a..d5b2b2218442adb7ca180b0722c24bdd29a4a430 100644
--- a/pkg/deploy/action/cantian_common/exec_sql.py
+++ b/pkg/deploy/action/cantian_common/exec_sql.py
@@ -2,17 +2,18 @@ import subprocess
import sys
import os
import signal
+
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(CURRENT_PATH, "../"))
from cantian_common.crypte_adapter import KmcResolve
-ZSQL_INI_PATH = '/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini'
+ZSQL_INI_PATH = "/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini"
TIME_OUT = 5
def file_reader(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.read()
@@ -21,11 +22,11 @@ def close_child_process(proc):
os.killpg(proc.pid, signal.SIGKILL)
except ProcessLookupError as err:
_ = err
- return 'success'
+ return "success"
except Exception as err:
return str(err)
- return 'success'
+ return "success"
def exec_popen(cmd, timeout=TIME_OUT):
@@ -35,8 +36,14 @@ def exec_popen(cmd, timeout=TIME_OUT):
return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
try:
@@ -62,27 +69,30 @@ class ExecSQL(object):
def decrypted(self):
ctsql_ini_data = file_reader(ZSQL_INI_PATH)
- encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find('=') + 1:].strip()
+ encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find("=") + 1 :].strip()
ctsql_passwd = KmcResolve.kmc_resolve_password("decrypted", encrypt_pwd)
return ctsql_passwd
def execute(self):
ctsql_passwd = self.decrypted()
- sql = ("source ~/.bashrc && echo -e \"%s\" | "
- "ctsql sys@127.0.0.1:1611 -q -c \"%s\"") % (ctsql_passwd, self.sql)
+ sql = (
+ 'source ~/.bashrc && echo -e "%s" | ' 'ctsql sys@127.0.0.1:1611 -q -c "%s"'
+ ) % (ctsql_passwd, self.sql)
return_code, stdout, stderr = exec_popen(sql)
if return_code:
output = stdout + stderr
- err_msg = "Exec [%s] failed, details: %s" % (self.sql, output.replace(ctsql_passwd, "***"))
+ err_msg = "Exec [%s] failed, details: %s" % (
+ self.sql,
+ output.replace(ctsql_passwd, "***"),
+ )
raise Exception(err_msg)
return stdout
-if __name__ == '__main__':
+if __name__ == "__main__":
_sql_cmd = input()
exec_sql = ExecSQL(_sql_cmd)
try:
print(exec_sql.execute())
except Exception as e:
exit(str(e))
-
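Review note: exec_popen() pairs preexec_fn=os.setsid with the os.killpg() call in close_child_process() so a timed-out command cannot leave orphans; a condensed, runnable sketch of that pattern (command and timeout values are illustrative):

    import os
    import signal
    import subprocess

    def run_via_bash(cmd, timeout=5):
        # A new session (os.setsid) puts bash and its children in one process
        # group, so the whole tree can be killed on timeout.
        proc = subprocess.Popen(["bash"], stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                preexec_fn=os.setsid)
        try:
            out, err = proc.communicate(cmd.encode() + os.linesep.encode(),
                                        timeout=timeout)
        except subprocess.TimeoutExpired:
            os.killpg(proc.pid, signal.SIGKILL)  # mirrors close_child_process()
            return 1, "", "timeout"
        return proc.returncode, out.decode(), err.decode()

    print(run_via_bash("echo hello"))  # (0, 'hello\n', '')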
diff --git a/pkg/deploy/action/cantian_common/get_config_info.py b/pkg/deploy/action/cantian_common/get_config_info.py
index 6828348c15efa1920debd3a605d0cdbdced60431..ae6993470c80e5a03bcb25c66d1ff23584073811 100644
--- a/pkg/deploy/action/cantian_common/get_config_info.py
+++ b/pkg/deploy/action/cantian_common/get_config_info.py
@@ -35,4 +35,4 @@ def get_value(param):
if __name__ == "__main__":
_param = sys.argv[1]
res = get_value(_param)
- print(res)
\ No newline at end of file
+ print(res)
diff --git a/pkg/deploy/action/cantian_common/mysql_shell.py b/pkg/deploy/action/cantian_common/mysql_shell.py
index 1a1ab68089beb8596beda82ce5067fe2e89aeb6b..81b68d52fbe9daec805241d9a1ea3d62ced3468a 100644
--- a/pkg/deploy/action/cantian_common/mysql_shell.py
+++ b/pkg/deploy/action/cantian_common/mysql_shell.py
@@ -10,8 +10,18 @@ import shlex
LOCK_INSTANCE_FOR_BACKUP_TIMEOUT = 100000
+
class MysqlShell:
- def __init__(self, mysql_cmd, user, password='', database=None, host=None, port=None, socket=None):
+ def __init__(
+ self,
+ mysql_cmd,
+ user,
+ password="",
+ database=None,
+ host=None,
+ port=None,
+ socket=None,
+ ):
self.mysql_cmd = mysql_cmd
self.user = user
self.password = password
@@ -23,22 +33,24 @@ class MysqlShell:
self.process = None
def start_session(self, timeout=10):
- cmd = shlex.split(self.mysql_cmd) + ['-u', self.user]
+ cmd = shlex.split(self.mysql_cmd) + ["-u", self.user]
if self.password:
- cmd.append('-p' + self.password)
+ cmd.append("-p" + self.password)
else:
- cmd.append('--skip-password')
+ cmd.append("--skip-password")
if self.database:
- cmd.extend(['-D', self.database])
+ cmd.extend(["-D", self.database])
if self.host:
- cmd.extend(['--host', self.host])
+ cmd.extend(["--host", self.host])
if self.port:
- cmd.extend(['--port', str(self.port)])
+ cmd.extend(["--port", str(self.port)])
if self.socket:
- cmd.extend(['--socket', self.socket])
+ cmd.extend(["--socket", self.socket])
self.master_fd, slave_fd = pty.openpty()
- self.process = subprocess.Popen(cmd, stdin=slave_fd, stdout=slave_fd, stderr=slave_fd, close_fds=True)
+ self.process = subprocess.Popen(
+ cmd, stdin=slave_fd, stdout=slave_fd, stderr=slave_fd, close_fds=True
+ )
# 检查连接是否成功
try:
@@ -58,14 +70,14 @@ class MysqlShell:
if self.master_fd in select.select([self.master_fd], [], [], 1)[0]:
data = os.read(self.master_fd, 1024).decode()
output.append(data)
- if 'mysql>' in data:
+ if "mysql>" in data:
break
- elif 'Access denied' in data:
+ elif "Access denied" in data:
raise Exception("Access denied for user.")
except OSError:
break
time.sleep(0.1)
- return ''.join(output)
+ return "".join(output)
def execute_command(self, command, timeout=None):
if self.process is not None:
@@ -77,7 +89,7 @@ class MysqlShell:
def close_session(self, timeout=10):
try:
if self.process is not None:
- os.write(self.master_fd, b'exit\n')
+ os.write(self.master_fd, b"exit\n")
start_time = time.time()
while self.process.poll() is None:
if (time.time() - start_time) > timeout:
@@ -93,13 +105,15 @@ class MysqlShell:
def lock_instance_for_backup():
- parser = argparse.ArgumentParser(description='Lock MySQL instance for backup.')
- parser.add_argument('--mysql_cmd', type=str, required=True, help='Path to mysql executable')
- parser.add_argument('--user', type=str, required=True, help='MySQL username')
- parser.add_argument('--host', type=str, help='MySQL host')
- parser.add_argument('--port', type=int, help='MySQL port')
- parser.add_argument('--database', type=str, help='Database name')
- parser.add_argument('--socket', type=str, help='MySQL socket')
+ parser = argparse.ArgumentParser(description="Lock MySQL instance for backup.")
+ parser.add_argument(
+ "--mysql_cmd", type=str, required=True, help="Path to mysql executable"
+ )
+ parser.add_argument("--user", type=str, required=True, help="MySQL username")
+ parser.add_argument("--host", type=str, help="MySQL host")
+ parser.add_argument("--port", type=int, help="MySQL port")
+ parser.add_argument("--database", type=str, help="Database name")
+ parser.add_argument("--socket", type=str, help="MySQL socket")
args = parser.parse_args()
@@ -114,7 +128,7 @@ def lock_instance_for_backup():
database=args.database,
host=args.host,
port=args.port,
- socket=args.socket
+ socket=args.socket,
)
try:
mysql_shell.start_session()
@@ -154,5 +168,5 @@ def lock_instance_for_backup():
mysql_shell.close_session(timeout=10)
-if __name__ == '__main__':
+if __name__ == "__main__":
lock_instance_for_backup()
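Review note: start_session() relies on a pty plus a select() loop to detect the mysql> prompt; the sketch below reproduces that handshake with a stub child that just prints the prompt, since a real mysql client is not assumed here:

    import os
    import pty
    import select
    import subprocess

    # Stub child that prints the prompt; stands in for the mysql client.
    master_fd, slave_fd = pty.openpty()
    proc = subprocess.Popen(["bash", "-c", "printf 'mysql> '; sleep 1"],
                            stdin=slave_fd, stdout=slave_fd, stderr=slave_fd,
                            close_fds=True)
    output = []
    while True:
        # Poll the pty master until the prompt arrives.
        if master_fd in select.select([master_fd], [], [], 1)[0]:
            data = os.read(master_fd, 1024).decode()
            output.append(data)
            if "mysql>" in data:
                break
    print("".join(output))  # session would be considered established here
    proc.kill()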
diff --git a/pkg/deploy/action/check_pwd.py b/pkg/deploy/action/check_pwd.py
index 2c0b663d49eb04f1f88fd3921d980f37b4cdce80..ea872490ca5879f03539c794929934ceb764ac43 100644
--- a/pkg/deploy/action/check_pwd.py
+++ b/pkg/deploy/action/check_pwd.py
@@ -6,13 +6,13 @@ from OpenSSL import crypto
from om_log import LOGGER as LOG
PWD_LEN = 8
-CERT_FILE_PATH="/opt/cantian/certificate/"
+CERT_FILE_PATH = "/opt/cantian/certificate/"
class PassWordChecker:
def __init__(self, pwd):
self.pwd = pwd
- self.user = 'ctcliuser'
+ self.user = "ctcliuser"
def verify_new_passwd(self, shortest_len=PWD_LEN):
"""
@@ -36,14 +36,19 @@ class PassWordChecker:
if passwd_set & cases:
types += 1
if types < 3:
- LOG.error("Error: Password must contains at least three different types of characters.")
+ LOG.error(
+ "Error: Password must contains at least three different types of characters."
+ )
return 1
# Only can contains enumerated cases
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- LOG.error("Error: There are characters that are not allowed in the password: '%s'", "".join(un_cases))
+ LOG.error(
+ "Error: There are characters that are not allowed in the password: '%s'",
+ "".join(un_cases),
+ )
return 1
return 0
@@ -51,21 +56,26 @@ class PassWordChecker:
def check_cert_passwd(self):
cert_file = os.path.join(CERT_FILE_PATH, "mes.crt")
key_file = os.path.join(CERT_FILE_PATH, "mes.key")
- # 加载证书
- with open(cert_file, 'rb') as f:
+ # Load the certificate
+ with open(cert_file, "rb") as f:
cert_data = f.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
# 加载私钥
- with open(key_file, 'rb') as f:
+ with open(key_file, "rb") as f:
key_data = f.read()
- private_key = serialization.load_pem_private_key(key_data, self.pwd.encode("utf-8"), default_backend())
+ private_key = serialization.load_pem_private_key(
+ key_data, self.pwd.encode("utf-8"), default_backend()
+ )
# 获取证书的公钥
cert_public_key = cert.get_pubkey().to_cryptography_key()
# 校验证书和私钥是否匹配
# 比较证书的公钥和私钥的公钥是否匹配
- if cert_public_key.public_numbers() == private_key.public_key().public_numbers():
+ if (
+ cert_public_key.public_numbers()
+ == private_key.public_key().public_numbers()
+ ):
LOG.info("Certificate and private key are valid.")
return 0
else:
@@ -73,13 +83,13 @@ class PassWordChecker:
return 1
-if __name__ == '__main__':
+if __name__ == "__main__":
pwd_checker = PassWordChecker(input())
action = "check_pwd"
if len(sys.argv) > 1:
action = sys.argv[1]
operator = {
"check_pwd": pwd_checker.verify_new_passwd,
- "check_cert_pwd": pwd_checker.check_cert_passwd
+ "check_cert_pwd": pwd_checker.check_cert_passwd,
}
operator.get(action)()
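Review note: verify_new_passwd() counts how many character classes a password draws from via set intersection; a minimal sketch of that rule (the special-character set below is an assumption, the script's own set sits outside this hunk):

    import string

    SPECIALS = set("~!@#$%^&*()-_=+\\|[{};:'\",<.>/?")  # assumed special set

    def class_count(pwd):
        classes = [set(string.ascii_uppercase), set(string.ascii_lowercase),
                   set(string.digits), SPECIALS]
        # A class counts once if the password intersects it at all.
        return sum(1 for chars in classes if set(pwd) & chars)

    assert class_count("Passw0rd!") >= 3  # upper + lower + digit + special
    assert class_count("password") < 3    # a single class: rejected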
diff --git a/pkg/deploy/action/clear_history_version.py b/pkg/deploy/action/clear_history_version.py
index 97b4065b546df4e57a943fd919d1494a7d78fa56..cb6eaf9c0b5383fdd4d32ea929b3050455750df4 100644
--- a/pkg/deploy/action/clear_history_version.py
+++ b/pkg/deploy/action/clear_history_version.py
@@ -2,12 +2,12 @@ import os
import time
from pathlib import Path
-VERSION_PATH = '/opt/cantian/upgrade_backup'
+VERSION_PATH = "/opt/cantian/upgrade_backup"
def format_creation_time(file_path):
ori_create_time = os.path.getctime(file_path)
- return time.strftime('%Y%m%d%H%M%S', time.localtime(ori_create_time))
+ return time.strftime("%Y%m%d%H%M%S", time.localtime(ori_create_time))
def delete_version(version_path):
@@ -25,9 +25,14 @@ def delete_version(version_path):
def execute():
version_names = os.listdir(VERSION_PATH)
- version_info = [(str(Path(VERSION_PATH, name)), format_creation_time(str(Path(VERSION_PATH, name))))
- for name in version_names
- if name.startswith('cantian_upgrade_bak')]
+ version_info = [
+ (
+ str(Path(VERSION_PATH, name)),
+ format_creation_time(str(Path(VERSION_PATH, name))),
+ )
+ for name in version_names
+ if name.startswith("cantian_upgrade_bak")
+ ]
version_info.sort(key=lambda x: x[1], reverse=False)
for version_path, _ in version_info[:-2]:
delete_version(version_path)
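Review note: execute() keeps only the two newest cantian_upgrade_bak directories by sorting on the formatted ctime and deleting version_info[:-2]; the same selection on fabricated data:

    # Fabricated (path, formatted-ctime) pairs, as built by execute().
    version_info = [("/opt/cantian/upgrade_backup/cantian_upgrade_bak_a", "20240101000000"),
                    ("/opt/cantian/upgrade_backup/cantian_upgrade_bak_b", "20240201000000"),
                    ("/opt/cantian/upgrade_backup/cantian_upgrade_bak_c", "20240301000000")]
    version_info.sort(key=lambda x: x[1], reverse=False)
    stale = version_info[:-2]  # everything except the two newest
    assert [p for p, _ in stale] == ["/opt/cantian/upgrade_backup/cantian_upgrade_bak_a"]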
diff --git a/pkg/deploy/action/clear_upgrade_backup.py b/pkg/deploy/action/clear_upgrade_backup.py
index 977f6d3b1df002e1bec0605d4ad5d1d24769efbf..afe89cd5c0a675dd3c6a5641af84b002d678e2a0 100644
--- a/pkg/deploy/action/clear_upgrade_backup.py
+++ b/pkg/deploy/action/clear_upgrade_backup.py
@@ -41,16 +41,20 @@ class BackupRemover:
LOG.error(f"upgrade backup note: {BACKUP_NOTE_FILE} is not exist")
exit(1)
- with open(BACKUP_NOTE_FILE, 'r', encoding='utf-8') as file:
- backup_lis = file.read().split('\n')
+ with open(BACKUP_NOTE_FILE, "r", encoding="utf-8") as file:
+ backup_lis = file.read().split("\n")
backup_lis.remove("")
for each_backup_info in backup_lis:
# each_backup_info为:版本号:生成备份时的时间戳
- backup_info_lis = each_backup_info.split(':')
-
- if len(backup_info_lis) < 2: # 长度小于2 说明文件格式不对; 为兼容错误情况,返回空列表
- LOG.error(f"upgrade backup note: {BACKUP_NOTE_FILE} with un_excepted format")
+ backup_info_lis = each_backup_info.split(":")
+
+ if len(backup_info_lis) < 2:  # fewer than 2 fields means the file format is wrong; return an empty list to tolerate the error case
+ LOG.error(
+ f"upgrade backup note: {BACKUP_NOTE_FILE} with un_excepted format"
+ )
exit(1)
return_list.append(backup_info_lis[0])
@@ -75,7 +79,7 @@ class BackupRemover:
LOG.info(f"remove buackup files: {rm_dir} success")
-if __name__ == '__main__':
+if __name__ == "__main__":
br = BackupRemover()
try:
br.clear_upgrade_backup()
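Review note: the loop above parses each backup-note line as version:timestamp and treats fewer than two colon-separated fields as a malformed note; a standalone sketch with fabricated note content:

    note_text = "2.0.0:1700000000\n3.0.0:1700100000\n"  # fabricated note content
    versions = []
    for each_backup_info in filter(None, note_text.split("\n")):
        backup_info_lis = each_backup_info.split(":")
        if len(backup_info_lis) < 2:  # malformed line: bail out as the script does
            raise ValueError("unexpected backup note format")
        versions.append(backup_info_lis[0])
    assert versions == ["2.0.0", "3.0.0"]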
diff --git a/pkg/deploy/action/cms/cms_node0_stop.py b/pkg/deploy/action/cms/cms_node0_stop.py
index ec23ccdc6d9419372fbd6b452d6faddd7de2b9b8..667047791cea0d82563f61fc62be40bcb1a92b39 100644
--- a/pkg/deploy/action/cms/cms_node0_stop.py
+++ b/pkg/deploy/action/cms/cms_node0_stop.py
@@ -12,8 +12,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -54,15 +59,18 @@ def stop_services():
def ping_kubernetes_service():
try:
- subprocess.check_output(["timeout", "1", "ping", "-c", "1", "kubernetes.default.svc"], stderr=subprocess.STDOUT)
+ subprocess.check_output(
+ ["timeout", "1", "ping", "-c", "1", "kubernetes.default.svc"],
+ stderr=subprocess.STDOUT,
+ )
return True
except subprocess.CalledProcessError:
return False
def main():
- node_id = get_value('node_id')
- cantian_in_container = get_value('cantian_in_container')
+ node_id = get_value("node_id")
+ cantian_in_container = get_value("cantian_in_container")
if node_id == "0" and cantian_in_container in ["1", "2"]:
if not ping_kubernetes_service():
@@ -75,4 +83,4 @@ if __name__ == "__main__":
try:
main()
except Exception as err:
- LOGGER.error(f"Error stopping CMS process: {err}")
\ No newline at end of file
+ LOGGER.error(f"Error stopping CMS process: {err}")
diff --git a/pkg/deploy/action/cms/cmsctl.py b/pkg/deploy/action/cms/cmsctl.py
index 5bafe0b2c7843c02debbe71c6479210c0c3ddd32..177407cc6cd6204884cb694e4b5f3fa588e080dc 100644
--- a/pkg/deploy/action/cms/cmsctl.py
+++ b/pkg/deploy/action/cms/cmsctl.py
@@ -18,7 +18,8 @@ import json
import glob
from log import LOGGER
from get_config_info import get_value
-sys.path.append('../')
+
+sys.path.append("../")
try:
from obtains_lsid import LSIDGenerate
except Exception as err:
@@ -53,7 +54,7 @@ class CommonValue(object):
MAX_DIRECTORY_MODE = 750
MIN_DIRECTORY_MODE = 550
- KEY_DIRECTORY_MODE_STR = '0700'
+ KEY_DIRECTORY_MODE_STR = "0700"
MIN_FILE_PERMISSION = 0o400
MID_FILE_PERMISSION = 0o500
@@ -76,10 +77,20 @@ CANTIAN_GROUP = get_value("deploy_user")
USE_DBSTOR = ["dbstor", "combined"]
USE_DSS = ["dss"]
-VALID_RUNNING_MODE = {CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST, CANTIAND_IN_CLUSTER,
- CANTIAND_WITH_MYSQL_IN_CLUSTER, MYSQLD}
+VALID_RUNNING_MODE = {
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ MYSQLD,
+}
-VALID_SINGLE_MYSQL_RUNNING_MODE = {CANTIAND_WITH_MYSQL_IN_CLUSTER, CANTIAND_WITH_MYSQL_ST, CANTIAND_WITH_MYSQL}
+VALID_SINGLE_MYSQL_RUNNING_MODE = {
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL,
+}
CLUSTER_SIZE = 2 # default to 2, 4 node cluster mode need add parameter to specify this
CMS_TOOL_CONFIG_COUNT = 4
@@ -90,18 +101,18 @@ MYSQL_BIN_DIR = "/opt/cantian/mysql/install/mysql"
MYSQL_DATA_DIR = ""
MYSQL_LOG_FILE = ""
-a_ascii = ord('a')
-z_ascii = ord('z')
-aa_ascii = ord('A')
-zz_ascii = ord('Z')
-num0_ascii = ord('0')
-num9_ascii = ord('9')
-blank_ascii = ord(' ')
+a_ascii = ord("a")
+z_ascii = ord("z")
+aa_ascii = ord("A")
+zz_ascii = ord("Z")
+num0_ascii = ord("0")
+num9_ascii = ord("9")
+blank_ascii = ord(" ")
sep1_ascii = ord(os.sep)
-sep2_ascii = ord('_')
-sep3_ascii = ord(':')
-sep4_ascii = ord('-')
-sep5_ascii = ord('.')
+sep2_ascii = ord("_")
+sep3_ascii = ord(":")
+sep4_ascii = ord("-")
+sep5_ascii = ord(".")
SEP_SED = r"\/"
@@ -132,7 +143,10 @@ def check_runner():
if owner_uid == 0:
if runner_uid != 0:
runner = pwd.getpwuid(runner_uid).pw_name
- err_msg = "the owner of *.sh has root privilege,can't run it by user [%s]." % runner
+ err_msg = (
+ "the owner of *.sh has root privilege,can't run it by user [%s]."
+ % runner
+ )
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
@@ -146,9 +160,15 @@ def check_runner():
elif runner_uid != owner_uid:
runner = pwd.getpwuid(runner_uid).pw_name
owner = pwd.getpwuid(owner_uid).pw_name
- LOGGER.error("the owner of *.sh [%s] is different with the executor [%s]." % (owner, runner))
+ LOGGER.error(
+ "the owner of *.sh [%s] is different with the executor [%s]."
+ % (owner, runner)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("the owner of *.sh [%s] is different with the executor [%s]." % (owner, runner))
+ raise Exception(
+ "the owner of *.sh [%s] is different with the executor [%s]."
+ % (owner, runner)
+ )
def _exec_popen(cmd, values=None):
@@ -160,8 +180,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
pobj.stdin.write(cmd.encode())
@@ -202,7 +227,9 @@ def run_cmd(str_cmd, wrong_info):
output = stdout + stderr
LOGGER.error("%s.\ncommand: %s.\noutput: %s" % (wrong_info, str_cmd, output))
if FORCE_UNINSTALL != "force":
- raise Exception("%s.\ncommand: %s.\noutput: %s" % (wrong_info, str_cmd, output))
+ raise Exception(
+ "%s.\ncommand: %s.\noutput: %s" % (wrong_info, str_cmd, output)
+ )
return stdout
@@ -251,26 +278,36 @@ def check_user(user, group):
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
elif user == "root" or user_.pw_uid == 0:
- LOGGER.error("parameter input error: -U, can not install program to"
- " root user.")
+ LOGGER.error(
+ "parameter input error: -U, can not install program to" " root user."
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("parameter input error: -U, can not install program to"
- " root user.")
+ raise Exception(
+ "parameter input error: -U, can not install program to" " root user."
+ )
elif group == "root" or user_.pw_gid == 0:
- LOGGER.error("parameter input error: -U, can not install program to"
- " user with root privilege.")
+ LOGGER.error(
+ "parameter input error: -U, can not install program to"
+ " user with root privilege."
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("parameter input error: -U, can not install program to"
- " user with root privilege.")
+ raise Exception(
+ "parameter input error: -U, can not install program to"
+ " user with root privilege."
+ )
runner_uid = os.getuid()
if runner_uid != 0 and runner_uid != user_.pw_uid:
runner = pwd.getpwuid(runner_uid).pw_name
- LOGGER.error("Parameter input error: -U, has to be the same as the"
- " executor [%s]" % runner)
+ LOGGER.error(
+ "Parameter input error: -U, has to be the same as the"
+ " executor [%s]" % runner
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("Parameter input error: -U, has to be the same as the"
- " executor [%s]" % runner)
+ raise Exception(
+ "Parameter input error: -U, has to be the same as the"
+ " executor [%s]" % runner
+ )
def check_path(path_type_in):
@@ -280,19 +317,9 @@ def check_path(path_type_in):
:return: weather validity
"""
path_len = len(path_type_in)
- char_check_list1 = [blank_ascii,
- sep1_ascii,
- sep2_ascii,
- sep4_ascii,
- sep5_ascii
- ]
-
- char_check_list2 = [blank_ascii,
- sep1_ascii,
- sep2_ascii,
- sep3_ascii,
- sep4_ascii
- ]
+ char_check_list1 = [blank_ascii, sep1_ascii, sep2_ascii, sep4_ascii, sep5_ascii]
+
+ char_check_list2 = [blank_ascii, sep1_ascii, sep2_ascii, sep3_ascii, sep4_ascii]
if CURRENT_OS == "Linux":
return check_path_linux(path_len, path_type_in, char_check_list1)
elif CURRENT_OS == "Windows":
@@ -305,10 +332,12 @@ def check_path(path_type_in):
def check_path_linux(path_len, path_type_in, char_check_list):
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (a_ascii <= char_check <= z_ascii
- or aa_ascii <= char_check <= zz_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in char_check_list)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or aa_ascii <= char_check <= zz_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in char_check_list
+ ):
return False
return True
@@ -316,10 +345,12 @@ def check_path_linux(path_len, path_type_in, char_check_list):
def check_path_windows(path_len, path_type_in, char_check_list):
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (a_ascii <= char_check <= z_ascii
- or aa_ascii <= char_check <= zz_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in char_check_list)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or aa_ascii <= char_check <= zz_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in char_check_list
+ ):
return False
return True
@@ -343,12 +374,13 @@ def genreg_string(text):
ins_list = ins_str.split(os.sep)
reg_string = ""
for i in ins_list:
- if (i == ""):
+ if i == "":
continue
else:
reg_string += SEP_SED + i
return reg_string
+
deploy_mode = get_value("deploy_mode")
cantian_in_container = get_value("cantian_in_container")
mes_type = get_value("mes_type") if deploy_mode in USE_DBSTOR else "TCP"
@@ -380,41 +412,31 @@ CMS_CONFIG = {
"_CMS_MES_MESSAGE_CHANNEL_NUM": "1",
"_CMS_GCC_BAK": "", # generate by installer
"_CLUSTER_ID": 0,
- "_USE_DBSTOR": "", # generate by installer
- "_DBSTOR_NAMESPACE": "", # generate by installer
- "_CMS_MES_PIPE_TYPE": mes_type
+ "_USE_DBSTOR": "", # generate by installer
+ "_DBSTOR_NAMESPACE": "", # generate by installer
+ "_CMS_MES_PIPE_TYPE": mes_type,
}
PRIMARY_KEYSTORE = "/opt/cantian/common/config/primary_keystore_bak.ks"
STANDBY_KEYSTORE = "/opt/cantian/common/config/standby_keystore_bak.ks"
YOUMAI_DEMO = "/opt/cantian/youmai_demo"
MES_CONFIG = {
- "_CMS_MES_SSL_SWITCH": mes_ssl_switch,
- "_CMS_MES_SSL_KEY_PWD": None,
- "_CMS_MES_SSL_CRT_KEY_PATH": "/opt/cantian/common/config/certificates",
- "KMC_KEY_FILES": f"({PRIMARY_KEYSTORE}, {STANDBY_KEYSTORE})"
+ "_CMS_MES_SSL_SWITCH": mes_ssl_switch,
+ "_CMS_MES_SSL_KEY_PWD": None,
+ "_CMS_MES_SSL_CRT_KEY_PATH": "/opt/cantian/common/config/certificates",
+ "KMC_KEY_FILES": f"({PRIMARY_KEYSTORE}, {STANDBY_KEYSTORE})",
}
-GCC_TYPE = {
- "dbstor": "DBS",
- "combined": "DBS",
- "file": "file",
- "dss": "SD"
-}
+GCC_TYPE = {"dbstor": "DBS", "combined": "DBS", "file": "file", "dss": "SD"}
-GCC_HOME = {
- "dbstor": "",
- "combined": "",
- "file": "",
- "dss": ""
-}
+GCC_HOME = {"dbstor": "", "combined": "", "file": "", "dss": ""}
CMS_CONFIG.update(MES_CONFIG)
class NormalException(Exception):
"""
- Exception for exit(0)
+ Exception for exit(0)
"""
@@ -463,7 +485,9 @@ class CmsCtl(object):
def __init__(self):
install_config = "../../config/deploy_param.json"
- self.install_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), install_config)
+ self.install_config_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), install_config
+ )
@staticmethod
def cms_check_share_logic_ip_isvalid(node_ip):
@@ -478,17 +502,24 @@ class CmsCtl(object):
def ping_execute(p_cmd):
cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (p_cmd, node_ip)
ret_code, stdout, stderr = _exec_popen(cmd)
- if ret_code or stdout != '3':
- LOGGER.info("The invalid IP address is %s. "
- "ret_code : %s, stdout : %s, stderr : %s" % (node_ip, ret_code, stdout, stderr))
+ if ret_code or stdout != "3":
+ LOGGER.info(
+ "The invalid IP address is %s. "
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (node_ip, ret_code, stdout, stderr)
+ )
return False
return True
LOGGER.info("check the node IP address or domain name.")
if not ping_execute("ping") and not ping_execute("ping6"):
- LOGGER.error("checked the node IP address or domain name failed: %s" % node_ip)
+ LOGGER.error(
+ "checked the node IP address or domain name failed: %s" % node_ip
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("checked the node IP address or domain name failed: %s" % node_ip)
+ raise Exception(
+ "checked the node IP address or domain name failed: %s" % node_ip
+ )
LOGGER.info("checked the node IP address or domain name success: %s" % node_ip)
@@ -562,9 +593,13 @@ class CmsCtl(object):
self.group = load_dict["group"]
def check_link_type(self, load_dict):
- if "link_type" in load_dict and (load_dict["link_type"] == "0" or load_dict["link_type"] == "TCP"):
+ if "link_type" in load_dict and (
+ load_dict["link_type"] == "0" or load_dict["link_type"] == "TCP"
+ ):
self.link_type = "TCP"
- if "link_type" in load_dict and (load_dict["link_type"] == "2" or load_dict["link_type"] == "RDMA_1823"):
+ if "link_type" in load_dict and (
+ load_dict["link_type"] == "2" or load_dict["link_type"] == "RDMA_1823"
+ ):
self.link_type = "RDMA_1823"
def parse_parameters(self, config_file):
@@ -572,7 +607,7 @@ class CmsCtl(object):
flags = os.O_RDONLY | os.O_EXCL
modes = stat.S_IWUSR | stat.S_IRUSR
try:
- with os.fdopen(os.open(config_file, flags, modes), 'r') as load_f:
+ with os.fdopen(os.open(config_file, flags, modes), "r") as load_f:
load_dict = json.load(load_f)
self.load_user_config(load_dict)
self.load_cms_run_config(load_dict)
@@ -580,7 +615,9 @@ class CmsCtl(object):
node_str = "node" + str(self.node_id)
metadata_str = "metadata_" + self.storage_metadata_fs
global MYSQL_DATA_DIR
- MYSQL_DATA_DIR = os.path.join("/mnt/dbdata/remote", metadata_str, node_str)
+ MYSQL_DATA_DIR = os.path.join(
+ "/mnt/dbdata/remote", metadata_str, node_str
+ )
global MYSQL_LOG_FILE
MYSQL_LOG_FILE = os.path.join(MYSQL_DATA_DIR, "mysql.log")
except OSError as ex:
@@ -630,7 +667,9 @@ class CmsCtl(object):
flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
modes = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
try:
- with os.fdopen(os.open(self.cms_new_config, flags, modes), "w") as file_object:
+ with os.fdopen(
+ os.open(self.cms_new_config, flags, modes), "w"
+ ) as file_object:
json.dump(conf_dict, file_object)
except OSError as ex:
LOGGER.error("failed to read config : %s" % str(ex))
@@ -665,7 +704,9 @@ class CmsCtl(object):
if "GCC_TYPE" in common_parameters:
common_parameters["GCC_DIR"] = common_parameters["GCC_HOME"]
if deploy_mode not in USE_DSS:
- common_parameters["GCC_HOME"] = os.path.join(common_parameters["GCC_HOME"], "gcc_file")
+ common_parameters["GCC_HOME"] = os.path.join(
+ common_parameters["GCC_HOME"], "gcc_file"
+ )
self.clean_old_conf(list(common_parameters.keys()), conf_file)
self.set_new_conf(common_parameters, conf_file)
@@ -685,17 +726,25 @@ class CmsCtl(object):
node_ip.append("127.0.0.1")
gcc_home = self.gcc_home
gcc_dir = self.gcc_dir
-
+
# gcc_home is lun, gcc_home:/dev/sda
if self.gcc_type != "SD":
gcc_dir = gcc_home
gcc_home = os.path.join(gcc_home, "gcc_file")
- if 'LD_LIBRARY_PATH' in os.environ:
- ld_library_path = ("%s:%s:%s" % (os.path.join(self.install_path, "lib"), os.path.join(
- self.install_path, "add-ons",), os.environ['LD_LIBRARY_PATH']))
+ if "LD_LIBRARY_PATH" in os.environ:
+ ld_library_path = "%s:%s:%s" % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(
+ self.install_path,
+ "add-ons",
+ ),
+ os.environ["LD_LIBRARY_PATH"],
+ )
else:
- ld_library_path = ("%s:%s" % (os.path.join(self.install_path, "lib"), os.path.join(
- self.install_path, "add-ons"),))
+ ld_library_path = "%s:%s" % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(self.install_path, "add-ons"),
+ )
common_parameters = {
"GCC_HOME": gcc_home,
"REPORT_FILE": LOG_FILE,
@@ -744,7 +793,10 @@ class CmsCtl(object):
"""
cmd = ""
for parameter in param_list:
- cmd += "sed -i '/^%s/d' %s;" % (parameter.replace('[', '\[').replace(']', '\]'), conf_file)
+ cmd += "sed -i '/^%s/d' %s;" % (
+ parameter.replace("[", "\[").replace("]", "\]"),
+ conf_file,
+ )
if cmd:
cmd = cmd.strip(";")
run_cmd(cmd, "failed to write the %s" % conf_file)
@@ -767,27 +819,41 @@ class CmsCtl(object):
socket_check.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_check.settimeout(time_out)
if gPyVersion < PYTHON242:
- LOGGER.error("this install script can not support python version"
- " : " + gPyVersion)
+ LOGGER.error(
+ "this install script can not support python version" " : " + gPyVersion
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("this install script can not support python version"
- " : " + gPyVersion)
+ raise Exception(
+ "this install script can not support python version"
+ " : " + gPyVersion
+ )
try:
socket_check.bind((node_ip, inner_port))
except socket.error as err_socket:
socket_check.close()
- if (int(err_socket.errno) == 98 or int(err_socket.errno) == 95
- or int(err_socket.errno) == 13):
- LOGGER.info("Error: port %s has been used,the detail"
- " information is as follows:" % value)
+ if (
+ int(err_socket.errno) == 98
+ or int(err_socket.errno) == 95
+ or int(err_socket.errno) == 13
+ ):
+ LOGGER.info(
+ "Error: port %s has been used,the detail"
+ " information is as follows:" % value
+ )
str_cmd = "netstat -unltp | grep %s" % value
ret_code, stdout, stderr = _exec_popen(str_cmd)
- LOGGER.error("can not get detail information of the"
- " port, command:%s, output:%s, stderr:%s" % (str_cmd, str(stdout), stderr))
+ LOGGER.error(
+ "can not get detail information of the"
+ " port, command:%s, output:%s, stderr:%s"
+ % (str_cmd, str(stdout), stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("can not get detail information of the"
- " port, command:%s, output:%s, stderr:%s" % (str_cmd, str(stdout), stderr))
+ raise Exception(
+ "can not get detail information of the"
+ " port, command:%s, output:%s, stderr:%s"
+ % (str_cmd, str(stdout), stderr)
+ )
socket_check.close()
@@ -826,7 +892,7 @@ class CmsCtl(object):
"""
if not nodeip:
return False
- allowed_chars = set('0:.')
+ allowed_chars = set("0:.")
if set(nodeip).issubset(allowed_chars):
return True
else:
@@ -839,7 +905,7 @@ class CmsCtl(object):
output: NA
"""
LOGGER.info("check the node IP address.")
- if get_value("cantian_in_container") == '0':
+ if get_value("cantian_in_container") == "0":
try:
socket.inet_aton(nodeip)
except socket.error:
@@ -847,7 +913,10 @@ class CmsCtl(object):
try:
socket.inet_pton(socket.AF_INET6, nodeip)
except socket.error:
- err_msg = "the invalid IP address : %s is not ipv4 or ipv6 format." % nodeip
+ err_msg = (
+ "the invalid IP address : %s is not ipv4 or ipv6 format."
+ % nodeip
+ )
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
@@ -859,12 +928,18 @@ class CmsCtl(object):
cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (ping_cmd, nodeip)
ret_code, stdout, stderr = _exec_popen(cmd)
- if ret_code or stdout != '3':
- LOGGER.error("The invalid IP address is %s. "
- "ret_code : %s, stdout : %s, stderr : %s" % (nodeip, ret_code, stdout, stderr))
+ if ret_code or stdout != "3":
+ LOGGER.error(
+ "The invalid IP address is %s. "
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (nodeip, ret_code, stdout, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("The invalid IP address is %s. "
- "ret_code : %s, stdout : %s, stderr : %s" % (nodeip, ret_code, stdout, stderr))
+ raise Exception(
+ "The invalid IP address is %s. "
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (nodeip, ret_code, stdout, stderr)
+ )
ip_is_found = 1
if nodeip in self.ip_addr:
@@ -879,11 +954,17 @@ class CmsCtl(object):
ip_is_found = 0
if ret_code or not int(ip_is_found):
- LOGGER.error("The invalid IP address is %s. "
- "ret_code : %s, ip_is_found : %s, stderr : %s" % (nodeip, ret_code, ip_is_found, stderr))
+ LOGGER.error(
+ "The invalid IP address is %s. "
+ "ret_code : %s, ip_is_found : %s, stderr : %s"
+ % (nodeip, ret_code, ip_is_found, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("The invalid IP address is %s. "
- "ret_code : %s, ip_is_found : %s, stderr : %s" % (nodeip, ret_code, ip_is_found, stderr))
+ raise Exception(
+ "The invalid IP address is %s. "
+ "ret_code : %s, ip_is_found : %s, stderr : %s"
+ % (nodeip, ret_code, ip_is_found, stderr)
+ )
LOGGER.info("checked the node IP address : %s" % nodeip)
@@ -891,26 +972,39 @@ class CmsCtl(object):
"""
function: after decompression install package, change file permission
"""
- str_cmd = "chmod %s %s -R" % (CommonValue.KEY_DIRECTORY_MODE,
- self.install_path)
- str_cmd += ("&& find '%s'/add-ons -type f | xargs chmod %s "
- % (self.install_path, CommonValue.MID_FILE_MODE))
- str_cmd += ("&& find '%s'/admin -type f | xargs chmod %s "
- % (self.install_path, CommonValue.MIN_FILE_MODE))
- str_cmd += ("&& find '%s'/lib -type f | xargs chmod %s"
- % (self.install_path, CommonValue.MID_FILE_MODE))
- str_cmd += ("&& find '%s'/bin -type f | xargs chmod %s "
- % (self.install_path, CommonValue.MID_FILE_MODE))
- str_cmd += ("&& find '%s'/cfg -type f | xargs chmod %s "
- % (self.install_path, CommonValue.KEY_FILE_MODE))
+ str_cmd = "chmod %s %s -R" % (CommonValue.KEY_DIRECTORY_MODE, self.install_path)
+ str_cmd += "&& find '%s'/add-ons -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.MID_FILE_MODE,
+ )
+ str_cmd += "&& find '%s'/admin -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.MIN_FILE_MODE,
+ )
+ str_cmd += "&& find '%s'/lib -type f | xargs chmod %s" % (
+ self.install_path,
+ CommonValue.MID_FILE_MODE,
+ )
+ str_cmd += "&& find '%s'/bin -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.MID_FILE_MODE,
+ )
+ str_cmd += "&& find '%s'/cfg -type f | xargs chmod %s " % (
+ self.install_path,
+ CommonValue.KEY_FILE_MODE,
+ )
package_xml = os.path.join(self.install_path, "package.xml")
if os.path.exists(package_xml):
- str_cmd += ("&& chmod %s '%s'/package.xml"
- % (CommonValue.MIN_FILE_MODE, self.install_path))
+ str_cmd += "&& chmod %s '%s'/package.xml" % (
+ CommonValue.MIN_FILE_MODE,
+ self.install_path,
+ )
LOGGER.info("change app permission cmd: %s" % str_cmd)
run_cmd(str_cmd, "failed to chmod %s" % CommonValue.KEY_DIRECTORY_MODE)
- if cantian_in_container == "0" and (deploy_mode == "file" or os.path.exists(YOUMAI_DEMO)):
+ if cantian_in_container == "0" and (
+ deploy_mode == "file" or os.path.exists(YOUMAI_DEMO)
+ ):
self.chown_gcc_dirs()
def export_user_env(self):
@@ -921,20 +1015,29 @@ class CmsCtl(object):
modes = stat.S_IWUSR | stat.S_IRUSR
try:
with os.fdopen(os.open(self.user_profile, flags, modes), "a") as _file:
- _file.write("export CMS_HOME=\"%s\"" % self.cms_home)
+ _file.write('export CMS_HOME="%s"' % self.cms_home)
_file.write(os.linesep)
- _file.write("export PATH=\"%s\":$PATH"
- % os.path.join(self.install_path, "bin"))
+ _file.write(
+ 'export PATH="%s":$PATH' % os.path.join(self.install_path, "bin")
+ )
_file.write(os.linesep)
if "LD_LIBRARY_PATH" in os.environ:
- _file.write("export LD_LIBRARY_PATH=\"%s\":\"%s\""
- ":$LD_LIBRARY_PATH"
- % (os.path.join(self.install_path, "lib"),
- os.path.join(self.install_path, "add-ons")))
+ _file.write(
+ 'export LD_LIBRARY_PATH="%s":"%s"'
+ ":$LD_LIBRARY_PATH"
+ % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(self.install_path, "add-ons"),
+ )
+ )
else:
- _file.write("export LD_LIBRARY_PATH=\"%s\":\"%s\""
- % (os.path.join(self.install_path, "lib"),
- os.path.join(self.install_path, "add-ons")))
+ _file.write(
+ 'export LD_LIBRARY_PATH="%s":"%s"'
+ % (
+ os.path.join(self.install_path, "lib"),
+ os.path.join(self.install_path, "add-ons"),
+ )
+ )
_file.write(os.linesep)
_file.flush()
LOGGER.info("write export for cms_home, path, lib_path")
@@ -952,11 +1055,16 @@ class CmsCtl(object):
str_cmd = "echo ~"
ret_code, stdout, stderr = _exec_popen(str_cmd)
if ret_code:
- LOGGER.error("failed to get user home."
- "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr))
+ LOGGER.error(
+ "failed to get user home."
+ "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("failed to get user home."
- "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr))
+ raise Exception(
+ "failed to get user home."
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (ret_code, stdout, stderr)
+ )
output = os.path.realpath(os.path.normpath(stdout))
if not check_path(output):
err_msg = "the user home directory is invalid."
@@ -984,14 +1092,17 @@ class CmsCtl(object):
with os.fdopen(os.open(self.user_profile, flags, modes), "r") as _file:
while True:
str_line = _file.readline()
- if (not str_line):
+ if not str_line:
break
str_line = str_line.strip()
- if (str_line.startswith("#")):
+ if str_line.startswith("#"):
continue
user_info = str_line.split()
- if (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith("CMS_HOME=") > 0):
+ if (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith("CMS_HOME=") > 0
+ ):
is_find = True
break
else:
@@ -1003,7 +1114,10 @@ class CmsCtl(object):
raise Exception(err_msg)
def skip_execute_in_node_1(self):
- if self.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and self.node_id == 1:
+ if (
+ self.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and self.node_id == 1
+ ):
return True
return False
@@ -1012,7 +1126,7 @@ class CmsCtl(object):
chown data and gcc dirs
:return:
"""
- cmd = "chown %s:%s -hR \"%s\";" % (self.user, self.group, self.gcc_home)
+ cmd = 'chown %s:%s -hR "%s";' % (self.user, self.group, self.gcc_home)
LOGGER.info("change owner cmd: %s" % cmd)
self.cms_check_share_logic_ip_isvalid(self.share_logic_ip)
LOGGER.info("if blocked here, please check if the network is normal")
@@ -1023,23 +1137,37 @@ class CmsCtl(object):
LOGGER.info("prepare gcc home dir")
self.cms_check_share_logic_ip_isvalid(self.share_logic_ip)
LOGGER.info("if blocked here, please check if the network is normal")
- if (deploy_mode == "file" or os.path.exists(YOUMAI_DEMO)) and not os.path.exists(self.gcc_home):
+ if (
+ deploy_mode == "file" or os.path.exists(YOUMAI_DEMO)
+ ) and not os.path.exists(self.gcc_home):
os.makedirs(self.gcc_home, CommonValue.KEY_DIRECTORY_PERMISSION)
LOGGER.info("makedir for gcc_home %s" % self.gcc_home)
def install_xnet_lib(self):
if self.is_rdma_startup():
- str_cmd = "cp -arf %s/add-ons/mlnx/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -arf %s/add-ons/mlnx/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
elif self.is_rdma_1823_startup():
- str_cmd = "cp -arf %s/add-ons/1823/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -arf %s/add-ons/1823/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
else:
- str_cmd = "cp -arf %s/add-ons/nomlnx/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -arf %s/add-ons/nomlnx/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
LOGGER.info("install xnet lib cmd: " + str_cmd)
run_cmd(str_cmd, "failed to install xnet lib")
def install_kmc_lib(self):
- str_cmd = "cp -arf %s/add-ons/kmc_shared/lib* %s/add-ons/" % (self.install_path, self.install_path)
+ str_cmd = "cp -arf %s/add-ons/kmc_shared/lib* %s/add-ons/" % (
+ self.install_path,
+ self.install_path,
+ )
LOGGER.info("install kmc lib cmd: " + str_cmd)
run_cmd(str_cmd, "failed to install kmc lib")
@@ -1049,30 +1177,45 @@ class CmsCtl(object):
"""
ret_code, stdout, stderr = _exec_popen("which ofed_info")
if ret_code:
- LOGGER.info("no ofed_info cmd found"
- "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr))
+ LOGGER.info(
+ "no ofed_info cmd found. "
+ "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr)
+ )
return False
ret_code, stdout, stderr = _exec_popen("ofed_info -s")
if ret_code:
- LOGGER.info("exec ofed_info cmd failed"
- "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr))
+ LOGGER.info(
+ "exec ofed_info cmd failed. "
+ "ret_code : %s, stdout : %s, stderr : %s" % (ret_code, stdout, stderr)
+ )
return False
- if 'MLNX_OFED_LINUX-5.5' in stdout:
+ if "MLNX_OFED_LINUX-5.5" in stdout:
LOGGER.info("mlnx version 5.5")
return True
ret_code, os_arch, stderr = _exec_popen("uname -i")
if ret_code:
- LOGGER.error("failed to get linux release version."
- "ret_code : %s, os_arch : %s, stderr : %s" % (ret_code, os_arch, stderr))
+ LOGGER.error(
+ "failed to get linux release version. "
+ "ret_code : %s, os_arch : %s, stderr : %s" % (ret_code, os_arch, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("failed to get linux release version."
- "ret_code : %s, os_arch : %s, stderr : %s" % (ret_code, os_arch, stderr))
- aarch_mlnx_version_list = ['OFED-internal-5.8-2.0.3', 'MLNX_OFED_LINUX-5.8', 'MLNX_OFED_LINUX-5.9']
- aarch_version_check_result = any(mlnx_version if mlnx_version in stdout else False
- for mlnx_version in aarch_mlnx_version_list)
+ raise Exception(
+ "failed to get linux release version. "
+ "ret_code : %s, os_arch : %s, stderr : %s"
+ % (ret_code, os_arch, stderr)
+ )
+ aarch_mlnx_version_list = [
+ "OFED-internal-5.8-2.0.3",
+ "MLNX_OFED_LINUX-5.8",
+ "MLNX_OFED_LINUX-5.9",
+ ]
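+ # True when "ofed_info -s" reported any of the accepted aarch64 OFED releases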
+ aarch_version_check_result = any(
+ mlnx_version in stdout for mlnx_version in aarch_mlnx_version_list
+ )
if os_arch == "aarch64" and aarch_version_check_result == True:
LOGGER.info("Is mlnx 5.8~5.9")
return True
@@ -1098,15 +1241,17 @@ class CmsCtl(object):
def check_parameter_install(self):
if self.ip_cluster != "":
- _list = self.ip_cluster.split(';')
+ _list = self.ip_cluster.split(";")
self.ip_addr = _list[self.node_id]
for item in re.split(r"[;,]", self.ip_cluster):
if len(_list) != 1 and self.all_zero_addr_after_ping(item):
- LOGGER.error("ip contains all-zero ip,"
- " can not specify other ip.")
+ LOGGER.error(
+ "ip contains all-zero ip, cannot specify other ip."
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("ip contains all-zero ip,"
- " can not specify other ip.")
+ raise Exception(
+ "ip contains all-zero ip, cannot specify other ip."
+ )
if cantian_in_container == "0":
self.check_ip_isvaild(item)
else:
@@ -1136,10 +1281,18 @@ class CmsCtl(object):
if not os.path.exists(self.install_path):
os.makedirs(self.install_path, CommonValue.KEY_DIRECTORY_PERMISSION)
cms_pkg_file = "/opt/cantian/image/Cantian-RUN-CENTOS-64bit"
- str_cmd = ("cp -arf %s/add-ons %s/admin %s/bin %s/cfg %s/lib %s/package.xml %s"
- % (cms_pkg_file, cms_pkg_file, cms_pkg_file,
- cms_pkg_file, cms_pkg_file, cms_pkg_file,
- self.install_path))
+ str_cmd = (
+ "cp -arf %s/add-ons %s/admin %s/bin %s/cfg %s/lib %s/package.xml %s"
+ % (
+ cms_pkg_file,
+ cms_pkg_file,
+ cms_pkg_file,
+ cms_pkg_file,
+ cms_pkg_file,
+ cms_pkg_file,
+ self.install_path,
+ )
+ )
LOGGER.info("copy install files cmd: " + str_cmd)
run_cmd(str_cmd, "failed to install cms lib files")
@@ -1171,14 +1324,24 @@ class CmsCtl(object):
self.gcc_home = "/dev/gcc-disk"
self.cms_gcc_bak = "/dev/gcc-disk"
else:
- self.gcc_home = os.path.join("/mnt/dbdata/remote/share_" + self.storage_share_fs, "gcc_home")
- self.cms_gcc_bak = os.path.join("/mnt/dbdata/remote", "archive_" + self.storage_archive_fs)
+ self.gcc_home = os.path.join(
+ "/mnt/dbdata/remote/share_" + self.storage_share_fs, "gcc_home"
+ )
+ self.cms_gcc_bak = os.path.join(
+ "/mnt/dbdata/remote", "archive_" + self.storage_archive_fs
+ )
if os.path.exists(YOUMAI_DEMO):
- self.gcc_home = os.path.join("/mnt/dbdata/remote/share_" + self.storage_share_fs, "gcc_home")
- self.cms_gcc_bak = os.path.join("/mnt/dbdata/remote", "archive_" + self.storage_archive_fs)
+ self.gcc_home = os.path.join(
+ "/mnt/dbdata/remote/share_" + self.storage_share_fs, "gcc_home"
+ )
+ self.cms_gcc_bak = os.path.join(
+ "/mnt/dbdata/remote", "archive_" + self.storage_archive_fs
+ )
self.gcc_dir = self.gcc_home
- LOGGER.info("======================== begin to pre_install cms configs ========================")
+ LOGGER.info(
+ "======================== begin to pre_install cms configs ========================"
+ )
check_user(self.user, self.group)
if deploy_mode == "file" and not check_path(self.gcc_home):
@@ -1191,85 +1354,174 @@ class CmsCtl(object):
self.check_old_install()
self.install_step = 1
self.set_cms_conf()
- LOGGER.info("======================== pre_install cms configs successfully ========================")
+ LOGGER.info(
+ "======================== pre_install cms configs successfully ========================"
+ )
def prepare_cms_tool_dbstor_config(self):
for i in range(10, CMS_TOOL_CONFIG_COUNT + 10):
file_num = i - 9
uuid_generate = LSIDGenerate(2, self.cluster_id, i, self.node_id)
inst_id, cms_tool_uuid = uuid_generate.execute()
- str_cmd = ("cp -raf /opt/cantian/dbstor/tools/dbstor_config.ini"
- " %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (self.cms_home, str(file_num)))
- str_cmd += " && echo 'DBSTOR_OWNER_NAME = cms' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (
- self.cms_home, str(file_num))
- str_cmd += " && echo 'CLUSTER_NAME = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (
- self.cluster_name, self.cms_home, str(file_num))
- str_cmd += " && echo 'CLUSTER_UUID = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (
- self.cluster_uuid, self.cms_home, str(file_num))
- str_cmd += (" && echo 'INST_ID = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
- % (inst_id, self.cms_home, str(file_num)))
- str_cmd += " && echo 'CMS_TOOL_UUID = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (
- cms_tool_uuid, self.cms_home, str(file_num))
- str_cmd += (" && sed -i '/^\s*$/d' %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
- % (self.cms_home, str(file_num)))
- str_cmd += " && chown -R %s:%s %s/dbstor" % (self.user, self.group, self.cms_home)
- str_cmd += " && chmod 640 %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (self.cms_home, str(file_num))
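+ # build one "&&"-chained shell command so any failing step aborts the rest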
+ str_cmd = (
+ "cp -raf /opt/cantian/dbstor/tools/dbstor_config.ini"
+ " %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (self.cms_home, str(file_num))
+ )
+ str_cmd += (
+ " && echo 'DBSTOR_OWNER_NAME = cms' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (self.cms_home, str(file_num))
+ )
+ str_cmd += (
+ " && echo 'CLUSTER_NAME = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (self.cluster_name, self.cms_home, str(file_num))
+ )
+ str_cmd += (
+ " && echo 'CLUSTER_UUID = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (self.cluster_uuid, self.cms_home, str(file_num))
+ )
+ str_cmd += (
+ " && echo 'INST_ID = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (inst_id, self.cms_home, str(file_num))
+ )
+ str_cmd += (
+ " && echo 'CMS_TOOL_UUID = %s' >> %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (cms_tool_uuid, self.cms_home, str(file_num))
+ )
+ str_cmd += (
+ r" && sed -i '/^\s*$/d' %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini"
+ % (self.cms_home, str(file_num))
+ )
+ str_cmd += " && chown -R %s:%s %s/dbstor" % (
+ self.user,
+ self.group,
+ self.cms_home,
+ )
+ str_cmd += " && chmod 640 %s/dbstor/conf/dbs/dbstor_config_tool_%s.ini" % (
+ self.cms_home,
+ str(file_num),
+ )
ret_code, stdout, stderr = _exec_popen(str_cmd)
if ret_code:
- LOGGER.error("prepare cms tool dbstor config file failed, return: " +
- str(ret_code) + os.linesep + stdout + os.linesep + stderr)
+ LOGGER.error(
+ "prepare cms tool dbstor config file failed, return: "
+ + str(ret_code)
+ + os.linesep
+ + stdout
+ + os.linesep
+ + stderr
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("prepare cms tool dbstor config file failed, return: " +
- str(ret_code) + os.linesep + stdout + os.linesep + stderr)
+ raise Exception(
+ "prepare cms tool dbstor config file failed, return: "
+ + str(ret_code)
+ + os.linesep
+ + stdout
+ + os.linesep
+ + stderr
+ )
def copy_dbstor_config(self):
if os.path.exists(os.path.join(self.cms_home, "dbstor")):
shutil.rmtree(os.path.join(self.cms_home, "dbstor"))
- os.makedirs("%s/dbstor/conf/dbs" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/conf/infra/config" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/data/logs" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/data/ftds" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION)
+ os.makedirs(
+ "%s/dbstor/conf/dbs" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
+ os.makedirs(
+ "%s/dbstor/conf/infra/config" % self.cms_home,
+ CommonValue.KEY_DIRECTORY_PERMISSION,
+ )
+ os.makedirs(
+ "%s/dbstor/data/logs" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
+ os.makedirs(
+ "%s/dbstor/data/ftds" % self.cms_home, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
if self.is_rdma_startup() or self.is_rdma_1823_startup():
- str_cmd = "cp -rfa %s/cfg/node_config_rdma_cms.xml %s/dbstor/conf/infra/config/node_config.xml" % (
- self.install_path, self.cms_home)
+ str_cmd = (
+ "cp -rfa %s/cfg/node_config_rdma_cms.xml %s/dbstor/conf/infra/config/node_config.xml"
+ % (self.install_path, self.cms_home)
+ )
else:
- str_cmd = "cp -raf %s/cfg/node_config_tcp_cms.xml %s/dbstor/conf/infra/config/node_config.xml" % (
- self.install_path, self.cms_home)
+ str_cmd = (
+ "cp -raf %s/cfg/node_config_tcp_cms.xml %s/dbstor/conf/infra/config/node_config.xml"
+ % (self.install_path, self.cms_home)
+ )
generate_cluster_uuid = LSIDGenerate(0, self.cluster_id, 0, 0)
generate_inst_id = LSIDGenerate(2, self.cluster_id, 0, self.node_id)
_, self.cluster_uuid = generate_cluster_uuid.execute()
inst_id, _ = generate_inst_id.execute()
- str_cmd += " && cp -raf %s/cfg/osd.cfg %s/dbstor/conf/infra/config/osd.cfg" % (self.install_path, self.cms_home)
- str_cmd += " && cp -raf /opt/cantian/dbstor/tools/dbstor_config.ini %s/dbstor/conf/dbs/" % (self.cms_home)
- str_cmd += " && echo 'DBSTOR_OWNER_NAME = cms' >> %s/dbstor/conf/dbs/dbstor_config.ini" % (self.cms_home)
- str_cmd += (" && echo 'CLUSTER_NAME = %s' >> %s/dbstor/conf/dbs/dbstor_config.ini"
- % (self.cluster_name, self.cms_home))
- str_cmd += (" && echo 'CLUSTER_UUID = %s' >> %s/dbstor/conf/dbs/dbstor_config.ini"
- % (self.cluster_uuid, self.cms_home))
- str_cmd += " && echo 'INST_ID = %s' >> %s/dbstor/conf/dbs/dbstor_config.ini" % (inst_id, self.cms_home)
- str_cmd += " && sed -i '/^\s*$/d' %s/dbstor/conf/dbs/dbstor_config.ini" % (self.cms_home)
- str_cmd += " && chown -R %s:%s %s/dbstor" % (self.user, self.group, self.cms_home)
- str_cmd += " && chmod 640 %s/dbstor/conf/dbs/dbstor_config.ini" % (self.cms_home)
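+ # keep extending the same "&&" chain: copy configs, append settings, then fix ownership and mode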
+ str_cmd += " && cp -raf %s/cfg/osd.cfg %s/dbstor/conf/infra/config/osd.cfg" % (
+ self.install_path,
+ self.cms_home,
+ )
+ str_cmd += (
+ " && cp -raf /opt/cantian/dbstor/tools/dbstor_config.ini %s/dbstor/conf/dbs/"
+ % (self.cms_home)
+ )
+ str_cmd += (
+ " && echo 'DBSTOR_OWNER_NAME = cms' >> %s/dbstor/conf/dbs/dbstor_config.ini"
+ % (self.cms_home)
+ )
+ str_cmd += (
+ " && echo 'CLUSTER_NAME = %s' >> %s/dbstor/conf/dbs/dbstor_config.ini"
+ % (self.cluster_name, self.cms_home)
+ )
+ str_cmd += (
+ " && echo 'CLUSTER_UUID = %s' >> %s/dbstor/conf/dbs/dbstor_config.ini"
+ % (self.cluster_uuid, self.cms_home)
+ )
+ str_cmd += " && echo 'INST_ID = %s' >> %s/dbstor/conf/dbs/dbstor_config.ini" % (
+ inst_id,
+ self.cms_home,
+ )
+ str_cmd += r" && sed -i '/^\s*$/d' %s/dbstor/conf/dbs/dbstor_config.ini" % (
+ self.cms_home
+ )
+ str_cmd += " && chown -R %s:%s %s/dbstor" % (
+ self.user,
+ self.group,
+ self.cms_home,
+ )
+ str_cmd += " && chmod 640 %s/dbstor/conf/dbs/dbstor_config.ini" % (
+ self.cms_home
+ )
LOGGER.info("copy config files cmd: " + str_cmd)
ret_code, stdout, stderr = _exec_popen(str_cmd)
if ret_code:
- LOGGER.error("copy dbstor config file failed, return: " +
- str(ret_code) + os.linesep + stdout + os.linesep + stderr)
+ LOGGER.error(
+ "copy dbstor config file failed, return: "
+ + str(ret_code)
+ + os.linesep
+ + stdout
+ + os.linesep
+ + stderr
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("copy dbstor config file failed, return: " +
- str(ret_code) + os.linesep + stdout + os.linesep + stderr)
+ raise Exception(
+ "copy dbstor config file failed, return: "
+ + str(ret_code)
+ + os.linesep
+ + stdout
+ + os.linesep
+ + stderr
+ )
def install(self):
"""
install cms process, copy bin
"""
- LOGGER.info("======================== begin to install cms ========================")
+ LOGGER.info(
+ "======================== begin to install cms ========================"
+ )
if self.install_step == 2:
LOGGER.info("cms install already")
- LOGGER.info("======================== install cms module successfully ========================")
+ LOGGER.info(
+ "======================== install cms module successfully ========================"
+ )
return
if self.install_step == 0:
err_msg = "please run pre_install previously"
@@ -1290,20 +1542,27 @@ class CmsCtl(object):
self.copy_dbstor_config()
self.prepare_cms_tool_dbstor_config()
- cmd = "sh %s -P install_cms >> %s 2>&1" % (os.path.join(self.cms_scripts, "start_cms.sh"), LOG_FILE)
+ cmd = "sh %s -P install_cms >> %s 2>&1" % (
+ os.path.join(self.cms_scripts, "start_cms.sh"),
+ LOG_FILE,
+ )
run_cmd(cmd, "failed to set cms node information")
self.install_step = 2
self.set_cms_conf()
- LOGGER.info("======================== install cms module successfully ========================")
+ LOGGER.info(
+ "======================== install cms module successfully ========================"
+ )
def check_start_status(self):
"""
After cms starts, check the server and voting status to make sure cms is healthy before cantiand is brought up
"""
- server_status_cmd = "source ~/.bashrc && cms stat -server %s | grep -v \"NODE_ID\" | awk '{print $2}'" \
- % self.node_id
+ server_status_cmd = (
+ "source ~/.bashrc && cms stat -server %s | grep -v \"NODE_ID\" | awk '{print $2}'"
+ % self.node_id
+ )
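+ # grep drops the header line; awk keeps only the columns carrying the status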
voting_status_cmd = "source ~/.bashrc && cms node -connected | awk '{print $1, $NF}' | grep -v \"VOTING\""
timeout = 300
while timeout:
@@ -1314,7 +1573,9 @@ class CmsCtl(object):
LOGGER.error(err_msg)
raise Exception(err_msg)
try:
- server_status = run_cmd(server_status_cmd, "failed query cms server status")
+ server_status = run_cmd(
+ server_status_cmd, "failed query cms server status"
+ )
except ValueError as _err:
time.sleep(1)
continue
@@ -1337,7 +1598,9 @@ class CmsCtl(object):
LOGGER.error(err_msg)
raise Exception(err_msg)
try:
- voting_status = run_cmd(voting_status_cmd, "failed to query voting status")
+ voting_status = run_cmd(
+ voting_status_cmd, "failed to query voting status"
+ )
except ValueError as _err2:
time.sleep(1)
continue
@@ -1361,28 +1624,39 @@ class CmsCtl(object):
"""
start cms process: check>>start>>change status
"""
- LOGGER.info("========================= begin to start cms process ========================")
+ LOGGER.info(
+ "========================= begin to start cms process ========================"
+ )
if self.install_step <= 1:
LOGGER.error("please run install previously")
if FORCE_UNINSTALL != "force":
raise Exception("please run install previously")
if self.install_step == 3:
LOGGER.info("warning: cms started already")
- LOGGER.info("========================= start cms process successfully ========================")
+ LOGGER.info(
+ "========================= start cms process successfully ========================"
+ )
return
- cmd = "sh %s -P start_cms >> %s 2>&1" % (os.path.join(self.cms_scripts, "start_cms.sh"), LOG_FILE)
+ cmd = "sh %s -P start_cms >> %s 2>&1" % (
+ os.path.join(self.cms_scripts, "start_cms.sh"),
+ LOG_FILE,
+ )
run_cmd(cmd, "failed to start cms process")
self.install_step = 3
self.set_cms_conf()
self.check_start_status()
- LOGGER.info("======================== start cms process successfully ========================")
+ LOGGER.info(
+ "======================== start cms process successfully ========================"
+ )
def check_status(self):
"""
check cms process status
"""
- LOGGER.info("======================== begin to check cms process status ========================")
+ LOGGER.info(
+ "======================== begin to check cms process status ========================"
+ )
if self.install_step <= 1:
err_msg = "please install cms previously"
LOGGER.error(err_msg)
@@ -1394,7 +1668,9 @@ class CmsCtl(object):
for node_info in node_list:
_node_id = node_info.split(" ")[0].strip(" ")
if self.node_id == int(_node_id) and "TRUE" in node_info:
- LOGGER.info("======================== check cms process status successfully ========================")
+ LOGGER.info(
+ "======================== check cms process status successfully ========================"
+ )
return
err_msg = "check cms process status failed"
LOGGER.error(err_msg)
@@ -1405,15 +1681,24 @@ class CmsCtl(object):
"""
stop cms process: kill>>change status
"""
- LOGGER.info("======================== begin to stop the cms process ========================")
- cms_processes = ["cms server", "cmsctl.py start", "cms/start.sh", "start_cms.sh"]
+ LOGGER.info(
+ "======================== begin to stop the cms process ========================"
+ )
+ cms_processes = [
+ "cms server",
+ "cmsctl.py start",
+ "cms/start.sh",
+ "start_cms.sh",
+ ]
for cms_process in cms_processes:
self.kill_process(cms_process)
self.check_process_status(cms_process)
self.install_step = 2
self.set_cms_conf()
- LOGGER.info("======================== stop the cms successfully ========================")
+ LOGGER.info(
+ "======================== stop the cms successfully ========================"
+ )
def check_gcc_home_process(self):
"""
@@ -1436,7 +1721,10 @@ class CmsCtl(object):
LOGGER.info("gcc_home occupied by a process:%s" % _stdout)
def delete_only_start_file(self):
- cmd = "dbstor --delete-file --fs-name=%s --file-name=onlyStart.file" % self.storage_share_fs
+ cmd = (
+ "dbstor --delete-file --fs-name=%s --file-name=onlyStart.file"
+ % self.storage_share_fs
+ )
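+ # ask the dbstor tool to remove onlyStart.file from the share file system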
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
LOGGER.error("Failed to delete onlyStart.file")
@@ -1445,29 +1733,46 @@ class CmsCtl(object):
"""
uninstall cms: environment values and app files
"""
- LOGGER.info("======================== begin to uninstall cms module ========================")
+ LOGGER.info(
+ "======================== begin to uninstall cms module ========================"
+ )
if self.gcc_home == "":
if deploy_mode not in USE_DBSTOR:
- self.gcc_home = os.path.join("/mnt/dbdata/remote/share_" + self.storage_share_fs, "gcc_home")
+ self.gcc_home = os.path.join(
+ "/mnt/dbdata/remote/share_" + self.storage_share_fs, "gcc_home"
+ )
elif deploy_mode == "dss":
self.gcc_home = "/dev/gcc-disk"
-
+
if self.node_id == 0:
stdout, stderr = "", ""
if cantian_in_container == "0":
self.cms_check_share_logic_ip_isvalid(self.share_logic_ip)
LOGGER.info("if blocked here, please check if the network is normal")
if deploy_mode == "file" or os.path.exists(YOUMAI_DEMO):
- versions_yml = os.path.join("/mnt/dbdata/remote/share_" + self.storage_share_fs, "versions.yml")
- gcc_backup = os.path.join("/mnt/dbdata/remote/archive_" + self.storage_archive_fs, "gcc_backup")
- str_cmd = "rm -rf %s && rm -rf %s && rm -rf %s" % (self.gcc_home, versions_yml, gcc_backup)
- ret_code, stdout, stderr = _exec_popen("timeout 10 ls %s" % self.gcc_home)
+ versions_yml = os.path.join(
+ "/mnt/dbdata/remote/share_" + self.storage_share_fs, "versions.yml"
+ )
+ gcc_backup = os.path.join(
+ "/mnt/dbdata/remote/archive_" + self.storage_archive_fs,
+ "gcc_backup",
+ )
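+ # remove gcc_home together with the versions.yml marker and the gcc backup on the share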
+ str_cmd = "rm -rf %s && rm -rf %s && rm -rf %s" % (
+ self.gcc_home,
+ versions_yml,
+ gcc_backup,
+ )
+ ret_code, stdout, stderr = _exec_popen(
+ "timeout 10 ls %s" % self.gcc_home
+ )
if deploy_mode in USE_DBSTOR:
self.delete_only_start_file()
- str_cmd = "cms gcc -del && dbstor --delete-file --fs-name=%s --file-name=versions.yml && " \
- "dbstor --delete-file --fs-name=%s --file-name=gcc_backup" \
- % (self.storage_share_fs, self.storage_archive_fs)
+ str_cmd = (
+ "cms gcc -del && dbstor --delete-file --fs-name=%s --file-name=versions.yml && "
+ "dbstor --delete-file --fs-name=%s --file-name=gcc_backup"
+ % (self.storage_share_fs, self.storage_archive_fs)
+ )
ret_code = 0
if deploy_mode in USE_DSS:
str_cmd = "dd if=/dev/zero of=/dev/gcc-disk bs=1M count=1 conv=notrunc"
@@ -1480,38 +1785,55 @@ class CmsCtl(object):
elif ret_code:
output = stdout + stderr
self.check_gcc_home_process()
- err_msg = "failed to remove gcc home.\ncommand: %s.\noutput: %s \
+ err_msg = (
+ "failed to remove gcc home.\ncommand: %s.\noutput: %s \
\npossible reasons: \
\n1. user was using cms tool when uninstall. \
\n2. cms has not stopped. \
\n3. dbstor link was error. \
- \n4. others, please contact the engineer to solve." % (str_cmd, output)
+ \n4. others, please contact the engineer to solve."
+ % (str_cmd, output)
+ )
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
elif FORCE_UNINSTALL != "force" and ret_code != 2:
- LOGGER.error("can not connect to remote %s"
- "ret_code : %s, stdout : %s, stderr : %s" % (self.gcc_home, ret_code, stdout, stderr))
+ LOGGER.error(
+ "cannot connect to remote %s. "
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (self.gcc_home, ret_code, stdout, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("can not connect to remote %s"
- "ret_code : %s, stdout : %s, stderr : %s" % (self.gcc_home, ret_code, stdout, stderr))
+ raise Exception(
+ "cannot connect to remote %s. "
+ "ret_code : %s, stdout : %s, stderr : %s"
+ % (self.gcc_home, ret_code, stdout, stderr)
+ )
self.clean_environment()
self.clean_install_path()
str_cmd = "rm -rf {0}/cms_server.lck {0}/local {0}/gcc_backup {0}/cantian.ctd.cms*".format(
- self.cms_home)
+ self.cms_home
+ )
run_cmd(str_cmd, "failed to remove running files in cms home")
- LOGGER.info("======================== uninstall cms module successfully ========================")
+ LOGGER.info(
+ "======================== uninstall cms module successfully ========================"
+ )
def backup(self):
"""
save cms config
"""
- LOGGER.info("======================== begin to backup config files ========================")
+ LOGGER.info(
+ "======================== begin to backup config files ========================"
+ )
config_json = os.path.join(self.cms_home, "cfg/cms.json")
if os.path.exists(config_json):
- str_cmd = ("cp -arf %s/cfg/* %s" % (self.cms_home, os.path.dirname(self.cms_old_config)))
+ str_cmd = "cp -arf %s/cfg/* %s" % (
+ self.cms_home,
+ os.path.dirname(self.cms_old_config),
+ )
LOGGER.info("save cms json files cmd: " + str_cmd)
run_cmd(str_cmd, "failed to save cms config files")
else:
@@ -1519,21 +1841,31 @@ class CmsCtl(object):
LOGGER.error(err_msg)
if FORCE_UNINSTALL != "force":
raise Exception(err_msg)
- LOGGER.info("======================== backup config files successfully ========================")
+ LOGGER.info(
+ "======================== backup config files successfully ========================"
+ )
def upgrade(self):
- LOGGER.info("======================== begin to upgrade cms dbstor config ========================")
- if deploy_mode in USE_DBSTOR and not glob.glob("%s/dbstor/conf/dbs/dbstor_config_tool*" % self.cms_home):
+ LOGGER.info(
+ "======================== begin to upgrade cms dbstor config ========================"
+ )
+ if deploy_mode in USE_DBSTOR and not glob.glob(
+ "%s/dbstor/conf/dbs/dbstor_config_tool*" % self.cms_home
+ ):
self.copy_dbstor_config()
self.prepare_cms_tool_dbstor_config()
- LOGGER.info("======================== upgrade cms dbstor config successfully ========================")
+ LOGGER.info(
+ "======================== upgrade cms dbstor config successfully ========================"
+ )
def init_container(self):
"""
cms init in container
"""
- LOGGER.info("======================== begin to init cms process =======================")
+ LOGGER.info(
+ "======================== begin to init cms process ======================="
+ )
if self.install_step == 3:
LOGGER.info("Warning: cms start already")
return
@@ -1541,15 +1873,22 @@ class CmsCtl(object):
self.copy_dbstor_config()
if deploy_mode == "dbstor" or deploy_mode == "combined":
self.prepare_cms_tool_dbstor_config()
-
- LOGGER.info("======================= init cms process ============================")
- if self.node_id == 0:
- cmd = "sh %s -P init_container >> %s 2>&1" %(os.path.join(os.path.dirname(__file__), "start_cms.sh"), LOG_FILE)
+
+ LOGGER.info(
+ "======================= init cms process ============================"
+ )
+ if self.node_id == 0:
+ cmd = "sh %s -P init_container >> %s 2>&1" % (
+ os.path.join(os.path.dirname(__file__), "start_cms.sh"),
+ LOG_FILE,
+ )
run_cmd(cmd, "failed to init cms")
-
+
self.install_step = 2
self.set_cms_conf()
- LOGGER.info("======================= init cms process successfully ======================")
+ LOGGER.info(
+ "======================= init cms process successfully ======================"
+ )
def kill_process(self, process_name):
"""
@@ -1557,10 +1896,14 @@ class CmsCtl(object):
input: process name
output: NA
"""
- kill_cmd = (r"proc_pid_list=`ps ux | grep '%s' | grep -v grep"
- r"|awk '{print $2}'` && " % process_name)
- kill_cmd += (r"(if [[ X\"$proc_pid_list\" != X\"\" ]];then echo "
- r"$proc_pid_list | xargs kill -9; fi)")
+ kill_cmd = (
+ r"proc_pid_list=`ps ux | grep '%s' | grep -v grep"
+ r"|awk '{print $2}'` && " % process_name
+ )
+ kill_cmd += (
+ r"(if [[ X\"$proc_pid_list\" != X\"\" ]];then echo "
+ r"$proc_pid_list | xargs kill -9; fi)"
+ )
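+ # the assembled shell line greps ps for the name (excluding the grep itself) and kill -9s any matching PIDs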
LOGGER.info("kill process cmd: %s" % kill_cmd)
run_cmd(kill_cmd, "failed to kill process %s" % process_name)
@@ -1573,9 +1916,15 @@ class CmsCtl(object):
get_cmd = "ps ux | grep '%s' | grep -v grep | awk '{print $2}'" % process_name
ret_code, stdout, stderr = _exec_popen(get_cmd)
if ret_code:
- LOGGER.error("Failed to get %s pid. cmd: %s. Error: %s" % (process_name, get_cmd, stderr))
+ LOGGER.error(
+ "Failed to get %s pid. cmd: %s. Error: %s"
+ % (process_name, get_cmd, stderr)
+ )
if FORCE_UNINSTALL != "force":
- raise Exception("Failed to get %s pid. cmd: %s. Error: %s" % (process_name, get_cmd, stderr))
+ raise Exception(
+ "Failed to get %s pid. cmd: %s. Error: %s"
+ % (process_name, get_cmd, stderr)
+ )
return stdout
def check_process_status(self, process_name):
@@ -1587,7 +1936,9 @@ class CmsCtl(object):
for i in range(CHECK_MAX_TIMES):
pid = self.get_pid(process_name)
if pid:
- LOGGER.info("checked %s times, %s pid is %s" % (i + 1, process_name, pid))
+ LOGGER.info(
+ "checked %s times, %s pid is %s" % (i + 1, process_name, pid)
+ )
if i != CHECK_MAX_TIMES - 1:
time.sleep(5)
else:
@@ -1621,11 +1972,13 @@ class CmsCtl(object):
output: NA
"""
LOGGER.info("cleaning user environment variables...")
- path_cmd = (r"/^\s*export\s*PATH=\"%s\/bin\":\$PATH$/d"
- % genreg_string(self.install_path))
- lib_cmd = (r"/^\s*export\s*LD_LIBRARY_PATH=\"%s\/lib\":\"%s\/add-ons\".*$/d"
- % (genreg_string(self.install_path),
- genreg_string(self.install_path)))
+ path_cmd = r"/^\s*export\s*PATH=\"%s\/bin\":\$PATH$/d" % genreg_string(
+ self.install_path
+ )
+ lib_cmd = r"/^\s*export\s*LD_LIBRARY_PATH=\"%s\/lib\":\"%s\/add-ons\".*$/d" % (
+ genreg_string(self.install_path),
+ genreg_string(self.install_path),
+ )
cms_cmd = r"/^\s*export\s*CMS_HOME=\".*\"$/d"
cmds = [path_cmd, lib_cmd, cms_cmd]
@@ -1653,7 +2006,15 @@ def main():
cms.parse_parameters(cms.cms_new_config)
cms.install()
- if arg in {"start", "check_status", "stop", "uninstall", "backup", "upgrade", "init_container"}:
+ if arg in {
+ "start",
+ "check_status",
+ "stop",
+ "uninstall",
+ "backup",
+ "upgrade",
+ "init_container",
+ }:
if os.path.exists("/opt/cantian/cms/cfg/cms.json"):
install_cms_cfg = "/opt/cantian/cms/cfg/cms.json"
diff --git a/pkg/deploy/action/cms/get_config_info.py b/pkg/deploy/action/cms/get_config_info.py
index 3bc1a92218d442eeaa5e07558417b7c87337705a..10ce9e2fff1c292587739f653f57bd1d142ca3e3 100644
--- a/pkg/deploy/action/cms/get_config_info.py
+++ b/pkg/deploy/action/cms/get_config_info.py
@@ -34,9 +34,11 @@ def get_value(param):
return cms_conf.get("install_step")
if param == "deploy_mode":
- if info.get('deploy_mode', ""):
- return info.get('deploy_mode')
- return "dbstor" if info.get('deploy_policy', "") in ["ModeB", "ModeC"] else "file"
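+ # prefer an explicit deploy_mode; otherwise derive it from deploy_policy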
+ if info.get("deploy_mode", ""):
+ return info.get("deploy_mode")
+ return (
+ "dbstor" if info.get("deploy_policy", "") in ["ModeB", "ModeC"] else "file"
+ )
return info.get(param, "")
@@ -45,4 +47,3 @@ if __name__ == "__main__":
_param = sys.argv[1]
res = get_value(_param)
print(res)
-
diff --git a/pkg/deploy/action/cms/log.py b/pkg/deploy/action/cms/log.py
index 7991b87e7e6c52578c94d20933b9730ba1f7d70e..c516ff38c1650c9b05a87d60d3abeeac066361f7 100644
--- a/pkg/deploy/action/cms/log.py
+++ b/pkg/deploy/action/cms/log.py
@@ -16,9 +16,9 @@ CONSOLE_CONF = {
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
"logging_context_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s"
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
}
}
@@ -41,14 +41,30 @@ def _get_log_file_path(project):
os.makedirs(logger_dir)
return os.path.join(logger_dir, "{}.log".format(project))
- return ''
+ return ""
SENSITIVE_STR = [
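+ # substrings that mark a log record as carrying sensitive data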
- 'Password', 'passWord', 'PASSWORD', 'password', 'Pswd',
- 'PSWD', 'pwd', 'signature', 'HmacSHA256', 'newPasswd',
- 'private', 'certfile', 'secret', 'token', 'Token', 'pswd',
- 'passwd', 'mysql -u', 'session', 'cookie'
+ "Password",
+ "passWord",
+ "PASSWORD",
+ "password",
+ "Pswd",
+ "PSWD",
+ "pwd",
+ "signature",
+ "HmacSHA256",
+ "newPasswd",
+ "private",
+ "certfile",
+ "secret",
+ "token",
+ "Token",
+ "pswd",
+ "passwd",
+ "mysql -u",
+ "session",
+ "cookie",
]
@@ -74,8 +90,10 @@ def setup(project_name):
log_path = _get_log_file_path(project_name)
if log_path:
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
log_root.addHandler(file_log)
log_root.addFilter(DefaultLogFilter())
@@ -83,7 +101,9 @@ def setup(project_name):
handler.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")))
+ datefmt=log_config.get("log_date_format"),
+ )
+ )
if log_config.get("debug"):
log_root.setLevel(logging.DEBUG)
@@ -95,4 +115,4 @@ def setup(project_name):
LOGGER = setup(os.path.basename(LOG_FILE).split(".")[0])
log_directory = log_config.get("log_dir")
os.chmod(log_directory, 0o750)
-os.chmod(f'{str(Path(log_directory, os.path.basename(LOG_FILE)))}', 0o640)
+os.chmod(f"{str(Path(log_directory, os.path.basename(LOG_FILE)))}", 0o640)
diff --git a/pkg/deploy/action/dbstor/dbstor_backup.py b/pkg/deploy/action/dbstor/dbstor_backup.py
index ad5144f7be0cb2789e57030b95041928b0b46f5d..a106006af972b3da34ef465756418cca8bc298ab 100644
--- a/pkg/deploy/action/dbstor/dbstor_backup.py
+++ b/pkg/deploy/action/dbstor/dbstor_backup.py
@@ -31,6 +31,7 @@ MAX_DIRECTORY_MODE = 755
class ReadConfigParserNoCast(ConfigParser):
"Inherit from built-in class: ConfigParser"
+
def optionxform(self, optionstr):
"Rewrite without lower()"
return optionstr
@@ -40,6 +41,7 @@ class Options(object):
"""
class for command line options
"""
+
def __init__(self):
self.install_user_privilege = "withoutroot"
self.conf_file_path = "/opt/cantian/backup/files"
@@ -48,9 +50,11 @@ class Options(object):
self.log_file = "/opt/cantian/log/dbstor/backup.log"
self.ini_file = "/opt/cantian/dbstor/tools/dbstor_config.ini"
self.docker_ini_file = "/home/regress/cantian_data"
- self.js_conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json")
+ self.js_conf_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json"
+ )
self.dbstor_config = {}
- self.section = 'CLIENT'
+ self.section = "CLIENT"
self.note_id = ""
self.cluster_name = ""
@@ -60,11 +64,13 @@ gPyVersion = platform.python_version()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
-logger_handle = logging.FileHandler(g_opts.log_file, 'a', "utf-8")
+logger_handle = logging.FileHandler(g_opts.log_file, "a", "utf-8")
logger_handle.setLevel(logging.DEBUG)
-logger_formatter = logging.Formatter('[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-'
- '%(message)s-[%(process)s]')
+logger_formatter = logging.Formatter(
+ "[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-"
+ "%(message)s-[%(process)s]"
+)
logger_handle.setFormatter(logger_formatter)
logger.addHandler(logger_handle)
logger.info("init logging success")
@@ -88,8 +94,10 @@ def log_exit(msg):
:return: NA
"""
console_and_log("Error: " + msg)
- print("Please refer to install log \"%s\" for more detailed information."
- % g_opts.log_file)
+ print(
+ 'Please refer to install log "%s" for more detailed information.'
+ % g_opts.log_file
+ )
raise ValueError(str(msg))
@@ -102,8 +110,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- p_obj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
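+ # start a bash child process and feed it the command text over stdin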
+ p_obj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
p_obj.stdin.write(cmd.encode())
@@ -170,7 +183,10 @@ def clean_backup_ini():
try:
os.remove(g_opts.backup_ini_file)
except IOError as io_err:
- log_exit("Error: Can not remove backup dbstor config file: " + g_opts.backup_ini_file)
+ log_exit(
+ "Error: Can not remove backup dbstor config file: "
+ + g_opts.backup_ini_file
+ )
def read_ini_parameter():
@@ -211,13 +227,19 @@ def check_backup_ini_path():
def read_file_path():
- with os.fdopen(os.open(g_opts.js_conf_file, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r",)\
- as file_handle:
+ with os.fdopen(
+ os.open(
+ g_opts.js_conf_file, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR
+ ),
+ "r",
+ ) as file_handle:
json_data = json.load(file_handle)
- g_opts.note_id = json_data.get('node_id', "").strip()
- g_opts.cluster_name = json_data.get('cluster_name', "").strip()
+ g_opts.note_id = json_data.get("node_id", "").strip()
+ g_opts.cluster_name = json_data.get("cluster_name", "").strip()
g_opts.ini_file = "/opt/cantian/dbstor/tools/dbstor_config.ini"
- g_opts.backup_ini_file = os.path.join(g_opts.conf_file_path, "dbstor_config.ini")
+ g_opts.backup_ini_file = os.path.join(
+ g_opts.conf_file_path, "dbstor_config.ini"
+ )
def main():
diff --git a/pkg/deploy/action/dbstor/dbstor_install.py b/pkg/deploy/action/dbstor/dbstor_install.py
index 920ac0a8966c7e42721afa91855555d4a7524712..1db4a3cc9855f2fed492103f4fa354de1376311b 100644
--- a/pkg/deploy/action/dbstor/dbstor_install.py
+++ b/pkg/deploy/action/dbstor/dbstor_install.py
@@ -30,12 +30,16 @@ try:
LOG_PATH = "/opt/cantian/log/dbstor"
LOG_FILE = "/opt/cantian/log/dbstor/install.log"
- JS_CONF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json")
+ JS_CONF_FILE = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json"
+ )
DBSTOR_CONF_FILE = "/mnt/dbdata/remote/share_"
- CONTAINER_DBSTOR_CONF_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/container")
+ CONTAINER_DBSTOR_CONF_DIR = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/container"
+ )
DOCKER_DBSTOR_CONF_FILE = "/home/regress/cantian_data"
BACKUP_CONF_FILE = "/opt/cantian/backup/files"
- SECTION = 'CLIENT'
+ SECTION = "CLIENT"
MAX_DIRECTORY_MODE = 0o755
PYTHON242 = "2.4.2"
PYTHON25 = "2.5"
@@ -43,11 +47,13 @@ try:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
- logger_handle = logging.FileHandler(LOG_FILE, 'a', "utf-8")
+ logger_handle = logging.FileHandler(LOG_FILE, "a", "utf-8")
logger_handle.setLevel(logging.DEBUG)
- logger_formatter = logging.Formatter('[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-'
- '%(message)s-[%(process)s]')
+ logger_formatter = logging.Formatter(
+ "[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-"
+ "%(message)s-[%(process)s]"
+ )
logger_handle.setFormatter(logger_formatter)
logger.addHandler(logger_handle)
logger.info("init logging success")
@@ -57,7 +63,9 @@ try:
elif gPyVersion >= PYTHON25:
pass
else:
- import_msg = "This install script can not support python version: %s" % gPyVersion
+ import_msg = (
+ "This install script cannot support python version: %s" % gPyVersion
+ )
logger.info(import_msg)
raise ValueError(import_msg)
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
@@ -71,6 +79,7 @@ GLOBAL_KMC_EXT = None
class ReadConfigParserNoCast(ConfigParser):
"Inherit from built-in class: ConfigParser"
+
def optionxform(self, optionstr):
"Rewrite without lower()"
return optionstr
@@ -83,22 +92,35 @@ def check_path(path_type_in):
:return: weather validity
"""
path_len = len(path_type_in)
- ascii_map = {'a_ascii': ord('a'), 'z_ascii': ord('z'), 'a_upper_ascii': ord('A'), 'z_upper_ascii': ord('Z'),
- 'num0_ascii': ord('0'), 'num9_ascii': ord('9'), 'blank_ascii': ord(' '), 'sep1_ascii': ord(os.sep),
- 'sep2_ascii': ord('_'), 'sep3_ascii': ord(':'), 'sep4_ascii': ord('-'), 'sep5_ascii': ord('.')}
- char_check_list1 = [ascii_map.get('blank_ascii', 0),
- ascii_map.get('sep1_ascii', 0),
- ascii_map.get('sep2_ascii', 0),
- ascii_map.get('sep4_ascii', 0),
- ascii_map.get('sep5_ascii', 0)
- ]
-
- char_check_list2 = [ascii_map.get('blank_ascii', 0),
- ascii_map.get('sep1_ascii', 0),
- ascii_map.get('sep2_ascii', 0),
- ascii_map.get('sep3_ascii', 0),
- ascii_map.get('sep4_ascii', 0)
- ]
+ ascii_map = {
+ "a_ascii": ord("a"),
+ "z_ascii": ord("z"),
+ "a_upper_ascii": ord("A"),
+ "z_upper_ascii": ord("Z"),
+ "num0_ascii": ord("0"),
+ "num9_ascii": ord("9"),
+ "blank_ascii": ord(" "),
+ "sep1_ascii": ord(os.sep),
+ "sep2_ascii": ord("_"),
+ "sep3_ascii": ord(":"),
+ "sep4_ascii": ord("-"),
+ "sep5_ascii": ord("."),
+ }
+ char_check_list1 = [
+ ascii_map.get("blank_ascii", 0),
+ ascii_map.get("sep1_ascii", 0),
+ ascii_map.get("sep2_ascii", 0),
+ ascii_map.get("sep4_ascii", 0),
+ ascii_map.get("sep5_ascii", 0),
+ ]
+
+ char_check_list2 = [
+ ascii_map.get("blank_ascii", 0),
+ ascii_map.get("sep1_ascii", 0),
+ ascii_map.get("sep2_ascii", 0),
+ ascii_map.get("sep3_ascii", 0),
+ ascii_map.get("sep4_ascii", 0),
+ ]
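+ # list1 (Linux) additionally allows '.', list2 (Windows) additionally allows ':'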
if CURRENT_OS == "Linux":
return check_path_linux(path_len, path_type_in, ascii_map, char_check_list1)
elif CURRENT_OS == "Windows":
@@ -112,10 +134,16 @@ def check_path(path_type_in):
def check_path_linux(path_len, path_type_in, ascii_map, char_check_list):
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (ascii_map.get('a_ascii', 0) <= char_check <= ascii_map.get('z_ascii', 0)
- or ascii_map.get('a_upper_ascii', 0) <= char_check <= ascii_map.get('z_upper_ascii', 0)
- or ascii_map.get('num0_ascii', 0) <= char_check <= ascii_map.get('num9_ascii', 0)
- or char_check in char_check_list)):
+ if not (
+ ascii_map.get("a_ascii", 0) <= char_check <= ascii_map.get("z_ascii", 0)
+ or ascii_map.get("a_upper_ascii", 0)
+ <= char_check
+ <= ascii_map.get("z_upper_ascii", 0)
+ or ascii_map.get("num0_ascii", 0)
+ <= char_check
+ <= ascii_map.get("num9_ascii", 0)
+ or char_check in char_check_list
+ ):
return False
return True
@@ -123,10 +151,16 @@ def check_path_linux(path_len, path_type_in, ascii_map, char_check_list):
def check_path_windows(path_len, path_type_in, ascii_map, char_check_list):
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (ascii_map.get('a_ascii', 0) <= char_check <= ascii_map.get('z_ascii', 0)
- or ascii_map.get('a_upper_ascii', 0) <= char_check <= ascii_map.get('z_upper_ascii', 0)
- or ascii_map.get('num0_ascii', 0) <= char_check <= ascii_map.get('num9_ascii', 0)
- or char_check in char_check_list)):
+ if not (
+ ascii_map.get("a_ascii", 0) <= char_check <= ascii_map.get("z_ascii", 0)
+ or ascii_map.get("a_upper_ascii", 0)
+ <= char_check
+ <= ascii_map.get("z_upper_ascii", 0)
+ or ascii_map.get("num0_ascii", 0)
+ <= char_check
+ <= ascii_map.get("num9_ascii", 0)
+ or char_check in char_check_list
+ ):
return False
return True
@@ -149,8 +183,7 @@ def log_exit(msg):
:return: NA
"""
console_and_log("Error: " + msg)
- print("Please refer to install log \"%s\" for more detailed information."
- % LOG_FILE)
+ print('Please refer to install log "%s" for more detailed information.' % LOG_FILE)
raise ValueError(str(msg))
@@ -162,18 +195,24 @@ def check_runner():
if owner_uid == 0:
if runner_uid != 0:
runner = pwd.getpwuid(runner_uid).pw_name
- log_exit("Error: The owner of install.py has root privilege,"
- " can't run it by user [%s]." % runner)
+ log_exit(
+ "Error: The owner of install.py has root privilege,"
+ " can't run it by user [%s]." % runner
+ )
else:
if runner_uid == 0:
owner = pwd.getpwuid(owner_uid).pw_name
- log_exit("Error: The owner of install.py is [%s],"
- " can't run it by root." % owner)
+ log_exit(
+ "Error: The owner of install.py is [%s],"
+ " can't run it by root." % owner
+ )
elif runner_uid != owner_uid:
runner = pwd.getpwuid(runner_uid).pw_name
owner = pwd.getpwuid(owner_uid).pw_name
- log_exit("Error: The owner of install.py [%s] is different"
- " with the executor [%s]." % (owner, runner))
+ log_exit(
+ "Error: The owner of install.py [%s] is different"
+ " from the executor [%s]." % (owner, runner)
+ )
def _exec_popen(cmd, values=None):
@@ -185,8 +224,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- p_obj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p_obj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
p_obj.stdin.write(cmd.encode())
@@ -217,14 +261,18 @@ def check_user(user, group):
try:
user_ = pwd.getpwnam(user)
except KeyError as key_error_msg:
- message = "Parameter input error: -U, the user does not exists.%s" % str(key_error_msg)
+ message = "Parameter input error: -U, the user does not exist. %s" % str(
+ key_error_msg
+ )
logger.error(message)
raise ValueError(message) from key_error_msg
try:
group_ = grp.getgrnam(group)
except KeyError as key_error_msg:
- message = "Parameter input error: -U, the group does not exists.%s" % str(key_error_msg)
+ message = "Parameter input error: -U, the group does not exist. %s" % str(
+ key_error_msg
+ )
logger.error(message)
raise ValueError(message) from key_error_msg
@@ -244,16 +292,19 @@ def check_user(user, group):
runner_uid = os.getuid()
if runner_uid != 0 and runner_uid != user_.pw_uid:
runner = pwd.getpwuid(runner_uid).pw_name
- message = "Parameter input error: -U, has to be the same as the executor [%s]" % runner
+ message = (
+ "Parameter input error: -U, has to be the same as the executor [%s]"
+ % runner
+ )
logger.error(message)
raise ValueError(message)
class DBStor:
- """ This is DBStor installer. """
+ """This is DBStor installer."""
def __init__(self):
- """ Constructor for the Installer class. """
+ """Constructor for the Installer class."""
logger.info("Begin init...")
logger.info("dbstor install runs on python version : %s", gPyVersion)
@@ -287,8 +338,8 @@ class DBStor:
self.dbstor_fs_vstore_id = "0"
self.dbstor_page_fs_vstore_id = "0"
self.dbstor_archive_fs_vstore_id = "0"
- self.dbstor_home="/opt/cantian/dbstor"
- self.dbstor_log="/opt/cantian/log/dbstor"
+ self.dbstor_home = "/opt/cantian/dbstor"
+ self.dbstor_log = "/opt/cantian/log/dbstor"
def check_ini(self):
"""
@@ -296,12 +347,16 @@ class DBStor:
"""
# check the log path
if not check_path(self.dbstor_conf_file):
- log_exit("Error: There is invalid character in specified dbstor config file.")
+ log_exit(
+ "Error: There is invalid character in specified dbstor config file."
+ )
if os.path.exists(self.dbstor_conf_file):
try:
os.remove(self.dbstor_conf_file)
except OSError as ex:
- log_exit("Error: Can not remove dbstor config file: " + self.dbstor_conf_file)
+ log_exit(
+ "Error: Can not remove dbstor config file: " + self.dbstor_conf_file
+ )
def check_log(self):
"""
@@ -323,58 +378,80 @@ class DBStor:
except OSError as ex:
log_exit("Error: Can not chmod log file: %s" % LOG_FILE)
-
def read_dbstor_para(self):
- with os.fdopen(os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r") as file_obj:
+ with os.fdopen(
+ os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR),
+ "r",
+ ) as file_obj:
json_data = json.load(file_obj)
- self.dbstor_config['NAMESPACE_FSNAME'] = json_data.get('storage_dbstor_fs', "").strip()
- self.dbstor_config['NAMESPACE_PAGE_FSNAME'] = json_data.get('storage_dbstor_page_fs', "").strip()
- self.dbstor_config['LOCAL_IP'] = json_data.get('cantian_vlan_ip', "").strip()
- self.dbstor_config['REMOTE_IP'] = json_data.get('storage_vlan_ip', "").strip()
- self.dbstor_config['NODE_ID'] = json_data.get('node_id', "").strip()
- self.dbstor_config['LINK_TYPE'] = json_data.get('link_type', "").strip()
- self.dbstor_config['LOG_VSTOR'] = json_data.get('dbstor_fs_vstore_id', "0").strip()
- self.dbstor_config['PAGE_VSTOR'] = json_data.get('dbstor_page_fs_vstore_id', "0").strip()
- self.dbstor_config['ARCHIVE_VSTOR'] = json_data.get('dbstor_archive_fs_vstore_id', "0").strip()
- if json_data.get('link_type', "").strip() != '0':
- self.dbstor_config['LINK_TYPE'] = '1'
- self.dbstor_config['CLUSTER_ID'] = json_data.get('cluster_id', "").strip()
+ self.dbstor_config["NAMESPACE_FSNAME"] = json_data.get(
+ "storage_dbstor_fs", ""
+ ).strip()
+ self.dbstor_config["NAMESPACE_PAGE_FSNAME"] = json_data.get(
+ "storage_dbstor_page_fs", ""
+ ).strip()
+ self.dbstor_config["LOCAL_IP"] = json_data.get(
+ "cantian_vlan_ip", ""
+ ).strip()
+ self.dbstor_config["REMOTE_IP"] = json_data.get(
+ "storage_vlan_ip", ""
+ ).strip()
+ self.dbstor_config["NODE_ID"] = json_data.get("node_id", "").strip()
+ self.dbstor_config["LINK_TYPE"] = json_data.get("link_type", "").strip()
+ self.dbstor_config["LOG_VSTOR"] = json_data.get(
+ "dbstor_fs_vstore_id", "0"
+ ).strip()
+ self.dbstor_config["PAGE_VSTOR"] = json_data.get(
+ "dbstor_page_fs_vstore_id", "0"
+ ).strip()
+ self.dbstor_config["ARCHIVE_VSTOR"] = json_data.get(
+ "dbstor_archive_fs_vstore_id", "0"
+ ).strip()
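+ # normalize link_type: any non-zero value is stored as "1"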
+ if json_data.get("link_type", "").strip() != "0":
+ self.dbstor_config["LINK_TYPE"] = "1"
+ self.dbstor_config["CLUSTER_ID"] = json_data.get("cluster_id", "").strip()
if self.cantian_in_container == "0":
- self.dbstor_config['IS_CONTAINER'] = "0"
+ self.dbstor_config["IS_CONTAINER"] = "0"
else:
- self.dbstor_config['IS_CONTAINER'] = "1"
- self.cluster_name = json_data.get('cluster_name', '')
- self.dbstor_config['NAMESPACE_SHARE_FSNAME'] = json_data.get('storage_share_fs', "").strip()
- self.dbstor_config['NAMESPACE_ARCHIVE_FSNAME'] = json_data.get('storage_archive_fs', "").strip()
+ self.dbstor_config["IS_CONTAINER"] = "1"
+ self.cluster_name = json_data.get("cluster_name", "")
+ self.dbstor_config["NAMESPACE_SHARE_FSNAME"] = json_data.get(
+ "storage_share_fs", ""
+ ).strip()
+ self.dbstor_config["NAMESPACE_ARCHIVE_FSNAME"] = json_data.get(
+ "storage_archive_fs", ""
+ ).strip()
def check_dbstor_para(self):
logger.info("Checking parameters.")
- if len(self.dbstor_config.get('NAMESPACE_FSNAME', "").strip()) == 0:
+ if len(self.dbstor_config.get("NAMESPACE_FSNAME", "").strip()) == 0:
message = "The storage_dbstor_fs parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('NAMESPACE_PAGE_FSNAME', "").strip()) == 0:
+ if len(self.dbstor_config.get("NAMESPACE_PAGE_FSNAME", "").strip()) == 0:
message = "The storage_dbstor_page_fs parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('LOCAL_IP', "").strip()) == 0:
+ if len(self.dbstor_config.get("LOCAL_IP", "").strip()) == 0:
message = "The cantian_vlan_ip parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('REMOTE_IP', "").strip()) == 0:
+ if len(self.dbstor_config.get("REMOTE_IP", "").strip()) == 0:
message = "The storage_vlan_ip parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('NODE_ID', "").strip()) == 0:
+ if len(self.dbstor_config.get("NODE_ID", "").strip()) == 0:
message = "The node_id parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('CLUSTER_ID', "").strip()) == 0:
+ if len(self.dbstor_config.get("CLUSTER_ID", "").strip()) == 0:
message = "The cluster_id parameter is not entered"
console_and_log(message)
raise ValueError(message)
elif self.cantian_in_container == "0":
- remote_ip_list = re.split(r"[;|]", self.dbstor_config.get('REMOTE_IP', "").strip())
+ remote_ip_list = re.split(
+ r"[;|]", self.dbstor_config.get("REMOTE_IP", "").strip()
+ )
link_cnt = 0
global DBSTOR_WARN_TYPE
for remote_ip in remote_ip_list:
@@ -382,30 +459,32 @@ class DBStor:
logger.info(cmd)
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- console_and_log("Failed to ping remote ip. Error: %s" % remote_ip.strip())
+ console_and_log(
+ "Failed to ping remote ip: %s" % remote_ip.strip()
+ )
DBSTOR_WARN_TYPE += 1
else:
link_cnt += 1
if link_cnt == 0:
log_exit("Error: failed to ping all remote ip")
- if len(self.dbstor_config.get('DPU_UUID', "").strip()) == 0:
+ if len(self.dbstor_config.get("DPU_UUID", "").strip()) == 0:
cmd = "uuidgen"
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
log_exit("Failed to get dpu uuid. Error: %s" % stderr)
- self.dbstor_config['DPU_UUID'] = stdout.strip()
+ self.dbstor_config["DPU_UUID"] = stdout.strip()
if len(self.cluster_name) == 0:
message = "The cluster_name parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('NAMESPACE_SHARE_FSNAME', "").strip()) == 0:
+ if len(self.dbstor_config.get("NAMESPACE_SHARE_FSNAME", "").strip()) == 0:
message = "The storage_share_fs parameter is not entered"
console_and_log(message)
raise ValueError(message)
- if len(self.dbstor_config.get('NAMESPACE_ARCHIVE_FSNAME', "").strip()) == 0:
+ if len(self.dbstor_config.get("NAMESPACE_ARCHIVE_FSNAME", "").strip()) == 0:
message = "The storage_archive_fs parameter is not entered"
console_and_log(message)
raise ValueError(message)
@@ -417,10 +496,12 @@ class DBStor:
"""
if len(passwd) < shortest_len or len(passwd) > longest_len:
- console_and_log("The length of input must be %s to %s."
- % (shortest_len, longest_len))
- raise ValueError("The length of input must be %s to %s."
- % (shortest_len, longest_len))
+ console_and_log(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
+ raise ValueError(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
# Can't save with user name
upper_cases = set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
lower_cases = set("abcdefghijklmnopqrstuvwxyz")
@@ -437,12 +518,14 @@ class DBStor:
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- console_and_log("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
- raise ValueError("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
+ console_and_log(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
+ raise ValueError(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
logger.info("Successfully written user name.")
def verify_dbstor_passwd(self, in_type, passwd, shortest_len, longest_len):
@@ -452,23 +535,33 @@ class DBStor:
"""
# eg 'length in [8-16]'
if len(passwd) < shortest_len or len(passwd) > longest_len:
- console_and_log("The length of input must be %s to %s."
- % (shortest_len, longest_len))
- raise ValueError("The length of input must be %s to %s."
- % (shortest_len, longest_len))
+ console_and_log(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
+ raise ValueError(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
# Can't save with user name
- user_name = self.dbstor_config.get('USER_NAME', "")
+ user_name = self.dbstor_config.get("USER_NAME", "")
if user_name and passwd == user_name:
console_and_log("Error: Password can't be the same as username.")
raise ValueError("Error: Password can't be the same as username.")
elif user_name and passwd == user_name[::-1]:
- console_and_log("Error: Password cannot be the same as username in reverse order")
- raise ValueError("Error: Password cannot be the same as username in reverse order")
+ console_and_log(
+ "Error: Password cannot be the same as username in reverse order"
+ )
+ raise ValueError(
+ "Error: Password cannot be the same as username in reverse order"
+ )
# The same character cannot appear three times consecutively
for i in range(0, len(passwd) - 2):
if passwd[i] == passwd[i + 1] and passwd[i + 1] == passwd[i + 2]:
- console_and_log("Error: The same character cannot appear three times consecutively ")
- raise ValueError("Error: The same character cannot appear three times consecutively")
+ console_and_log(
+ "Error: The same character cannot appear three times consecutively"
+ )
+ raise ValueError(
+ "Error: The same character cannot appear three times consecutively"
+ )
upper_cases = set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
lower_cases = set("abcdefghijklmnopqrstuvwxyz")
@@ -487,19 +580,25 @@ class DBStor:
if passwd_set & cases:
types += 1
if types < 3:
- console_and_log("Error: Password must contains at least three different types of characters.")
- raise ValueError("Error: Password must contains at least three"
- " different types of characters.")
+ console_and_log(
+ "Error: Password must contain at least three different types of characters."
+ )
+ raise ValueError(
+ "Error: Password must contain at least three"
+ " different types of characters."
+ )
# Only can contains enumerated cases
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- console_and_log("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
- raise ValueError("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
+ console_and_log(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
+ raise ValueError(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
def getch(self):
file_handle = sys.stdin.fileno()
@@ -529,7 +628,9 @@ class DBStor:
sys.stdout.flush()
password += char
- def get_dbstor_usernm_passwd(self, input_prompt, file_prompt, shortest_len, longest_len):
+ def get_dbstor_usernm_passwd(
+ self, input_prompt, file_prompt, shortest_len, longest_len
+ ):
"""Get new passwd"""
flag = 0
new_param = ""
@@ -538,10 +639,14 @@ class DBStor:
try:
if input_prompt == "UserName" and self.cantian_in_container == "0":
new_param = input("UserName: ")
- self.verify_dbstor_usernm(input_prompt, new_param, shortest_len, longest_len)
+ self.verify_dbstor_usernm(
+ input_prompt, new_param, shortest_len, longest_len
+ )
elif self.cantian_in_container == "0":
new_param = input("PassWord: ")
- self.verify_dbstor_passwd(input_prompt, new_param, shortest_len, longest_len)
+ self.verify_dbstor_passwd(
+ input_prompt, new_param, shortest_len, longest_len
+ )
break
except ValueError as error:
logger.error(str(error))
@@ -566,7 +671,9 @@ class DBStor:
conf.add_section(SECTION)
# Encrypt the password
if encrypt_passwd and self.cantian_in_container == "0":
- self.dbstor_config['PASSWORD'] = GLOBAL_KMC_EXT.encrypt(self.dbstor_config.get('PASSWORD', ""))
+ self.dbstor_config["PASSWORD"] = GLOBAL_KMC_EXT.encrypt(
+ self.dbstor_config.get("PASSWORD", "")
+ )
for key in self.dbstor_config:
conf.set(SECTION, key, self.dbstor_config[key])
flags = os.O_CREAT | os.O_RDWR | os.O_TRUNC
@@ -579,24 +686,29 @@ class DBStor:
"""Generate DBstor Config parameter."""
try:
logger.info("Get username and password of dbstor config.")
- self.dbstor_config['USER_NAME'] = self.get_dbstor_usernm_passwd(input_prompt="UserName",
- file_prompt="dbstor config",
- shortest_len=6, longest_len=32)
- if self.dbstor_config.get('USER_NAME', 0) == 0:
+ self.dbstor_config["USER_NAME"] = self.get_dbstor_usernm_passwd(
+ input_prompt="UserName",
+ file_prompt="dbstor config",
+ shortest_len=6,
+ longest_len=32,
+ )
+ if self.dbstor_config.get("USER_NAME", 0) == 0:
raise ValueError("create config file failed")
- self.dbstor_config['PASSWORD'] = self.get_dbstor_usernm_passwd(input_prompt="PassWord",
- file_prompt="dbstor config",
- shortest_len=8, longest_len=16)
- if self.dbstor_config.get('PASSWORD', 0) == 0:
+ self.dbstor_config["PASSWORD"] = self.get_dbstor_usernm_passwd(
+ input_prompt="PassWord",
+ file_prompt="dbstor config",
+ shortest_len=8,
+ longest_len=16,
+ )
+ if self.dbstor_config.get("PASSWORD", 0) == 0:
raise ValueError("create config file failed")
logger.info("Successfully to get user name and password")
logger.info("Generate DBstor Config File.")
- self.dbstor_config['DBS_LOG_PATH'] = self.dbstor_log
+ self.dbstor_config["DBS_LOG_PATH"] = self.dbstor_log
self.set_dbstor_conf(self.dbstor_config, self.dbstor_conf_file, True)
except ValueError as error:
log_exit(str(error))
-
def read_old_dbstor_config(self):
"""read old DBstor Config file."""
logger.info("read old DBstor Config file.")
@@ -624,9 +736,15 @@ class DBStor:
check backup ini file exists
"""
if not check_path(self.backup_conf_file):
- log_exit("Error: There is invalid character in specified backup dbstor config file.")
+ log_exit(
+ "Error: There is invalid character in specified backup dbstor config file."
+ )
if not os.path.exists(self.backup_conf_file):
- log_exit("Error: Backup dbstor config file {} not existed " .format(self.backup_conf_file))
+ log_exit(
+ "Error: Backup dbstor config file {} not existed ".format(
+ self.backup_conf_file
+ )
+ )
def check_ini_path(self):
"""
@@ -641,21 +759,24 @@ class DBStor:
except ValueError as error:
log_exit("Failed to create dbstor config file path. Error: %s", error)
-
def install(self):
self.dbstor_config = self.dbstor_config_tmp
self.check_log()
- with os.fdopen(os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r")\
- as file_handle:
+ with os.fdopen(
+ os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR),
+ "r",
+ ) as file_handle:
json_data = json.load(file_handle)
- self.backup = json_data.get('install_type', "override").strip()
- self.node_id = json_data.get('node_id', "").strip()
- self.cluster_id = json_data.get('cluster_id', "").strip()
- self.cantian_in_container = json_data.get('cantian_in_container', "0").strip()
- self.dbstor_fs_vstore_id = json_data.get('dbstor_fs_vstore_id', "0").strip()
+ self.backup = json_data.get("install_type", "override").strip()
+ self.node_id = json_data.get("node_id", "").strip()
+ self.cluster_id = json_data.get("cluster_id", "").strip()
+ self.cantian_in_container = json_data.get(
+ "cantian_in_container", "0"
+ ).strip()
+ self.dbstor_fs_vstore_id = json_data.get("dbstor_fs_vstore_id", "0").strip()
self.conf_file_path = "/opt/cantian/dbstor/tools"
self.backup_conf_file = os.path.join(BACKUP_CONF_FILE, "dbstor_config.ini")
- self.cluster_name = json_data.get("cluster_name", '')
+ self.cluster_name = json_data.get("cluster_name", "")
self.check_ini_path()
self.dbstor_conf_file = os.path.join(self.conf_file_path, "dbstor_config.ini")
@@ -666,16 +787,22 @@ class DBStor:
self.read_dbstor_para()
self.check_dbstor_para()
self.generate_db_config()
- with os.fdopen(os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r") as file_obj:
+ with os.fdopen(
+ os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR),
+ "r",
+ ) as file_obj:
json_data = json.load(file_obj)
deploy_mode = json_data.get("deploy_mode")
if deploy_mode == "dbstor" or deploy_mode == "combined":
configTool = ConfigTool()
configTool.create_unify_dbstor_config()
-
+
def cp_ini_to_client_test(self):
self.dbstor_conf_file
- cmd = "cp -rf %s/dbstor_config.ini /opt/cantian/dbstor/tools/dbstor_config.ini" % self.conf_file_path.strip()
+ cmd = (
+ "cp -rf %s/dbstor_config.ini /opt/cantian/dbstor/tools/dbstor_config.ini"
+ % self.conf_file_path.strip()
+ )
logger.info(cmd)
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
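
The hunks above reformat `verify_dbstor_passwd`, whose rules are: length bounds, no match with the user name or its reverse, no character repeated three times in a row, at least three of the four character classes, and nothing outside the allowed set. A minimal standalone sketch of that rule set, assuming an illustrative special-character set (the module's exact set is not shown in these hunks):

# Hedged sketch of the password rules reformatted above; SPECIALS and the
# function name are assumptions, not the module's exact definitions.
import string

SPECIALS = set("~!@#$%^&*()-_=+\\|[{}];:'\",<.>/?")  # assumed allowed set

def check_passwd(passwd, user_name="", shortest=8, longest=16):
    if not shortest <= len(passwd) <= longest:
        raise ValueError("The length of input must be %s to %s." % (shortest, longest))
    if user_name and passwd in (user_name, user_name[::-1]):
        raise ValueError("Password can't match the user name or its reverse.")
    if any(a == b == c for a, b, c in zip(passwd, passwd[1:], passwd[2:])):
        raise ValueError("The same character cannot appear three times consecutively")
    classes = [set(string.ascii_uppercase), set(string.ascii_lowercase),
               set(string.digits), SPECIALS]
    chars = set(passwd)
    if sum(bool(chars & c) for c in classes) < 3:
        raise ValueError("Password must contain at least three types of characters.")
    un_cases = chars - set().union(*classes)
    if un_cases:
        raise ValueError("Characters not allowed: '%s'" % "".join(sorted(un_cases)))

check_passwd("Fine#Pass12")  # passes; "aaab1234" or a username match would raise
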
diff --git a/pkg/deploy/action/dbstor/dbstor_uninstall.py b/pkg/deploy/action/dbstor/dbstor_uninstall.py
index 290d4a85dd18e4a2bbfd14cba83c077edeb0ffd4..6da0f1b09a650a3831d09b4d0752d295915d7829 100644
--- a/pkg/deploy/action/dbstor/dbstor_uninstall.py
+++ b/pkg/deploy/action/dbstor/dbstor_uninstall.py
@@ -31,6 +31,7 @@ TIMEOUT_COUNT = 1800
class ReadConfigParserNoCast(ConfigParser):
"Inherit from built-in class: ConfigParser"
+
def optionxform(self, optionstr):
"Rewrite without lower()"
return optionstr
@@ -40,6 +41,7 @@ class Options(object):
"""
class for command line options
"""
+
def __init__(self):
self.install_user_privilege = "withoutroot"
self.dbstor_log_path = "/opt/cantian/dbstor"
@@ -47,7 +49,9 @@ class Options(object):
self.log_file = "/opt/cantian/log/dbstor/uninstall.log"
self.ini_file = "/opt/cantian/dbstor/tools/dbstor_config.ini"
self.docker_ini_file = "/home/regress/cantian_data"
- self.js_conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json")
+ self.js_conf_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json"
+ )
self.note_id = ""
self.ini_exist = False
self.share_logic_ip = ""
@@ -59,11 +63,13 @@ g_opts = Options()
gPyVersion = platform.python_version()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
-logger_handle = logging.FileHandler(g_opts.log_file, 'a', "utf-8")
+logger_handle = logging.FileHandler(g_opts.log_file, "a", "utf-8")
logger_handle.setLevel(logging.DEBUG)
-logger_formatter = logging.Formatter('[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-'
- '%(message)s-[%(process)s]')
+logger_formatter = logging.Formatter(
+ "[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-"
+ "%(message)s-[%(process)s]"
+)
logger_handle.setFormatter(logger_formatter)
logger.addHandler(logger_handle)
logger.info("init logging success")
@@ -78,8 +84,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- p_obj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p_obj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
p_obj.stdin.write(cmd.encode())
@@ -122,8 +133,10 @@ def log_exit(msg):
:return: NA
"""
console_and_log("Error: " + msg)
- print("Please refer to install log \"%s\" for more detailed information."
- % g_opts.log_file)
+ print(
+ 'Please refer to install log "%s" for more detailed information.'
+ % g_opts.log_file
+ )
if g_opts.force_uninstall != "force":
raise ValueError(str(msg))
@@ -171,17 +184,23 @@ def clean_ini():
try:
os.remove(g_opts.ini_file)
except OSError as error_msg:
- log_exit("Clean dbstor config: can not delete dbstor_config.ini "
- "%s\nPlease manually delete it." % str(error_msg))
+ log_exit(
+ "Clean dbstor config: can not delete dbstor_config.ini "
+ "%s\nPlease manually delete it." % str(error_msg)
+ )
def read_file_path():
- with os.fdopen(os.open(g_opts.js_conf_file, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r")\
- as file_handle:
+ with os.fdopen(
+ os.open(
+ g_opts.js_conf_file, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR
+ ),
+ "r",
+ ) as file_handle:
json_data = json.load(file_handle)
- g_opts.note_id = json_data.get('node_id', "").strip()
- g_opts.share_logic_ip = json_data.get('share_logic_ip', "").strip()
- g_opts.cluster_name = json_data.get('cluster_name', "").strip()
+ g_opts.note_id = json_data.get("node_id", "").strip()
+ g_opts.share_logic_ip = json_data.get("share_logic_ip", "").strip()
+ g_opts.cluster_name = json_data.get("cluster_name", "").strip()
g_opts.ini_file = "/opt/cantian/dbstor/tools/dbstor_config.ini"
@@ -200,8 +219,10 @@ def main():
check_ini()
if g_opts.ini_exist:
clean_ini()
- console_and_log("dbstor config was successfully removed from your computer, "
- "for more message please see %s." % g_opts.log_file)
+ console_and_log(
+ "dbstor config was successfully removed from your computer, "
+ "for more message please see %s." % g_opts.log_file
+ )
if __name__ == "__main__":
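
Every script touched by this patch carries the same `_exec_popen` helper: it launches `bash` with `shell=False`, writes the command to the child's stdin, and trims the trailing newline from the captured output. A rough equivalent using `subprocess.run`, shown only to make the data flow explicit; it assumes Python 3 and omits the optional `values` written after the command:

# Rough sketch of what _exec_popen does in the common case; not the
# patched helper itself (no `values` plumbing, Python 3 only).
import subprocess

def run_via_bash(cmd):
    proc = subprocess.run(
        ["bash"],              # same trick as the patch: bash reads the
        input=cmd + "\n",      # command from stdin instead of argv
        capture_output=True,
        text=True,
    )
    return proc.returncode, proc.stdout.rstrip("\n"), proc.stderr.rstrip("\n")

ret_code, stdout, stderr = run_via_bash("echo hello")
print(ret_code, stdout)  # 0 hello
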
diff --git a/pkg/deploy/action/dbstor/init_unify_config.py b/pkg/deploy/action/dbstor/init_unify_config.py
index ee2a56a4cc4577d1b51f7bf119f822ac14c229d1..4391ada5afdff15b05dfbf853251a3082a32f57b 100644
--- a/pkg/deploy/action/dbstor/init_unify_config.py
+++ b/pkg/deploy/action/dbstor/init_unify_config.py
@@ -10,11 +10,15 @@ from configparser import ConfigParser
LOG_PATH = "/opt/cantian/log/dbstor"
LOG_FILE = "/opt/cantian/log/dbstor/install.log"
-JS_CONF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json")
+JS_CONF_FILE = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json"
+)
DBSTOR_CONF_FILE = "/mnt/dbdata/remote/share_"
-CONTAINER_DBSTOR_CONF_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/container")
+CONTAINER_DBSTOR_CONF_DIR = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/container"
+)
DOCKER_DBSTOR_CONF_FILE = "/home/regress/cantian_data"
-SECTION = 'CLIENT'
+SECTION = "CLIENT"
MAX_DIRECTORY_MODE = 0o755
PYTHON242 = "2.4.2"
PYTHON25 = "2.5"
@@ -24,14 +28,17 @@ gPyVersion = platform.python_version()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
-logger_handle = logging.FileHandler(LOG_FILE, 'a', "utf-8")
+logger_handle = logging.FileHandler(LOG_FILE, "a", "utf-8")
logger_handle.setLevel(logging.DEBUG)
-logger_formatter = logging.Formatter('[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-'
- '%(message)s-[%(process)s]')
+logger_formatter = logging.Formatter(
+ "[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-"
+ "%(message)s-[%(process)s]"
+)
logger_handle.setFormatter(logger_formatter)
logger.addHandler(logger_handle)
+
def _exec_popen(cmd, values=None):
"""
subprocess.Popen in python2 and 3.
@@ -41,8 +48,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- p_obj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p_obj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
p_obj.stdin.write(cmd.encode())
@@ -67,6 +79,7 @@ def _exec_popen(cmd, values=None):
stderr = stderr[:-1]
return p_obj.returncode, stdout, stderr
+
class ConfigTool:
def __init__(self) -> None:
self.conf_file_path = ""
@@ -79,8 +92,8 @@ class ConfigTool:
self.dbstor_fs_vstore_id = "0"
self.dbstor_page_fs_vstore_id = "0"
self.dbstor_archive_fs_vstore_id = "0"
- self.dbstor_home="/opt/cantian/dbstor"
- self.dbstor_log_path="/opt/cantian/log/dbstor"
+ self.dbstor_home = "/opt/cantian/dbstor"
+ self.dbstor_log_path = "/opt/cantian/log/dbstor"
self.dbstor_config_tmp = { # dbstor_config.ini default parameters
"NAMESPACE_FSNAME": "",
"NAMESPACE_PAGE_FSNAME": "",
@@ -96,64 +109,85 @@ class ConfigTool:
"NAMESPACE_ARCHIVE_FSNAME": "",
}
self.dbstor_config = self.dbstor_config_tmp
- with os.fdopen(os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r")\
- as file_handle:
+ with os.fdopen(
+ os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR),
+ "r",
+ ) as file_handle:
json_data = json.load(file_handle)
- self.node_id = json_data.get('node_id', "").strip()
- self.cluster_id = json_data.get('cluster_id', "").strip()
- self.cantian_in_container = json_data.get('cantian_in_container', "0").strip()
- self.dbstor_fs_vstore_id = json_data.get('dbstor_fs_vstore_id', "0").strip()
- self.cluster_name = json_data.get("cluster_name", '')
-
+ self.node_id = json_data.get("node_id", "").strip()
+ self.cluster_id = json_data.get("cluster_id", "").strip()
+ self.cantian_in_container = json_data.get(
+ "cantian_in_container", "0"
+ ).strip()
+ self.dbstor_fs_vstore_id = json_data.get("dbstor_fs_vstore_id", "0").strip()
+ self.cluster_name = json_data.get("cluster_name", "")
+
def create_unify_dbstor_config(self):
- logger.info('Deploy_mode = dbstor, begin to set config.')
+ logger.info("Deploy_mode = dbstor, begin to set config.")
config = ConfigParser()
config.optionxform = str
conf_file_path = "/opt/cantian/dbstor/tools"
dbstor_conf_file = os.path.join(conf_file_path, "dbstor_config.ini")
config.read(dbstor_conf_file)
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
- for i in range(7,11):
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
+ for i in range(7, 11):
file_num = i - 6
cmd = "python3 %s/../obtains_lsid.py %s %s %s %s"
- ret_code, stdout, stderr = _exec_popen(cmd % ("/opt/cantian/action/dbstor", 2, self.cluster_id, i, self.node_id))
+ ret_code, stdout, stderr = _exec_popen(
+ cmd
+ % ("/opt/cantian/action/dbstor", 2, self.cluster_id, i, self.node_id)
+ )
if ret_code:
- raise OSError("Failed to execute LSIDGenerate."
- " Error: %s" % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to execute LSIDGenerate."
+ " Error: %s" % (stderr + os.linesep + stderr)
+ )
data = stdout.split("\n")
if len(data) == 2:
inst_id, dbs_tool_uuid = data[0], data[1]
else:
raise ValueError("Data parse error: length of parsed data is not 2.")
- logger.info('Generate inst_id, dbs_tool_uuid success.')
- ret_code, stdout, stderr = _exec_popen(cmd % ("/opt/cantian/action/dbstor", 0, self.cluster_id, 0, 0))
+ logger.info("Generate inst_id, dbs_tool_uuid success.")
+ ret_code, stdout, stderr = _exec_popen(
+ cmd % ("/opt/cantian/action/dbstor", 0, self.cluster_id, 0, 0)
+ )
if ret_code:
- raise OSError("Failed to execute LSIDGenerate."
- " Error: %s" % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to execute LSIDGenerate."
+ " Error: %s" % (stderr + os.linesep + stderr)
+ )
data = stdout.split("\n")
if len(data) == 2:
self.cluster_uuid = data[1]
else:
raise ValueError("Data parse error: length of parsed data is not 2.")
- logger.info('Generate cluster_uuid success.')
+ logger.info("Generate cluster_uuid success.")
folder_path = "%s/conf/dbs/" % (self.dbstor_home)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
- config.set('CLIENT', 'DBSTOR_OWNER_NAME', 'dbstor')
- config.set('CLIENT', 'CLUSTER_NAME', str(self.cluster_name))
- config.set('CLIENT', 'CLUSTER_UUID', str(self.cluster_uuid))
- config.set('CLIENT', 'INST_ID', inst_id)
- config.set('CLIENT', 'DBS_TOOL_UUID', dbs_tool_uuid)
- config.set('CLIENT', 'DBS_LOG_PATH', self.dbstor_log_path)
+ config.set("CLIENT", "DBSTOR_OWNER_NAME", "dbstor")
+ config.set("CLIENT", "CLUSTER_NAME", str(self.cluster_name))
+ config.set("CLIENT", "CLUSTER_UUID", str(self.cluster_uuid))
+ config.set("CLIENT", "INST_ID", inst_id)
+ config.set("CLIENT", "DBS_TOOL_UUID", dbs_tool_uuid)
+ config.set("CLIENT", "DBS_LOG_PATH", self.dbstor_log_path)
flags = os.O_CREAT | os.O_RDWR | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR
- file_path = "%s/conf/dbs/dbstor_config_tool_%s.ini" % (self.dbstor_home, str(file_num))
+ file_path = "%s/conf/dbs/dbstor_config_tool_%s.ini" % (
+ self.dbstor_home,
+ str(file_num),
+ )
with os.fdopen(os.open(file_path, flags, modes), "w") as file_obj:
config.write(file_obj)
- logger.info('Set config success.')
+ logger.info("Set config success.")
+
-if __name__ == '__main__':
+if __name__ == "__main__":
configTool = ConfigTool()
- configTool.create_unify_dbstor_config()
\ No newline at end of file
+ configTool.create_unify_dbstor_config()
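
Two idioms recur in `create_unify_dbstor_config` and the other config writers in this patch: `config.optionxform = str` so ConfigParser stops lower-casing option names like `INST_ID`, and `os.fdopen(os.open(path, flags, modes))` so the ini file is created with owner-only permissions instead of the default umask. A compact sketch of both (the path and keys are placeholders):

# Sketch of the case-preserving, permission-restricted ini write used
# above; path and keys are placeholders.
import os
import stat
from configparser import ConfigParser

config = ConfigParser()
config.optionxform = str             # keep "INST_ID", not "inst_id"
config.add_section("CLIENT")
config.set("CLIENT", "INST_ID", "16384")

flags = os.O_CREAT | os.O_RDWR | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR  # 0o600: owner read/write only
with os.fdopen(os.open("/tmp/dbstor_config_demo.ini", flags, modes), "w") as f:
    config.write(f)
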
diff --git a/pkg/deploy/action/dbstor/kmc_adapter.py b/pkg/deploy/action/dbstor/kmc_adapter.py
index a005f971247377b550de60788d6dd69f89ffd0c0..2129771f842c1a49143aa1408b17ad22069bac12 100644
--- a/pkg/deploy/action/dbstor/kmc_adapter.py
+++ b/pkg/deploy/action/dbstor/kmc_adapter.py
@@ -7,7 +7,16 @@ import logging
import ctypes
import json
import base64
-from ctypes import CFUNCTYPE, c_char, c_int, c_void_p, pointer, c_char_p, Structure, POINTER
+from ctypes import (
+ CFUNCTYPE,
+ c_char,
+ c_int,
+ c_void_p,
+ pointer,
+ c_char_p,
+ Structure,
+ POINTER,
+)
from enum import Enum
"""
@@ -21,20 +30,17 @@ SEC_PATH_MAX = 4096  # PATH_MAX: size in the Linux environment to be confirmed
MAX_MK_COUNT = 4096
USE_DBSTOR = ["combined", "dbstor"]
-JS_CONF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json")
+JS_CONF_FILE = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json"
+)
class KeCallBackParam(Structure):
- _fields_ = [
- ("notifyCbCtx", c_void_p),
- ("loggerCtx", c_void_p),
- ("hwCtx", c_void_p)]
+ _fields_ = [("notifyCbCtx", c_void_p), ("loggerCtx", c_void_p), ("hwCtx", c_void_p)]
class KmcHardWareParm(Structure):
- _fields_ = [
- ("len", c_int),
- ("hardParam", c_char_p)]
+ _fields_ = [("len", c_int), ("hardParam", c_char_p)]
class KmcConfig(Structure):
@@ -52,7 +58,8 @@ class KmcConfig(Structure):
("innerHmacAlgId", c_int),
("innerKdfAlgId", c_int),
("workKeyIter", c_int),
- ("rootKeyIter", c_int)]
+ ("rootKeyIter", c_int),
+ ]
def __init__(self, *args, **kw):
super(KmcConfig, self).__init__(*args, **kw)
@@ -79,7 +86,8 @@ class KmcConfigEx(Structure):
("kmcHardWareParm", KmcHardWareParm),
("keCbParam", POINTER(KeCallBackParam)),
("kmcConfig", KmcConfig),
- ("useDefaultHwCB", c_int)]
+ ("useDefaultHwCB", c_int),
+ ]
class HmacAlgorithm(Enum):
@@ -122,7 +130,7 @@ class KmcRole(Enum):
@CFUNCTYPE(None, c_void_p, c_int, c_char_p)
def kmc_log(ctx, level, _msg):
- msg = str(_msg, encoding='utf-8')
+ msg = str(_msg, encoding="utf-8")
if level == KmcLogLevel.LOG_ERROR.value:
logging.error(msg)
elif level == KmcLogLevel.LOG_WARN.value:
@@ -143,15 +151,21 @@ class KmcLogLevel(Enum):
class KmcInitConf(object):
- def __init__(self, _primary_ks, _standby_ks, _hw_param=None,
- _enable_hw=False,
- _use_default_hw_cb=0,
- _domain_count=2,
- _role=KmcRole.ROLE_MASTER,
- _sdp_alg=SdpAlgorithm.AES256_GCM,
- _hmac_alg=HmacAlgorithm.HMAC_SHA256,
- _kdf_alg=KdfAlgorithm.PBKDF2_HMAC_SHA256,
- _hash_alg=HashAlgorithm.HASH_SHA256, **kwargs):
+ def __init__(
+ self,
+ _primary_ks,
+ _standby_ks,
+ _hw_param=None,
+ _enable_hw=False,
+ _use_default_hw_cb=0,
+ _domain_count=2,
+ _role=KmcRole.ROLE_MASTER,
+ _sdp_alg=SdpAlgorithm.AES256_GCM,
+ _hmac_alg=HmacAlgorithm.HMAC_SHA256,
+ _kdf_alg=KdfAlgorithm.PBKDF2_HMAC_SHA256,
+ _hash_alg=HashAlgorithm.HASH_SHA256,
+ **kwargs
+ ):
self.primary_ks = _primary_ks
self.standby_ks = _standby_ks
self.hw_param = _hw_param
@@ -184,19 +198,23 @@ class CApiConstant:
class CApiWrapper(object):
- """ python调用kmc-ext接口封装器
+ """python调用kmc-ext接口封装器
- 设计说明: 该类仅处理python到C语言类型转换的功能,不包括任何业务逻辑
+ Design note: this class only converts between Python and C types; it contains no business logic
- 约束:
- 1. 不支持硬件加密初始化
- 2. 不支持日志接口注册
- 3. 不支持密钥更新接口注册
+ Constraints:
+ 1. Hardware encryption initialization is not supported
+ 2. Log interface registration is not supported
+ 3. Key-update interface registration is not supported
"""
- KMC_EXT_LIB_PATH = '/opt/cantian/dbstor/lib/libkmcext.so'
- def __init__(self, primary_keystore="/opt/cantian/common/config/primary_keystore.ks",
- standby_keystore="/opt/cantian/common/config/standby_keystore.ks"):
+ KMC_EXT_LIB_PATH = "/opt/cantian/dbstor/lib/libkmcext.so"
+
+ def __init__(
+ self,
+ primary_keystore="/opt/cantian/common/config/primary_keystore.ks",
+ standby_keystore="/opt/cantian/common/config/standby_keystore.ks",
+ ):
self.deploy_mode = None
self.initialized = False
self.kmc_ctx = None
@@ -207,9 +225,12 @@ class CApiWrapper(object):
self.kmc_ext = ctypes.cdll.LoadLibrary(self.KMC_EXT_LIB_PATH)
def get_dbstor_para(self):
- with os.fdopen(os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r") as file_obj:
+ with os.fdopen(
+ os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR),
+ "r",
+ ) as file_obj:
json_data = json.load(file_obj)
- self.deploy_mode = json_data.get('deploy_mode', "").strip()
+ self.deploy_mode = json_data.get("deploy_mode", "").strip()
def initialize(self):
if self.initialized or self.deploy_mode not in USE_DBSTOR:
@@ -244,8 +265,13 @@ class CApiWrapper(object):
kmc_conf.innerKdfAlgId = c_int(conf.kdf_alg.value)
kmc_conf.workKeyIter = c_int(CApiConstant.DEFAULT_WORK_KEY_ITER)
kmc_conf.rootKeyIter = c_int(CApiConstant.DEFAULT_ROOT_KEY_ITER)
- cfg = KmcConfigEx(c_int(conf.enable_hw), kmc_hardware_param,
- pointer(ke_cb_param), kmc_conf, c_int(use_default_hw_cb))
+ cfg = KmcConfigEx(
+ c_int(conf.enable_hw),
+ kmc_hardware_param,
+ pointer(ke_cb_param),
+ kmc_conf,
+ c_int(use_default_hw_cb),
+ )
ctx = c_void_p()
# Initialize logging
@@ -271,16 +297,21 @@ class CApiWrapper(object):
# 函数原型: int KeEncryptByDomainEx(const void *ctx, unsigned int domainID,
# const char *plainText, int plainTextLen,
# char **cipherText, int *cipherTextLen)
- status = self.kmc_ext.KeEncryptByDomainEx(ctx, CApiConstant.DEFAULT_DOMAINID,
- plain_text, plain_len,
- pointer(cipher), pointer(cipher_len))
+ status = self.kmc_ext.KeEncryptByDomainEx(
+ ctx,
+ CApiConstant.DEFAULT_DOMAINID,
+ plain_text,
+ plain_len,
+ pointer(cipher),
+ pointer(cipher_len),
+ )
if status == 0:
- result = cipher.value.decode('utf-8')
+ result = cipher.value.decode("utf-8")
return result
def encrypt_by_base64(self, plain):
- encoded = base64.b64encode(plain.encode('utf-8'))
- return encoded.decode('utf-8')
+ encoded = base64.b64encode(plain.encode("utf-8"))
+ return encoded.decode("utf-8")
def encrypt(self, plain):
if self.deploy_mode in USE_DBSTOR:
@@ -300,16 +331,21 @@ class CApiWrapper(object):
# 函数原型: int KeDecryptByDomainEx(const void *ctx, unsigned int domainID,
# const char *cipherText, int cipherTextLen,
# char **plainText, int *plainTextLen)
- status = self.kmc_ext.KeDecryptByDomainEx(ctx, CApiConstant.DEFAULT_DOMAINID,
- cipher_text, cipher_len,
- pointer(plain), pointer(plain_len))
+ status = self.kmc_ext.KeDecryptByDomainEx(
+ ctx,
+ CApiConstant.DEFAULT_DOMAINID,
+ cipher_text,
+ cipher_len,
+ pointer(plain),
+ pointer(plain_len),
+ )
if status == 0:
- result = plain.value.decode('utf-8')
+ result = plain.value.decode("utf-8")
return result
def decrypt_by_base64(self, cipher):
- decoded = base64.b64decode(cipher.encode('utf-8'))
- return decoded.decode('utf-8')
+ decoded = base64.b64decode(cipher.encode("utf-8"))
+ return decoded.decode("utf-8")
def decrypt(self, cipher):
if self.deploy_mode in USE_DBSTOR:
@@ -324,7 +360,9 @@ class CApiWrapper(object):
return self.kmc_ext.KeFinalizeEx(pointer(self.kmc_ctx))
def update_mk(self):
- return self.kmc_ext.KeActiveNewKeyEx(self.kmc_ctx, CApiConstant.DEFAULT_DOMAINID)
+ return self.kmc_ext.KeActiveNewKeyEx(
+ self.kmc_ctx, CApiConstant.DEFAULT_DOMAINID
+ )
def get_mk_count(self):
return self.kmc_ext.KeGetMkCountEx(self.kmc_ctx)
@@ -335,6 +373,7 @@ class CApiWrapper(object):
if __name__ == "__main__":
import sys
+
e_pwd = ""
d_pwd = ""
mode = ""
@@ -362,7 +401,10 @@ if __name__ == "__main__":
mk_count = res.get_mk_count()
res.finalize()
if ret == 0:
- print("update master key success\nNow there are %s master keys in ksf" % mk_count)
+ print(
+ "update master key success\nNow there are %s master keys in ksf"
+ % mk_count
+ )
exit(0)
else:
exit(1)
@@ -374,10 +416,10 @@ if __name__ == "__main__":
exit(0)
else:
exit(1)
-
+
if mode == "encrypted":
e_pwd = sys.argv[4]
encrypted_passwd = res.encrypt(e_pwd)
print(encrypted_passwd)
-
- res.finalize()
\ No newline at end of file
+
+ res.finalize()
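
kmc_adapter.py drives a C library through ctypes: `Structure` subclasses mirror the C structs, `@CFUNCTYPE(...)` turns a Python function into a C-callable function pointer (used here for the log callback), and `pointer()` passes out-parameters such as the cipher buffer and its length. A self-contained toy exercising the same three mechanics without loading libkmcext.so:

# Toy illustrating the ctypes patterns used by kmc_adapter.py; it loads
# no external library, so every call here is plain ctypes.
from ctypes import CFUNCTYPE, Structure, c_int, c_char_p, pointer

class Pair(Structure):               # mirrors a C struct {int len; char *txt;}
    _fields_ = [("len", c_int), ("txt", c_char_p)]

@CFUNCTYPE(None, c_int, c_char_p)    # C signature: void cb(int, const char *)
def log_cb(level, msg):
    print(level, msg.decode("utf-8"))

p = Pair(5, b"hello")
log_cb(0, p.txt)                     # a CFUNCTYPE object is itself callable

n = c_int(0)
out = pointer(n)                     # how C out-parameters are modelled
out.contents.value = 42
print(n.value)                       # -> 42
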
diff --git a/pkg/deploy/action/dbstor/update_dbstor_config.py b/pkg/deploy/action/dbstor/update_dbstor_config.py
index 524c4ba19af8372b7562e6e708d60bad3642cdbf..0f1c9c6270fe87c7e7a1e8990100e43882e6285a 100644
--- a/pkg/deploy/action/dbstor/update_dbstor_config.py
+++ b/pkg/deploy/action/dbstor/update_dbstor_config.py
@@ -1,5 +1,6 @@
import re
import sys
+
sys.dont_write_bytecode = True
try:
@@ -17,6 +18,7 @@ try:
import copy
from configparser import ConfigParser
from kmc_adapter import CApiWrapper
+
PYTHON242 = "2.4.2"
PYTHON25 = "2.5"
gPyVersion = platform.python_version()
@@ -26,8 +28,9 @@ try:
elif gPyVersion >= PYTHON25:
pass
else:
- raise ImportError("This install script can not support python version: %s"
- % gPyVersion)
+ raise ImportError(
+ "This install script can not support python version: %s" % gPyVersion
+ )
except ImportError as import_err:
raise ValueError("Unable to import module: %s." % str(import_err)) from import_err
@@ -40,6 +43,7 @@ MAX_DIRECTORY_MODE = 750
class ReadConfigParserNoCast(ConfigParser):
"Inherit from built-in class: ConfigParser"
+
def optionxform(self, optionstr):
"Rewrite without lower()"
return optionstr
@@ -49,30 +53,38 @@ class Options(object):
"""
command line options
"""
+
def __init__(self):
- self.section = 'CLIENT'
+ self.section = "CLIENT"
self.dbstor_config = {}
self.inipath = "/mnt/dbdata/remote/share_"
self.log_path = "/opt/cantian/log/dbstor"
self.log_file = "/opt/cantian/log/dbstor/update.log"
- self.js_conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json")
+ self.js_conf_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json"
+ )
self.note_id = ""
- self.local_ini_path = "/mnt/dbdata/local/cantian/tmp/data/dbstor/conf/dbs/dbstor_config.ini"
+ self.local_ini_path = (
+ "/mnt/dbdata/local/cantian/tmp/data/dbstor/conf/dbs/dbstor_config.ini"
+ )
self.cstool_ini_path = "/opt/cantian/dbstor/conf/dbs/dbstor_config.ini"
self.tools_ini_path = "/opt/cantian/dbstor/tools/dbstor_config.ini"
self.cms_ini_path = "/opt/cantian/cms/dbstor/conf/dbs/dbstor_config.ini"
self.cluster_name = ""
+
db_opts = Options()
if not os.path.exists(db_opts.log_path):
os.makedirs(db_opts.log_path, MAX_DIRECTORY_MODE)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
-logger_handle = logging.FileHandler(db_opts.log_file, 'a', "utf-8")
+logger_handle = logging.FileHandler(db_opts.log_file, "a", "utf-8")
logger_handle.setLevel(logging.DEBUG)
-logger_formatter = logging.Formatter('[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-'
- '%(message)s-[%(process)s]')
+logger_formatter = logging.Formatter(
+ "[%(asctime)s]-[%(filename)s]-[line:%(lineno)d]-[%(levelname)s]-"
+ "%(message)s-[%(process)s]"
+)
logger_handle.setFormatter(logger_formatter)
logger.addHandler(logger_handle)
logger.info("init logging success")
@@ -95,8 +107,10 @@ def log_exit(msg):
:return: NA
"""
console_and_log("Error: " + msg)
- print("Please refer to install log \"%s\" for more detailed information."
- % db_opts.log_file)
+ print(
+ 'Please refer to install log "%s" for more detailed information.'
+ % db_opts.log_file
+ )
raise ValueError(str(msg))
@@ -110,7 +124,9 @@ def check_log():
modes = stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP
if not os.path.exists(db_opts.log_file):
try:
- with os.fdopen(os.open(db_opts.log_file, flags, modes), "w", encoding="utf-8"):
+ with os.fdopen(
+ os.open(db_opts.log_file, flags, modes), "w", encoding="utf-8"
+ ):
pass
except IOError as ex:
log_exit("Error: Can not create or open log file: %s", db_opts.log_file)
@@ -130,8 +146,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
pobj.stdin.write(cmd.encode())
@@ -180,7 +201,7 @@ def usage():
def read_default_parameter():
- #Read default parameters
+ # Read default parameters
conf = ReadConfigParserNoCast()
conf.read(db_opts.inipath, encoding="utf-8")
for option in conf.options(db_opts.section):
@@ -203,8 +224,21 @@ def parse_parameter():
try:
# Parameters are passed into argv. After parsing, they are stored
# in opts as binary tuples. Unresolved parameters are stored in args.
- opts, args = getopt.getopt(sys.argv[1:], "",
- ["help", "NAMESPACE_FSNAME=", "NAMESPACE_PAGE_FSNAME=", "DPU_UUID=", "LINK_TYPE=", "LOCAL_IP=", "REMOTE_IP=", "NAMESPACE_SHARE_FSNAME=", "NAMESPACE_ARCHIVE_FSNAME="])
+ opts, args = getopt.getopt(
+ sys.argv[1:],
+ "",
+ [
+ "help",
+ "NAMESPACE_FSNAME=",
+ "NAMESPACE_PAGE_FSNAME=",
+ "DPU_UUID=",
+ "LINK_TYPE=",
+ "LOCAL_IP=",
+ "REMOTE_IP=",
+ "NAMESPACE_SHARE_FSNAME=",
+ "NAMESPACE_ARCHIVE_FSNAME=",
+ ],
+ )
if args:
log_exit("Parameter input error: " + str(args[0]))
@@ -219,21 +253,29 @@ def parse_parameter():
if _value.strip() == "":
continue
if _key == "--NAMESPACE_FSNAME":
- db_opts.dbstor_config["NAMESPACE_FSNAME"] = _value.strip().replace('/', ';')
+ db_opts.dbstor_config["NAMESPACE_FSNAME"] = _value.strip().replace(
+ "/", ";"
+ )
if _key == "--NAMESPACE_PAGE_FSNAME":
- db_opts.dbstor_config["NAMESPACE_PAGE_FSNAME"] = _value.strip().replace('/', ';')
+ db_opts.dbstor_config["NAMESPACE_PAGE_FSNAME"] = _value.strip().replace(
+ "/", ";"
+ )
elif _key == "--DPU_UUID":
db_opts.dbstor_config["DPU_UUID"] = _value.strip()
elif _key == "--LINK_TYPE":
db_opts.dbstor_config["LINK_TYPE"] = _value.strip()
elif _key == "--LOCAL_IP":
- db_opts.dbstor_config["LOCAL_IP"] = _value.strip().replace('/', ';')
+ db_opts.dbstor_config["LOCAL_IP"] = _value.strip().replace("/", ";")
elif _key == "--REMOTE_IP":
- db_opts.dbstor_config["REMOTE_IP"] = _value.strip().replace('/', ';')
+ db_opts.dbstor_config["REMOTE_IP"] = _value.strip().replace("/", ";")
elif _key == "--NAMESPACE_SHARE_FSNAME":
- db_opts.dbstor_config["NAMESPACE_SHARE_FSNAME"] = _value.strip().replace('/', ';')
+ db_opts.dbstor_config["NAMESPACE_SHARE_FSNAME"] = (
+ _value.strip().replace("/", ";")
+ )
elif _key == "--NAMESPACE_ARCHIVE_FSNAME":
- db_opts.dbstor_config["NAMESPACE_ARCHIVE_FSNAME"] = _value.strip().replace('/', ';')
+ db_opts.dbstor_config["NAMESPACE_ARCHIVE_FSNAME"] = (
+ _value.strip().replace("/", ";")
+ )
except getopt.GetoptError as error:
log_exit("Parameter input error: " + error.msg)
@@ -255,7 +297,9 @@ def check_parameter():
if len(db_opts.dbstor_config.get("NAMESPACE_ARCHIVE_FSNAME", "").strip()) == 0:
log_exit("The storage_archive_fs parameter is not entered")
else:
- remote_ip_list = re.split(r"[;|,]", db_opts.dbstor_config.get("REMOTE_IP", "").strip())
+ remote_ip_list = re.split(
+ r"[;|,]", db_opts.dbstor_config.get("REMOTE_IP", "").strip()
+ )
for remote_ip in remote_ip_list:
cmd = "ping -c 1 %s" % remote_ip.strip()
logger.info("exec cmd: %s", cmd)
@@ -274,13 +318,13 @@ def clean_ini(file_path):
def update_file_parameter(file_path, ini_file, encrypt_passwd=False):
- #Update parameters
+ # Update parameters
clean_ini(file_path)
conf = ReadConfigParserNoCast()
conf.add_section(db_opts.section)
if encrypt_passwd:
- ini_file['PASSWORD'] = GLOBAL_KMC_EXT.encrypt(ini_file.get('PASSWORD', ""))
+ ini_file["PASSWORD"] = GLOBAL_KMC_EXT.encrypt(ini_file.get("PASSWORD", ""))
# rewrite parameters
for key in ini_file:
conf.set(db_opts.section, key, ini_file[key])
@@ -294,15 +338,15 @@ def update_file_parameter(file_path, ini_file, encrypt_passwd=False):
def update_parameter():
- #Update parameters
+ # Update parameters
console_and_log("Start to update parameters")
cantian_dbstor_config = copy.deepcopy(db_opts.dbstor_config)
cantian_dbstor_config["DBSTOR_OWNER_NAME"] = "cantian"
cms_dbstor_config = copy.deepcopy(db_opts.dbstor_config)
cms_dbstor_config["DBSTOR_OWNER_NAME"] = "cms"
- #update share ini file
+ # update share ini file
update_file_parameter(db_opts.inipath, db_opts.dbstor_config)
- #update local ini file
+ # update local ini file
update_file_parameter(db_opts.local_ini_path, cantian_dbstor_config)
# update cstool ini file
update_file_parameter(db_opts.cstool_ini_path, db_opts.dbstor_config)
@@ -319,6 +363,7 @@ def update_parameter():
if ret_code:
log_exit(f"Failed to execute command: {cmd}.")
+
def verify_dbstor_usernm(in_type, passwd, shortest_len, longest_len):
"""
Verify new password.
@@ -326,8 +371,9 @@ def verify_dbstor_usernm(in_type, passwd, shortest_len, longest_len):
"""
if len(passwd) < shortest_len or len(passwd) > longest_len:
- raise ValueError("The length of input must be %s to %s."
- % (shortest_len, longest_len))
+ raise ValueError(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
# Can't save with user name
upper_cases = set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
lower_cases = set("abcdefghijklmnopqrstuvwxyz")
@@ -344,12 +390,14 @@ def verify_dbstor_usernm(in_type, passwd, shortest_len, longest_len):
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- console_and_log("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
- raise ValueError("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
+ console_and_log(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
+ raise ValueError(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
def verify_dbstor_passwd(in_type, passwd, shortest_len, longest_len):
@@ -359,23 +407,33 @@ def verify_dbstor_passwd(in_type, passwd, shortest_len, longest_len):
"""
# eg 'length in [8-16]'
if len(passwd) < shortest_len or len(passwd) > longest_len:
- console_and_log("The length of input must be %s to %s."
- % (shortest_len, longest_len))
- raise ValueError("The length of input must be %s to %s."
- % (shortest_len, longest_len))
+ console_and_log(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
+ raise ValueError(
+ "The length of input must be %s to %s." % (shortest_len, longest_len)
+ )
# Can't save with user name
- user_name = db_opts.dbstor_config.get('USER_NAME')
+ user_name = db_opts.dbstor_config.get("USER_NAME")
if user_name and passwd == user_name:
console_and_log("Error: Password can't be the same as username.")
raise ValueError("Error: Password can't be the same as username.")
elif user_name and passwd == user_name[::-1]:
- console_and_log("Error: Password cannot be the same as username in reverse order")
- raise ValueError("Error: Password cannot be the same as username in reverse order")
+ console_and_log(
+ "Error: Password cannot be the same as username in reverse order"
+ )
+ raise ValueError(
+ "Error: Password cannot be the same as username in reverse order"
+ )
# The same character cannot appear three times consecutively
for i in range(0, len(passwd) - 2):
if passwd[i] == passwd[i + 1] and passwd[i + 1] == passwd[i + 2]:
- console_and_log("Error: The same character cannot appear three times consecutively ")
- raise ValueError("Error: The same character cannot appear three times consecutively")
+ console_and_log(
+ "Error: The same character cannot appear three times consecutively "
+ )
+ raise ValueError(
+ "Error: The same character cannot appear three times consecutively"
+ )
upper_cases = set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
lower_cases = set("abcdefghijklmnopqrstuvwxyz")
@@ -394,36 +452,50 @@ def verify_dbstor_passwd(in_type, passwd, shortest_len, longest_len):
if passwd_set & cases:
types += 1
if types < 3:
- console_and_log("Error: Password must contains at least three different types of characters.")
- raise ValueError("Error: Password must contains at least three"
- " different types of characters.")
+ console_and_log(
+ "Error: Password must contains at least three different types of characters."
+ )
+ raise ValueError(
+ "Error: Password must contains at least three"
+ " different types of characters."
+ )
# Only can contains enumerated cases
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- console_and_log("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
- raise ValueError("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
+ console_and_log(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
+ raise ValueError(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
def input_username_password():
"""Generate DBstor Config parameter."""
try:
console_and_log("Input Username and Password")
- db_opts.dbstor_config['USER_NAME'] = get_dbstor_usernm_passwd(input_prompt="UserName",
- file_prompt="dbstor config",
- shortest_len=6, longest_len=32)
- if db_opts.dbstor_config.get('USER_NAME', 0) == 0:
+ db_opts.dbstor_config["USER_NAME"] = get_dbstor_usernm_passwd(
+ input_prompt="UserName",
+ file_prompt="dbstor config",
+ shortest_len=6,
+ longest_len=32,
+ )
+ if db_opts.dbstor_config.get("USER_NAME", 0) == 0:
raise ValueError("input param is invalid")
- db_opts.dbstor_config['PASSWORD'] = get_dbstor_usernm_passwd(input_prompt="PassWord",
- file_prompt="dbstor config",
- shortest_len=8, longest_len=16)
- if db_opts.dbstor_config.get('PASSWORD', 0) == 0:
+ db_opts.dbstor_config["PASSWORD"] = get_dbstor_usernm_passwd(
+ input_prompt="PassWord",
+ file_prompt="dbstor config",
+ shortest_len=8,
+ longest_len=16,
+ )
+ if db_opts.dbstor_config.get("PASSWORD", 0) == 0:
raise ValueError("input param is invalid")
- db_opts.dbstor_config['PASSWORD'] = GLOBAL_KMC_EXT.encrypt(db_opts.dbstor_config.get('PASSWORD', ""))
+ db_opts.dbstor_config["PASSWORD"] = GLOBAL_KMC_EXT.encrypt(
+ db_opts.dbstor_config.get("PASSWORD", "")
+ )
console_and_log("\nSuccessfully to input user name and password")
except ValueError as error:
log_exit(str(error))
@@ -493,12 +565,16 @@ def get_dbstor_usernm_passwd(input_prompt, file_prompt, shortest_len, longest_le
def read_file_path():
- with os.fdopen(os.open(db_opts.js_conf_file, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r")\
- as file_obj:
+ with os.fdopen(
+ os.open(
+ db_opts.js_conf_file, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR
+ ),
+ "r",
+ ) as file_obj:
json_data = json.load(file_obj)
- db_opts.note_id = json_data.get('node_id', "").strip()
+ db_opts.note_id = json_data.get("node_id", "").strip()
db_opts.inipath = "/opt/cantian/dbstor/tools/dbstor_config.ini"
- db_opts.cluster_name = json_data.get('cluster_name', "").strip()
+ db_opts.cluster_name = json_data.get("cluster_name", "").strip()
def check_ini():
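
`parse_parameter` above is a textbook `getopt` loop: an empty short-option string, a list of long options whose trailing `=` marks a required value, and a hard failure on any positional leftovers. A trimmed sketch of the same loop with an abbreviated option list (the real script accepts eight options):

# Trimmed sketch of the getopt pattern used by parse_parameter; the
# option list here is abbreviated for illustration.
import getopt

def parse(argv):
    opts, args = getopt.getopt(argv, "", ["help", "LOCAL_IP=", "REMOTE_IP="])
    if args:
        raise ValueError("Parameter input error: " + str(args[0]))
    conf = {}
    for _key, _value in opts:
        if _key == "--help":
            continue
        if _value.strip():
            # same normalization as the patch: '/' becomes ';'
            conf[_key.lstrip("-")] = _value.strip().replace("/", ";")
    return conf

print(parse(["--LOCAL_IP=192.168.0.1/192.168.0.2"]))
# {'LOCAL_IP': '192.168.0.1;192.168.0.2'}
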
diff --git a/pkg/deploy/action/do_snapshot.py b/pkg/deploy/action/do_snapshot.py
index 993d0d7aabf27d8baa0e8ba2c44e64c312f44ba4..00fce0c3c8ad50b18cad37bf7dd692d77dfb8057 100644
--- a/pkg/deploy/action/do_snapshot.py
+++ b/pkg/deploy/action/do_snapshot.py
@@ -9,13 +9,13 @@ from rest_client import RestClient
from om_log import SNAPSHOT_LOGS as LOG
CUR_PATH, _ = os.path.split(os.path.abspath(__file__))
-DEPLOY_PARAM_PATH = '/opt/cantian/config/deploy_param.json'
+DEPLOY_PARAM_PATH = "/opt/cantian/config/deploy_param.json"
NORMAL_STATE, ABNORMAL_STATE = 0, 1
def read_helper(file_path):
- with open(file_path, 'r', encoding='utf-8') as f_handler:
+ with open(file_path, "r", encoding="utf-8") as f_handler:
deploy_data = f_handler.read()
return deploy_data
@@ -24,16 +24,16 @@ def write_helper(file_path, data):
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
if data:
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.write(json.dumps(data))
else:
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.truncate()
def get_fs_processed_info(info_path, fs_names):
- json_file_path = os.path.join(info_path, 'processed_snapshots.json')
- init_fs_info = {name: '' for name in fs_names}
+ json_file_path = os.path.join(info_path, "processed_snapshots.json")
+ init_fs_info = {name: "" for name in fs_names}
if not os.path.exists(info_path):
os.makedirs(info_path)
return init_fs_info
@@ -58,11 +58,11 @@ def main(mode, ip_address, main_path):
passwd = input()
config_params = json.loads(read_helper(DEPLOY_PARAM_PATH))
- fs_names = [fs_val
- for fs_name, fs_val in config_params.items()
- if fs_name.endswith('_fs')]
+ fs_names = [
+ fs_val for fs_name, fs_val in config_params.items() if fs_name.endswith("_fs")
+ ]
- process_fs_path = '{}/cantian_upgrade_snapshots'.format(main_path)
+ process_fs_path = "{}/cantian_upgrade_snapshots".format(main_path)
fs_processed_data = get_fs_processed_info(process_fs_path, fs_names)
login_data = (ip_address, user_name, passwd)
@@ -72,19 +72,23 @@ def main(mode, ip_address, main_path):
try:
_ = rest_client_obj.execute(fs_name, mode)
except Exception as error:
- LOG.error('error happened when try to {} snapshot of {}, err_info: {}, '
- 'err_traceback: {}'.format(mode, fs_name, str(error), traceback.format_exc(limit=-1)))
+ LOG.error(
+ "error happened when try to {} snapshot of {}, err_info: {}, "
+ "err_traceback: {}".format(
+ mode, fs_name, str(error), traceback.format_exc(limit=-1)
+ )
+ )
return ABNORMAL_STATE
- if mode == 'rollback':
+ if mode == "rollback":
query_rollback_process(fs_names, rest_client_obj)
- recoder_path = os.path.join(process_fs_path, 'processed_snapshots.json')
- if mode == 'create':
+ recoder_path = os.path.join(process_fs_path, "processed_snapshots.json")
+ if mode == "create":
new_processed_info = rest_client_obj.processed_fs
write_helper(recoder_path, new_processed_info)
- elif mode == 'rollback':
- write_helper(recoder_path, '')
+ elif mode == "rollback":
+ write_helper(recoder_path, "")
return NORMAL_STATE
@@ -100,7 +104,9 @@ def query_rollback_process(fs_names, rest_client_obj):
success_list = []
while query_list:
for fs_name in query_list:
- rollback_rate, rollbacks_status = rest_client_obj.query_rollback_snapshots_process(fs_name)
+ rollback_rate, rollbacks_status = (
+ rest_client_obj.query_rollback_snapshots_process(fs_name)
+ )
if int(rollbacks_status) == 0:
success_list.append(fs_name)
query_list = list(set(query_list) - set(success_list))
@@ -113,8 +119,12 @@ if __name__ == "__main__":
try:
RET_VAL = main(snapshot_mode, ip, main_backup_file_path)
except Exception as err:
- LOG.error('{} snapshots failed, err_details: {}, '
- 'err_traceback: {}'.format(snapshot_mode, str(err), traceback.format_exc(limit=-1)))
+ LOG.error(
+ "{} snapshots failed, err_details: {}, "
+ "err_traceback: {}".format(
+ snapshot_mode, str(err), traceback.format_exc(limit=-1)
+ )
+ )
exit(ABNORMAL_STATE)
exit(RET_VAL)
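
`write_helper` above has an either/or contract: serialize the processed-snapshot map when there is data, truncate the record file when a rollback clears it. A small round-trip sketch of that state file (the path is a placeholder):

# Round-trip sketch of the processed-snapshots record handling above;
# the path is a placeholder.
import json
import os
import stat

RECORD = "/tmp/processed_snapshots_demo.json"
FLAGS = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
MODES = stat.S_IWRITE | stat.S_IRUSR

def save(data):
    with os.fdopen(os.open(RECORD, FLAGS, MODES), "w", encoding="utf-8") as f:
        if data:
            f.write(json.dumps(data))  # record which snapshots are done
        else:
            f.truncate()               # rollback wipes the record

save({"share_fs": "snap_20240101"})
with open(RECORD, "r", encoding="utf-8") as f:
    print(json.loads(f.read()))        # {'share_fs': 'snap_20240101'}
save("")                               # file is emptied again
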
diff --git a/pkg/deploy/action/docker/cantian_numa.py b/pkg/deploy/action/docker/cantian_numa.py
index c7f385317d5e2047336a9f8273ebf67cf129572e..3fcd2fe4339a7a9b348ac332d118a11f5cbde191 100644
--- a/pkg/deploy/action/docker/cantian_numa.py
+++ b/pkg/deploy/action/docker/cantian_numa.py
@@ -5,7 +5,7 @@ import re
import sys
import time
-sys.path.append('/ctdb/cantian_install/cantian_connector/action')
+sys.path.append("/ctdb/cantian_install/cantian_connector/action")
from logic.common_func import exec_popen
from delete_unready_pod import KubernetesService, get_pod_name_from_info
@@ -15,7 +15,7 @@ from docker_common.file_utils import open_and_lock_json, write_and_unlock_json,
NUMA_INFO_PATH = "/root/.kube/NUMA-INFO/numa-pod.json"
TIME_OUT = 100
MAX_CHECK_TIME = 120  # maximum check duration
-CHECK_INTERVAL = 3 # 每次检查的间隔
+CHECK_INTERVAL = 3  # interval between checks
class CPUAllocator:
@@ -45,9 +45,9 @@ class CPUAllocator:
For example, input '0-25' yields [0, 1, 2, ..., 25]
"""
cpu_list = []
- for part in cpu_list_str.split(','):
- if '-' in part:
- start, end = map(int, part.split('-'))
+ for part in cpu_list_str.split(","):
+ if "-" in part:
+ start, end = map(int, part.split("-"))
cpu_list.extend(range(start, end + 1))
else:
cpu_list.append(int(part))
@@ -61,7 +61,7 @@ class CPUAllocator:
err_msg = f"Execute cmd[{cmd}] failed, details:{stderr}"
raise Exception(err_msg)
- numa_nodes = int(re.search(r'\d+', stdout).group())
+ numa_nodes = int(re.search(r"\d+", stdout).group())
cmd = "lscpu | grep -i 'NUMA node[0-9] CPU(s)'"
return_code, stdout, stderr = exec_popen(cmd, timeout=TIME_OUT)
@@ -72,7 +72,7 @@ class CPUAllocator:
cpu_info = {}
total_cpus = 0
for line in stdout.splitlines():
- match = re.search(r'NUMA node(\d+) CPU\(s\):\s+([\d,\-]+)', line)
+ match = re.search(r"NUMA node(\d+) CPU\(s\):\s+([\d,\-]+)", line)
if match:
node = str(match.group(1))
cpu_list_str = match.group(2)
@@ -83,7 +83,7 @@ class CPUAllocator:
"available_cpus": cpu_list,
"available_cpu_count": node_cpus,
"max_cpu": max(cpu_list),
- "min_cpu": min(cpu_list)
+ "min_cpu": min(cpu_list),
}
return total_cpus, numa_nodes, cpu_info
@@ -114,7 +114,7 @@ class CPUAllocator:
stdout = self.execute_cmd(cmd)
# Parse the CPU list from the taskset output
- match = re.search(r'list:\s+([\d,-]+)', stdout)
+ match = re.search(r"list:\s+([\d,-]+)", stdout)
if match:
cpu_list_str = match.group(1)
actual_cpus = self._parse_cpu_list(cpu_list_str)
@@ -132,8 +132,16 @@ class CPUAllocator:
total_cpus, numa_nodes, cpu_info = self.get_numa_info()
numa_info.update(cpu_info)
- max_single_numa = max(info['available_cpu_count'] for info in numa_info.values() if isinstance(info, dict))
- total_available_cpus = sum(info['available_cpu_count'] for info in numa_info.values() if isinstance(info, dict))
+ max_single_numa = max(
+ info["available_cpu_count"]
+ for info in numa_info.values()
+ if isinstance(info, dict)
+ )
+ total_available_cpus = sum(
+ info["available_cpu_count"]
+ for info in numa_info.values()
+ if isinstance(info, dict)
+ )
if cpu_num > total_available_cpus:
return 0, []
@@ -141,27 +149,33 @@ class CPUAllocator:
if cpu_num > max_single_numa:
needed_cpus = cpu_num
binding_cpus = []
- for node, info in sorted(numa_info.items(), key=lambda x: x[1]['available_cpu_count'], reverse=True):
+ for node, info in sorted(
+ numa_info.items(),
+ key=lambda x: x[1]["available_cpu_count"],
+ reverse=True,
+ ):
if isinstance(info, dict):
- if info['available_cpu_count'] >= needed_cpus:
- binding_cpus.extend(info['available_cpus'][:needed_cpus])
+ if info["available_cpu_count"] >= needed_cpus:
+ binding_cpus.extend(info["available_cpus"][:needed_cpus])
return 2, binding_cpus
else:
- binding_cpus.extend(info['available_cpus'])
- needed_cpus -= info['available_cpu_count']
+ binding_cpus.extend(info["available_cpus"])
+ needed_cpus -= info["available_cpu_count"]
return 2, binding_cpus
for node, info in numa_info.items():
- if isinstance(info, dict) and info['available_cpu_count'] >= cpu_num:
- return 1, info['available_cpus'][:cpu_num]
+ if isinstance(info, dict) and info["available_cpu_count"] >= cpu_num:
+ return 1, info["available_cpus"][:cpu_num]
needed_cpus = cpu_num
binding_cpus = []
- for node, info in sorted(numa_info.items(), key=lambda x: x[1]['available_cpu_count'], reverse=True):
+ for node, info in sorted(
+ numa_info.items(), key=lambda x: x[1]["available_cpu_count"], reverse=True
+ ):
if isinstance(info, dict):
if needed_cpus > 0:
- take_cpus = min(needed_cpus, info['available_cpu_count'])
- binding_cpus.extend(info['available_cpus'][:take_cpus])
+ take_cpus = min(needed_cpus, info["available_cpu_count"])
+ binding_cpus.extend(info["available_cpus"][:take_cpus])
needed_cpus -= take_cpus
if needed_cpus == 0:
return 2, binding_cpus
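
The strategy reformatted above returns a status plus a CPU list: 1 when a single NUMA node can satisfy the request, 2 when the allocation must span nodes (taking from the largest node first), 0 when even the cluster total falls short. The decision distilled into a few lines:

# Distilled version of determine_binding_strategy; `nodes` maps a NUMA
# node id to its free CPU list. Status codes follow the patch:
# 0 - failure, 1 - single-node binding, 2 - cross-NUMA binding.
def pick_cpus(cpu_num, nodes):
    if cpu_num > sum(len(c) for c in nodes.values()):
        return 0, []
    for cpus in nodes.values():              # any single node big enough?
        if len(cpus) >= cpu_num:
            return 1, cpus[:cpu_num]
    chosen, needed = [], cpu_num
    for cpus in sorted(nodes.values(), key=len, reverse=True):
        take = min(needed, len(cpus))        # greedy: largest node first
        chosen.extend(cpus[:take])
        needed -= take
        if needed == 0:
            break
    return 2, chosen

print(pick_cpus(6, {"0": [0, 1, 2, 3], "1": [4, 5, 6, 7]}))
# (2, [0, 1, 2, 3, 4, 5]) -- spans both nodes
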
@@ -175,21 +189,34 @@ class CPUAllocator:
for node, info in cpu_info.items():
node_str = str(node)
- existing_available_cpus = set(numa_data[self.numa_info_key][node_str].get("available_cpus", []))
+ existing_available_cpus = set(
+ numa_data[self.numa_info_key][node_str].get("available_cpus", [])
+ )
available_cpus = existing_available_cpus - set(bound_cpus)
- numa_data[self.numa_info_key][node_str]["available_cpus"] = sorted(list(available_cpus))
+ numa_data[self.numa_info_key][node_str]["available_cpus"] = sorted(
+ list(available_cpus)
+ )
numa_data[self.numa_info_key][node_str]["available_cpu_count"] = len(
- numa_data[self.numa_info_key][node_str]["available_cpus"])
+ numa_data[self.numa_info_key][node_str]["available_cpus"]
+ )
def clean_up_json(self, numa_data, pod_info, hostname_pattern):
"""
Clean up binding info for pods that no longer exist and return their bound CPUs to numa_info.
"""
- matching_pods = [pod['pod_name'] for pod in pod_info if re.match(hostname_pattern, pod['pod_name'])]
-
- keys_to_delete = [key for key in numa_data.keys() if key not in matching_pods and key != self.numa_info_key]
+ matching_pods = [
+ pod["pod_name"]
+ for pod in pod_info
+ if re.match(hostname_pattern, pod["pod_name"])
+ ]
+
+ keys_to_delete = [
+ key
+ for key in numa_data.keys()
+ if key not in matching_pods and key != self.numa_info_key
+ ]
for key in keys_to_delete:
pod_file_path = os.path.join(os.path.dirname(NUMA_INFO_PATH), key)
@@ -200,10 +227,14 @@ class CPUAllocator:
self.restore_cpus_to_numa_info(numa_data, bind_cpus)
if not os.path.exists(pod_file_path):
- LOG.info(f"File {pod_file_path} does not exist. Removing entry from JSON: {key}")
+ LOG.info(
+ f"File {pod_file_path} does not exist. Removing entry from JSON: {key}"
+ )
del numa_data[key]
elif LockFile.is_locked(pod_file_path):
- LOG.info(f"Skipping removal of {key} because file {pod_file_path} is currently locked.")
+ LOG.info(
+ f"Skipping removal of {key} because file {pod_file_path} is currently locked."
+ )
else:
LOG.info(f"Removing outdated entry from JSON and deleting file: {key}")
del numa_data[key]
@@ -237,7 +268,9 @@ class CPUAllocator:
if hostname in numa_data:
if numa_data[hostname].get("bind_flag", False):
- LOG.info(f"Host {hostname} is already bound successfully. Skipping binding.")
+ LOG.info(
+ f"Host {hostname} is already bound successfully. Skipping binding."
+ )
return
# Check whether numa_info is empty; initialize it if so
@@ -245,7 +278,9 @@ class CPUAllocator:
total_cpus, numa_nodes, cpu_info = self.get_numa_info()
numa_data[self.numa_info_key].update(cpu_info)
- binding_status, binding_cpus = self.determine_binding_strategy(cpu_num, numa_data[self.numa_info_key])
+ binding_status, binding_cpus = self.determine_binding_strategy(
+ cpu_num, numa_data[self.numa_info_key]
+ )
# 0 - binding failed: not enough CPUs
if binding_status == 0:
@@ -253,11 +288,13 @@ class CPUAllocator:
numa_data[hostname] = {
"bind_cpus": "",
"bind_flag": False,
- "taskset_output": "Binding failed due to insufficient CPUs"
+ "taskset_output": "Binding failed due to insufficient CPUs",
}
elif binding_status in (1, 2):
if binding_status == 2:
- LOG.warning(f"Cross NUMA binding detected for host {hostname}. This may affect performance.")
+ LOG.warning(
+ f"Cross NUMA binding detected for host {hostname}. This may affect performance."
+ )
# Perform the binding: 1 - single NUMA node, 2 - cross-NUMA
self.bind_cpu(binding_cpus)
@@ -267,7 +304,7 @@ class CPUAllocator:
numa_data[hostname] = {
"bind_cpus": ",".join(map(str, binding_cpus)),
"bind_flag": True,
- "taskset_output": taskset_output
+ "taskset_output": taskset_output,
}
self.update_available_cpus(numa_data, cpu_info, binding_cpus)
else:
@@ -275,7 +312,7 @@ class CPUAllocator:
numa_data[hostname] = {
"bind_cpus": "",
"bind_flag": False,
- "taskset_output": "Binding failed during verification"
+ "taskset_output": "Binding failed during verification",
}
def delete_binding_info(self, short_hostname):
@@ -361,7 +398,7 @@ def show_numa_binding_info(numa_info_path):
if bind_cpus:
try:
- cpu_list = list(map(int, bind_cpus.split(','))) if bind_cpus else []
+ cpu_list = list(map(int, bind_cpus.split(","))) if bind_cpus else []
formatted_cpus = format_cpu_ranges(cpu_list)
except ValueError:
formatted_cpus = bind_cpus
@@ -422,11 +459,17 @@ def main():
pod_name_full = get_pod_name_from_info(all_pod_info, short_hostname)
if pod_name_full:
# Remove pod entries that no longer exist from the JSON
- hostname_pattern = r'cantian.*-node.*'
- cpu_allocator.clean_up_json(numa_data, all_pod_info, hostname_pattern)
-
- cpu_allocator.execute_binding(cpu_num, pod_name_full, numa_data, cpu_info)
- pod_file_path = os.path.join(os.path.dirname(NUMA_INFO_PATH), pod_name_full)
+ hostname_pattern = r"cantian.*-node.*"
+ cpu_allocator.clean_up_json(
+ numa_data, all_pod_info, hostname_pattern
+ )
+
+ cpu_allocator.execute_binding(
+ cpu_num, pod_name_full, numa_data, cpu_info
+ )
+ pod_file_path = os.path.join(
+ os.path.dirname(NUMA_INFO_PATH), pod_name_full
+ )
break
except Exception as e:
err_msg = f"Error during CPU binding: {e}"
@@ -454,4 +497,4 @@ if __name__ == "__main__":
show_numa_binding_info(NUMA_INFO_PATH)
else:
pod_file_path = main()
- print(pod_file_path)
\ No newline at end of file
+ print(pod_file_path)
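
cantian_numa.py converts in both directions between `lscpu`-style range strings ("0-3,8") and explicit CPU lists: `_parse_cpu_list` expands ranges, and `format_cpu_ranges` (called in `show_numa_binding_info`, body not shown in this patch) compresses them again. A round-trip sketch; the formatter here is an assumed equivalent, not the patched implementation:

# parse_cpu_list matches the logic shown above; format_cpu_ranges is an
# assumed inverse, since the patch does not show its body.
def parse_cpu_list(cpu_list_str):
    cpus = []
    for part in cpu_list_str.split(","):
        if "-" in part:
            start, end = map(int, part.split("-"))
            cpus.extend(range(start, end + 1))
        else:
            cpus.append(int(part))
    return cpus

def format_cpu_ranges(cpus):
    runs, run = [], [cpus[0]]
    for c in cpus[1:]:
        if c == run[-1] + 1:
            run.append(c)              # extend the current consecutive run
        else:
            runs.append(run)
            run = [c]
    runs.append(run)
    return ",".join(str(r[0]) if len(r) == 1 else f"{r[0]}-{r[-1]}" for r in runs)

print(parse_cpu_list("0-3,8"))             # [0, 1, 2, 3, 8]
print(format_cpu_ranges([0, 1, 2, 3, 8]))  # 0-3,8
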
diff --git a/pkg/deploy/action/docker/delete_unready_pod.py b/pkg/deploy/action/docker/delete_unready_pod.py
index d43f185a0a20bf3b57425f96b845ba06e345fb22..f80ebae652503c9ce4562f31659541374e09b758 100644
--- a/pkg/deploy/action/docker/delete_unready_pod.py
+++ b/pkg/deploy/action/docker/delete_unready_pod.py
@@ -12,6 +12,7 @@ from get_config_info import get_value
UNREADY_THRESHOLD_SECONDS = 600
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
+
def _exec_popen(cmd, values=None):
"""
subprocess.Popen in python2 and 3.
@@ -21,8 +22,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
for value in values:
@@ -55,22 +61,23 @@ def get_pod_name_from_info(pod_info, pod_name):
return None
+
def backup_log():
"""备份日志函数,unready的pod会被重复检测,只有首次打印备份日志,后续直接返回"""
- healthy_file = '/opt/cantian/healthy'
+ healthy_file = "/opt/cantian/healthy"
if os.path.exists(healthy_file):
# Read parameters from the file
- with open(healthy_file, 'r') as fread:
+ with open(healthy_file, "r") as fread:
data = fread.read()
try:
healthy_dict = json.loads(data)
except json.decoder.JSONDecodeError as e:
- healthy_dict = {'delete_unready_pod': 0}
- if not healthy_dict['delete_unready_pod']:
- healthy_dict['delete_unready_pod'] = 1
+ healthy_dict = {"delete_unready_pod": 0}
+ if not healthy_dict["delete_unready_pod"]:
+ healthy_dict["delete_unready_pod"] = 1
flags = os.O_CREAT | os.O_RDWR
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(healthy_file, flags, modes), 'w') as fwrite:
+ with os.fdopen(os.open(healthy_file, flags, modes), "w") as fwrite:
json.dump(healthy_dict, fwrite)
else:
return
@@ -78,16 +85,24 @@ def backup_log():
# If the healthy file does not exist the pod state is abnormal; other logic handles that, so return directly
return
- cluster_name = get_value('cluster_name')
- cluster_id = get_value('cluster_id')
- node_id = get_value('node_id')
- deploy_user = get_value('deploy_user')
- storage_metadata_fs = get_value('storage_metadata_fs')
- cmd = "sh %s/log_backup.sh %s %s %s %s %s" % (CUR_PATH, cluster_name, cluster_id, node_id, deploy_user, storage_metadata_fs)
+ cluster_name = get_value("cluster_name")
+ cluster_id = get_value("cluster_id")
+ node_id = get_value("node_id")
+ deploy_user = get_value("deploy_user")
+ storage_metadata_fs = get_value("storage_metadata_fs")
+ cmd = "sh %s/log_backup.sh %s %s %s %s %s" % (
+ CUR_PATH,
+ cluster_name,
+ cluster_id,
+ node_id,
+ deploy_user,
+ storage_metadata_fs,
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
raise Exception("failed to backup log. output:%s" % str(stderr))
+
def monitor_pods(k8s_service, pod_name):
pod = k8s_service.get_pod_by_name(pod_name)
if not pod:
@@ -101,15 +116,24 @@ def monitor_pods(k8s_service, pod_name):
if condition["type"] == "Ready" and condition["status"] != "True":
last_transition_time = condition.get("lastTransitionTime")
if last_transition_time:
- last_transition_time = datetime.strptime(last_transition_time, "%Y-%m-%dT%H:%M:%SZ")
+ last_transition_time = datetime.strptime(
+ last_transition_time, "%Y-%m-%dT%H:%M:%SZ"
+ )
unready_duration = current_time - last_transition_time
- print(f"Pod {pod_name} has been unready for more than {unready_duration.total_seconds()} seconds.")
+ print(
+                        f"Pod {pod_name} has been unready for {unready_duration.total_seconds()} seconds."
+ )
if unready_duration.total_seconds() > UNREADY_THRESHOLD_SECONDS:
- print(f"Pod {pod_name} has been unready for more than {UNREADY_THRESHOLD_SECONDS} seconds. Deleting...")
+ print(
+ f"Pod {pod_name} has been unready for more than {UNREADY_THRESHOLD_SECONDS} seconds. Deleting..."
+ )
backup_log()
- k8s_service.delete_pod(name=pod_name, namespace=pod["metadata"]["namespace"])
+ k8s_service.delete_pod(
+ name=pod_name, namespace=pod["metadata"]["namespace"]
+ )
return
+
if __name__ == "__main__":
pod_name = os.getenv("HOSTNAME")
if not pod_name:
@@ -125,4 +149,4 @@ if __name__ == "__main__":
monitor_pods(k8s_service, pod_name_full)
else:
print(f"Service not found for pod: {pod_name}")
- exit(1)
\ No newline at end of file
+ exit(1)
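
# A self-contained sketch of the unready-duration test monitor_pods() applies
# above; the condition dict is a hand-written stand-in for Kubernetes API
# output, not real data.
from datetime import datetime

def unready_too_long(condition, threshold_seconds=600, now=None):
    # Only "Ready" conditions whose status is not "True" count as unready.
    if condition["type"] != "Ready" or condition["status"] == "True":
        return False
    since = datetime.strptime(condition["lastTransitionTime"], "%Y-%m-%dT%H:%M:%SZ")
    now = now or datetime.utcnow()
    return (now - since).total_seconds() > threshold_seconds

cond = {"type": "Ready", "status": "False", "lastTransitionTime": "2024-01-01T00:00:00Z"}
print(unready_too_long(cond, now=datetime(2024, 1, 1, 0, 15, 0)))  # True: 900s > 600s
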
diff --git a/pkg/deploy/action/docker/docker_common/file_utils.py b/pkg/deploy/action/docker/docker_common/file_utils.py
index 13a2fb7b8fc9385e3739084631456a0962bba60a..b9bba0e858472e8fb12beb1a0349608ceed35cee 100644
--- a/pkg/deploy/action/docker/docker_common/file_utils.py
+++ b/pkg/deploy/action/docker/docker_common/file_utils.py
@@ -10,6 +10,7 @@ import sys
import pwd
import grp
import stat
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../../"))
from cantian.get_config_info import get_value
@@ -43,7 +44,7 @@ class LockFile:
@staticmethod
def is_locked(file_path):
try:
- with open(file_path, 'a') as f:
+ with open(file_path, "a") as f:
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
fcntl.flock(f, fcntl.LOCK_UN)
@@ -51,7 +52,9 @@ class LockFile:
except IOError:
return True
except Exception as e:
- raise Exception(f"Error checking lock status of {file_path}: {str(e)}") from e
+ raise Exception(
+ f"Error checking lock status of {file_path}: {str(e)}"
+ ) from e
def open_and_lock_json(filepath, timeout=20):
@@ -65,7 +68,7 @@ def open_and_lock_json(filepath, timeout=20):
try:
fd = os.open(filepath, os.O_RDWR | os.O_CREAT, 0o644)
- file = os.fdopen(fd, 'r+')
+ file = os.fdopen(fd, "r+")
LockFile.lock_with_timeout(file, timeout=timeout)
@@ -105,7 +108,7 @@ def open_and_lock_csv(filepath, timeout=20):
try:
fd = os.open(filepath, os.O_RDWR | os.O_CREAT, 0o644)
- file = os.fdopen(fd, 'r+')
+ file = os.fdopen(fd, "r+")
LockFile.lock_with_timeout(file, timeout=timeout)
@@ -143,7 +146,7 @@ def read_file(filepath):
try:
if os.path.exists(filepath):
flags = os.O_RDONLY
- with os.fdopen(os.open(filepath, flags), 'r') as f:
+ with os.fdopen(os.open(filepath, flags), "r") as f:
fcntl.flock(f, fcntl.LOCK_SH)
content = f.readlines()
fcntl.flock(f, fcntl.LOCK_UN)
@@ -175,7 +178,7 @@ def write_file(filepath, content):
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
modes = original_mode
- with os.fdopen(os.open(temp_file, flags, modes), 'w') as f:
+ with os.fdopen(os.open(temp_file, flags, modes), "w") as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.writelines(content)
fcntl.flock(f, fcntl.LOCK_UN)
@@ -216,4 +219,4 @@ def mkdir(path, permissions=0o750):
os.chown(path, uid, gid)
except Exception as e:
- raise RuntimeError(f"Failed to create directory '{path}': {e}")
\ No newline at end of file
+ raise RuntimeError(f"Failed to create directory '{path}': {e}")
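
# The is_locked() change above is formatting-only; the probe it wraps is worth
# spelling out. A condensed sketch: try to take an exclusive flock without
# blocking -- failure (IOError) means another process holds the lock.
import fcntl

def is_locked_sketch(file_path):
    with open(file_path, "a") as f:
        try:
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)  # non-blocking probe
            fcntl.flock(f, fcntl.LOCK_UN)                  # release immediately
            return False
        except IOError:
            return True

print(is_locked_sketch("/tmp/demo.lock"))  # False unless some process holds it
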
diff --git a/pkg/deploy/action/docker/docker_common/kubernetes_service.py b/pkg/deploy/action/docker/docker_common/kubernetes_service.py
index 3b5f5921069ed58b2cd806cc8a195e98a0ee0c61..afe1a14497ed329fcf797fe31a99ca9eb25e36d5 100644
--- a/pkg/deploy/action/docker/docker_common/kubernetes_service.py
+++ b/pkg/deploy/action/docker/docker_common/kubernetes_service.py
@@ -24,19 +24,31 @@ class KubernetesService:
with open(self.kube_config_path, "r") as kube_config_file:
kube_config_content = kube_config_file.read()
- client_cert_data = re.search(r'client-certificate-data: (.+)', kube_config_content).group(1)
- client_key_data = re.search(r'client-key-data: (.+)', kube_config_content).group(1)
+ client_cert_data = re.search(
+ r"client-certificate-data: (.+)", kube_config_content
+ ).group(1)
+ client_key_data = re.search(
+ r"client-key-data: (.+)", kube_config_content
+ ).group(1)
client_cert_data = base64.b64decode(client_cert_data)
client_key_data = base64.b64decode(client_key_data)
cert_file_path = "/tmp/client-cert.pem"
key_file_path = "/tmp/client-key.pem"
- cert_fd = os.open(cert_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, stat.S_IRUSR | stat.S_IWUSR)
+ cert_fd = os.open(
+ cert_file_path,
+ os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR,
+ )
with os.fdopen(cert_fd, "wb") as cert_file:
cert_file.write(client_cert_data)
- key_fd = os.open(key_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, stat.S_IRUSR | stat.S_IWUSR)
+ key_fd = os.open(
+ key_file_path,
+ os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR,
+ )
with os.fdopen(key_fd, "wb") as key_file:
key_file.write(client_key_data)
@@ -47,7 +59,9 @@ class KubernetesService:
def _get(self, path, timeout=5):
url = f"{self.api_server}{path}"
- response = requests.get(url, headers=self.headers, cert=self.cert, verify=False, timeout=timeout)
+ response = requests.get(
+ url, headers=self.headers, cert=self.cert, verify=False, timeout=timeout
+ )
response.raise_for_status()
return response.json()
@@ -64,7 +78,9 @@ class KubernetesService:
matching_pods = []
for pod in pods_data.get("items", []):
pod_labels = pod["metadata"].get("labels", {})
- if all(item in pod_labels.items() for item in service_selector.items()):
+ if all(
+ item in pod_labels.items() for item in service_selector.items()
+ ):
matching_pods.append(pod)
for pod in matching_pods:
@@ -104,11 +120,13 @@ class KubernetesService:
for port in ports:
container_port = port.get("containerPort")
if pod_name and pod_ip and container_port:
- pod_info.append({
- "pod_name": pod_name,
- "pod_ip": pod_ip,
- "container_port": container_port
- })
+ pod_info.append(
+ {
+ "pod_name": pod_name,
+ "pod_ip": pod_ip,
+ "container_port": container_port,
+ }
+ )
return pod_info
@@ -138,12 +156,14 @@ class KubernetesService:
for port in ports:
container_port = port.get("containerPort")
if pod_name_all and pod_ip and container_port:
- all_pod_info.append({
- "service_name": service["metadata"]["name"],
- "pod_name": pod_name_all,
- "pod_ip": pod_ip,
- "container_port": container_port
- })
+ all_pod_info.append(
+ {
+ "service_name": service["metadata"]["name"],
+ "pod_name": pod_name_all,
+ "pod_ip": pod_ip,
+ "container_port": container_port,
+ }
+ )
return all_pod_info
@@ -152,7 +172,9 @@ class KubernetesService:
def delete_pod(self, name, namespace, timeout=5):
url = f"{self.api_server}/api/v1/namespaces/{namespace}/pods/{name}"
- response = requests.delete(url, headers=self.headers, cert=self.cert, verify=False, timeout=timeout)
+ response = requests.delete(
+ url, headers=self.headers, cert=self.cert, verify=False, timeout=timeout
+ )
response.raise_for_status()
return response.json()
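
# A stand-alone illustration of the label-selector test used above: a pod
# matches a service when every selector key/value pair appears among the
# pod's labels. The sample data is invented.
service_selector = {"app": "cantian", "tier": "db"}
pod_labels = {"app": "cantian", "tier": "db", "node": "node-0"}

matches = all(item in pod_labels.items() for item in service_selector.items())
print(matches)  # True: the selector is a subset of the pod's labels
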
diff --git a/pkg/deploy/action/docker/dr_deploy.py b/pkg/deploy/action/docker/dr_deploy.py
index 2f697a483968c33883823e6da30cda0ad6a6d534..cc190128d8b749ef0491ab4d31277658a2332f87 100644
--- a/pkg/deploy/action/docker/dr_deploy.py
+++ b/pkg/deploy/action/docker/dr_deploy.py
@@ -22,7 +22,9 @@ from om_log import LOGGER as LOG
OPT_CONFIG_PATH = "/opt/cantian/config"
SCRIPT_PATH = os.path.join(CUR_PATH, "..")
CONFIG_PATH = os.path.join(SCRIPT_PATH, "../config")
-DORADO_CONF_PATH = "/ctdb/cantian_install/cantian_connector/config/container_conf/dorado_conf"
+DORADO_CONF_PATH = (
+ "/ctdb/cantian_install/cantian_connector/config/container_conf/dorado_conf"
+)
DM_USER = "DMUser"
DM_PWD = "DMPwd"
@@ -30,13 +32,17 @@ DM_PWD = "DMPwd"
def init_get_info_fun():
try:
get_info_path = os.path.join(CUR_PATH, "../get_config_info.py")
- spec = importlib.util.spec_from_file_location("get_info_from_config", get_info_path)
+ spec = importlib.util.spec_from_file_location(
+ "get_info_from_config", get_info_path
+ )
get_info_from_config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(get_info_from_config)
get_info = get_info_from_config.get_value
return get_info
except Exception as e:
- LOG.error(f"init get_info fun failed {e}, traceback: {traceback.format_exc(limit=-1)}")
+ LOG.error(
+ f"init get_info fun failed {e}, traceback: {traceback.format_exc(limit=-1)}"
+ )
return None
@@ -76,11 +82,11 @@ def close_child_process(proc):
os.killpg(proc.pid, signal.SIGKILL)
except ProcessLookupError as err:
_ = err
- return 'success'
+ return "success"
except Exception as err:
return str(err)
- return 'success'
+ return "success"
def exec_popen(cmd, timeout=None):
@@ -90,8 +96,14 @@ def exec_popen(cmd, timeout=None):
return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
if not timeout:
@@ -142,8 +154,10 @@ def get_dr_status(dm_password=None):
dm_password = get_dm_password()
if dm_password == "":
LOG.error("DM pass word is empty.")
- cmd = (f"echo -e '{dm_password}' | sh {SCRIPT_PATH}/appctl.sh dr_operate progress_query "
- f"--action=check --display=table 2>&1 | grep -E '^\-|^\|'")
+ cmd = (
+ f"echo -e '{dm_password}' | sh {SCRIPT_PATH}/appctl.sh dr_operate progress_query "
+ f"--action=check --display=table 2>&1 | grep -E '^\-|^\|'"
+ )
execute_command(cmd, raise_flag=True, timeout=30)
data_json = get_file_json(os.path.join(CONFIG_PATH, "dr_status.json"))
return data_json.get("dr_status")
@@ -196,7 +210,7 @@ def check_dr_deploy_process_completed(role):
return False
-def dr_deploy(role=None, dm_password=None, mysql_pwd='', delete_flag=False):
+def dr_deploy(role=None, dm_password=None, mysql_pwd="", delete_flag=False):
if not dm_password:
dm_password = get_dm_password()
if dm_password == "":
@@ -204,8 +218,10 @@ def dr_deploy(role=None, dm_password=None, mysql_pwd='', delete_flag=False):
raise Exception("get dm_pass word failed.")
if not role:
role = get_value("dr_deploy.role")
- cmd = (f"echo -e '{dm_password}' | sh {SCRIPT_PATH}/appctl.sh dr_operate pre_check {role} "
- f"--conf=/opt/cantian/config/deploy_param.json")
+ cmd = (
+ f"echo -e '{dm_password}' | sh {SCRIPT_PATH}/appctl.sh dr_operate pre_check {role} "
+ f"--conf=/opt/cantian/config/deploy_param.json"
+ )
execute_command(cmd, raise_flag=True, timeout=300)
LOG.info("dr_operate pre_check success.")
@@ -213,8 +229,10 @@ def dr_deploy(role=None, dm_password=None, mysql_pwd='', delete_flag=False):
if os.path.exists(dr_process_file):
os.remove(dr_process_file)
- cmd = (f"echo -e '{dm_password}\n{mysql_pwd}' | sh {SCRIPT_PATH}/appctl.sh dr_operate deploy {role} "
- f"--mysql_cmd='mysql' --mysql_user=root")
+ cmd = (
+ f"echo -e '{dm_password}\n{mysql_pwd}' | sh {SCRIPT_PATH}/appctl.sh dr_operate deploy {role} "
+ f"--mysql_cmd='mysql' --mysql_user=root"
+ )
execute_command(cmd, raise_flag=True)
if check_dr_deploy_process_completed(role):
@@ -222,8 +240,10 @@ def dr_deploy(role=None, dm_password=None, mysql_pwd='', delete_flag=False):
if delete_flag:
deploy_user = get_value("deploy_user")
storage_share_fs = get_value("storage_share_fs")
- cmd = (f"su -s /bin/bash - {deploy_user} -c 'dbstor --delete-file "
- f"--fs-name={storage_share_fs} --file-name=onlyStart.file'")
+ cmd = (
+ f"su -s /bin/bash - {deploy_user} -c 'dbstor --delete-file "
+ f"--fs-name={storage_share_fs} --file-name=onlyStart.file'"
+ )
execute_command(cmd, timeout=180)
sys.exit(0)
sys.exit(1)
@@ -238,8 +258,10 @@ def copy_version_yaml(deploy_user, deploy_group):
chown_path(version_file, u_id, g_id)
version_dir = os.path.dirname(version_file)
storage_share_fs = get_value("storage_share_fs")
- cmd = (f'su -s /bin/bash - "{deploy_user}" -c "dbstor --copy-file --import '
- f'--fs-name={storage_share_fs} --source-dir={version_dir} --target-dir=/ --file-name=versions.yml"')
+ cmd = (
+ f'su -s /bin/bash - "{deploy_user}" -c "dbstor --copy-file --import '
+ f'--fs-name={storage_share_fs} --source-dir={version_dir} --target-dir=/ --file-name=versions.yml"'
+ )
execute_command(cmd, timeout=180)
@@ -249,10 +271,12 @@ def start_mysqld(deploy_user=None, deploy_group=None):
if not deploy_group:
deploy_group = get_info("deploy_group")
LOG.info(f"Begin to start mysqld.")
- cmd = (f"su -s /bin/bash - {deploy_user} -c 'python3 "
- f"-B /opt/cantian/image/cantian_connector/CantianKernel/Cantian-DATABASE-CENTOS-64bit/install.py "
- f"-U {deploy_user}:{deploy_group} -l /home/{deploy_user}/logs/install.log "
- f"-M mysqld -m /opt/cantian/image/cantian_connector/cantian-connector-mysql/scripts/my.cnf -g withoutroot'")
+ cmd = (
+ f"su -s /bin/bash - {deploy_user} -c 'python3 "
+ f"-B /opt/cantian/image/cantian_connector/CantianKernel/Cantian-DATABASE-CENTOS-64bit/install.py "
+ f"-U {deploy_user}:{deploy_group} -l /home/{deploy_user}/logs/install.log "
+ f"-M mysqld -m /opt/cantian/image/cantian_connector/cantian-connector-mysql/scripts/my.cnf -g withoutroot'"
+ )
execute_command(cmd, raise_flag=True, timeout=300)
LOG.info(f"start mysqld success.")
@@ -266,12 +290,16 @@ def dr_start_deploy():
count = 0
if deploy_mode == "dbstor":
storage_fs = get_value("storage_share_fs")
- cmd = (f"su -s /bin/bash - {deploy_user} -c 'dbstor --query-file "
- f"--fs-name={storage_fs} --file-dir=/' | grep 'dr_deploy_param.json' | wc -l")
+ cmd = (
+ f"su -s /bin/bash - {deploy_user} -c 'dbstor --query-file "
+ f"--fs-name={storage_fs} --file-dir=/' | grep 'dr_deploy_param.json' | wc -l"
+ )
code, count, err = execute_command(cmd, timeout=180)
else:
storage_fs = get_value("storage_metadata_fs")
- if os.path.exists(f"/mnt/dbdata/remote/metadata_{storage_fs}/dr_deploy_param.json"):
+ if os.path.exists(
+ f"/mnt/dbdata/remote/metadata_{storage_fs}/dr_deploy_param.json"
+ ):
count = 1
if not count.isdigit():
LOG.error("get file count failed.")
@@ -284,28 +312,42 @@ def dr_start_deploy():
g_id = deploy_group_info.gr_gid
os.chown(OPT_CONFIG_PATH, u_id, g_id)
if deploy_mode == "dbstor":
- cmd = (f"su -s /bin/bash - {deploy_user} -c 'dbstor --copy-file --export --fs-name={storage_fs} "
- f"--source-dir=/ --target-dir={OPT_CONFIG_PATH} --file-name=dr_deploy_param.json'")
+ cmd = (
+ f"su -s /bin/bash - {deploy_user} -c 'dbstor --copy-file --export --fs-name={storage_fs} "
+ f"--source-dir=/ --target-dir={OPT_CONFIG_PATH} --file-name=dr_deploy_param.json'"
+ )
execute_command(cmd, timeout=180)
else:
- copy_file(f"/mnt/dbdata/remote/metadata_{storage_fs}/dr_deploy_param.json",
- f"{OPT_CONFIG_PATH}/dr_deploy_param.json")
- copy_file(f"{OPT_CONFIG_PATH}/dr_deploy_param.json", f"{CONFIG_PATH}/dr_deploy_param.json")
+ copy_file(
+ f"/mnt/dbdata/remote/metadata_{storage_fs}/dr_deploy_param.json",
+ f"{OPT_CONFIG_PATH}/dr_deploy_param.json",
+ )
+ copy_file(
+ f"{OPT_CONFIG_PATH}/dr_deploy_param.json",
+ f"{CONFIG_PATH}/dr_deploy_param.json",
+ )
if get_dr_status(dm_password) != "Normal":
- msg = ("DR status is Abnormal. If you need, "
- "please enter the container and manually execute the Dr_deploy process.")
+ msg = (
+            "DR status is Abnormal. If needed, "
+ "please enter the container and manually execute the Dr_deploy process."
+ )
LOG.error(msg)
cmd = f"sh {SCRIPT_PATH}/appctl.sh start"
execute_command(cmd, raise_flag=True, timeout=3600)
- if get_value("cantian_in_container") == "1" and get_value("M_RUNING_MODE") != "cantiand_with_mysql_in_cluster":
+ if (
+ get_value("cantian_in_container") == "1"
+ and get_value("M_RUNING_MODE") != "cantiand_with_mysql_in_cluster"
+ ):
start_mysqld(deploy_user, deploy_group)
else:
if role == "active":
cmd = f"sh {SCRIPT_PATH}/appctl.sh start"
execute_command(cmd, raise_flag=True, timeout=3600)
- if (get_value("cantian_in_container") == "1" and
- get_value("M_RUNING_MODE") != "cantiand_with_mysql_in_cluster"):
+ if (
+ get_value("cantian_in_container") == "1"
+ and get_value("M_RUNING_MODE") != "cantiand_with_mysql_in_cluster"
+ ):
start_mysqld(deploy_user, deploy_group)
copy_version_yaml(deploy_user, deploy_group)
LOG.info("dr_setup is True, executing dr_deploy tasks.")
@@ -315,17 +357,19 @@ def dr_start_deploy():
def main():
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
if "/opt/cantian/dbstor/lib" not in split_env:
- LOG.error(f"cantian-dbstor-lib not found, current envpath[{os.environ['LD_LIBRARY_PATH']}]")
+ LOG.error(
+ f"cantian-dbstor-lib not found, current envpath[{os.environ['LD_LIBRARY_PATH']}]"
+ )
raise Exception("cantian-dbstor-lib not found")
action_dict = {
"get_dm_password": get_dm_password,
"get_dr_status": get_dr_status,
- "start": dr_start_deploy
+ "start": dr_start_deploy,
}
- mysql_pwd = ''
+ mysql_pwd = ""
delete_config = False
if len(sys.argv) == 1:
mysql_pwd = getpass.getpass("Please input mysql login password:")
@@ -341,6 +385,7 @@ if __name__ == "__main__":
try:
main()
except Exception as e:
- LOG.error(f"execute failed, err[{str(e)}], traceback: [{traceback.format_exc(limit=-1)}]")
+ LOG.error(
+ f"execute failed, err[{str(e)}], traceback: [{traceback.format_exc(limit=-1)}]"
+ )
sys.exit(1)
-
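
# Several scripts in this patch share the exec_popen pattern reformatted
# above. A trimmed, runnable sketch of the core idea -- start a bare "bash",
# feed it the command over stdin, collect rc/stdout/stderr -- with the
# timeout and setsid details omitted:
import os
import subprocess

def exec_popen_sketch(cmd):
    pobj = subprocess.Popen(
        ["bash"],
        shell=False,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    pobj.stdin.write(cmd.encode())
    pobj.stdin.write(os.linesep.encode())
    stdout, stderr = pobj.communicate()  # closes stdin so bash exits
    return pobj.returncode, stdout.decode().strip(), stderr.decode().strip()

print(exec_popen_sketch("echo hello"))  # (0, 'hello', '')
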
diff --git a/pkg/deploy/action/docker/get_config_info.py b/pkg/deploy/action/docker/get_config_info.py
index 9a4b278a3075467826f4b71769a7669efba05289..78c02eb4dc3205874e87a352f68dee87acb12ed8 100644
--- a/pkg/deploy/action/docker/get_config_info.py
+++ b/pkg/deploy/action/docker/get_config_info.py
@@ -35,7 +35,7 @@ def get_value(param):
if param == "M_RUNING_MODE":
return install_config.get("M_RUNING_MODE")
- keys = param.split('.')
+ keys = param.split(".")
value = info
try:
for key in keys:
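
# The hunk above ends mid-function, but the dotted-path lookup it performs is
# short enough to show whole: the param is split on "." and each key walks one
# level deeper into the loaded JSON. The sample config is invented.
info = {"dr_deploy": {"role": "active"}}

def get_nested(config, param):
    value = config
    for key in param.split("."):
        value = value[key]  # KeyError here means the param path is absent
    return value

print(get_nested(info, "dr_deploy.role"))  # active
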
diff --git a/pkg/deploy/action/docker/pod_record.py b/pkg/deploy/action/docker/pod_record.py
index 1801b4243f9ca58bfa12454ac5094d5679a4c689..717c474c54967c0e1755d93bd71271cce857b913 100644
--- a/pkg/deploy/action/docker/pod_record.py
+++ b/pkg/deploy/action/docker/pod_record.py
@@ -4,7 +4,7 @@ import sys
from datetime import datetime
from docker_common.file_utils import open_and_lock_csv, write_and_unlock_csv
-sys.path.append('/ctdb/cantian_install/cantian_connector/action')
+sys.path.append("/ctdb/cantian_install/cantian_connector/action")
from delete_unready_pod import KubernetesService, get_pod_name_from_info
from om_log import LOGGER as LOG
@@ -17,27 +17,36 @@ RESTART_THRESHOLD = 6
def update_pod_restart_record(k8s_service, pod_name_full, pod_namespace):
pod_record, file_handle = open_and_lock_csv(POD_RECORD_FILE_PATH)
- record_dict = {rows[0]: {'restart_count': int(rows[1]), 'last_restart_time': rows[2]} for rows in pod_record}
+ record_dict = {
+ rows[0]: {"restart_count": int(rows[1]), "last_restart_time": rows[2]}
+ for rows in pod_record
+ }
if pod_name_full not in record_dict:
record_dict[pod_name_full] = {
"restart_count": 1,
- "last_restart_time": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+ "last_restart_time": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"),
}
else:
record_dict[pod_name_full]["restart_count"] += 1
- record_dict[pod_name_full]["last_restart_time"] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+ record_dict[pod_name_full]["last_restart_time"] = datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%SZ"
+ )
restart_count = record_dict[pod_name_full]["restart_count"]
if restart_count > RESTART_THRESHOLD:
- LOG.info(f"Pod {pod_name_full} has restarted {restart_count} times, Deleting...")
+ LOG.info(
+ f"Pod {pod_name_full} has restarted {restart_count} times, Deleting..."
+ )
k8s_service.delete_pod(pod_name_full, pod_namespace)
else:
LOG.info("Cantian pod start record updated successfully.")
- rows_to_write = [[pod_name, data['restart_count'], data['last_restart_time']]
- for pod_name, data in record_dict.items()]
+ rows_to_write = [
+ [pod_name, data["restart_count"], data["last_restart_time"]]
+ for pod_name, data in record_dict.items()
+ ]
write_and_unlock_csv(rows_to_write, file_handle)
@@ -76,4 +85,4 @@ if __name__ == "__main__":
try:
main()
except Exception as err:
- LOG.error(f"Error in pod_record.py: {err}")
\ No newline at end of file
+ LOG.error(f"Error in pod_record.py: {err}")
diff --git a/pkg/deploy/action/docker/resolve_pwd.py b/pkg/deploy/action/docker/resolve_pwd.py
index 0ad6b79e0e717fcab27a2abbb9a0153df61fc4d0..c1dc96883de7ce0226492aa205b7e72374cac354 100644
--- a/pkg/deploy/action/docker/resolve_pwd.py
+++ b/pkg/deploy/action/docker/resolve_pwd.py
@@ -18,8 +18,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
for value in values:
@@ -48,10 +53,16 @@ def resolve_kmc_pwd(encrypt_pwd):
try:
passwd = kmc_adapter.decrypt(encrypt_pwd)
except Exception as error:
- raise Exception("Failed to decrypt password of user [sys]. Error: %s" % str(error)) from error
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ raise Exception(
+ "Failed to decrypt password of user [sys]. Error: %s" % str(error)
+ ) from error
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
return passwd
@@ -72,7 +83,9 @@ def kmc_to_ctencrypt_pwd(encrypt_pwd):
stderr = str(stderr)
    stderr = stderr.replace(passwd, "****")
if ret_code:
- raise Exception("failed to get _SYS_PASSWORD by ctencrypt. output:%s" % str(stderr))
+ raise Exception(
+ "failed to get _SYS_PASSWORD by ctencrypt. output:%s" % str(stderr)
+ )
return stdout
@@ -82,7 +95,7 @@ def run_upgrade_modify_sys_tables_ctsql(encrypt_pwd):
port = input()
passwd = resolve_kmc_pwd(encrypt_pwd)
try:
- with open(sql_file_path, 'r') as sql_file:
+ with open(sql_file_path, "r") as sql_file:
sql = ""
line = sql_file.readline()
while line:
@@ -110,6 +123,6 @@ if __name__ == "__main__":
"resolve_check_cert_pwd": resolve_check_cert_pwd,
"kmc_to_ctencrypt_pwd": kmc_to_ctencrypt_pwd,
"run_upgrade_modify_sys_tables_ctsql": run_upgrade_modify_sys_tables_ctsql,
- "resolve_kmc_pwd": resolve_kmc_pwd
+ "resolve_kmc_pwd": resolve_kmc_pwd,
}
- print(options.get(action)(encrypt_pwd.strip()))
\ No newline at end of file
+ print(options.get(action)(encrypt_pwd.strip()))
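
# A minimal reproduction of the environment scrub in resolve_kmc_pwd() above:
# every LD_LIBRARY_PATH entry under the dbstor lib directory is dropped after
# decryption. The path value below is an example only.
env = "/usr/lib64:/opt/cantian/dbstor/lib:/usr/local/lib"

filtered = [e for e in env.split(":") if "/opt/cantian/dbstor/lib" not in e]
print(":".join(filtered))  # /usr/lib64:/usr/local/lib
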
diff --git a/pkg/deploy/action/docker/update_config.py b/pkg/deploy/action/docker/update_config.py
index a6c4e5867c32960ddb602462fdfc2025be7fbd71..6465f46b957c4191a74701825ad4916e13e36266 100644
--- a/pkg/deploy/action/docker/update_config.py
+++ b/pkg/deploy/action/docker/update_config.py
@@ -2,6 +2,7 @@ import os
import json
import re
import sys
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../"))
from cantian.get_config_info import get_value
@@ -10,7 +11,9 @@ from om_log import LOGGER as LOG
CONFIG_PATH = os.path.join(CUR_PATH, "../../config")
INIT_CONFIG_PATH = os.path.join(CONFIG_PATH, "container_conf/init_conf")
-MY_CNF_FILE = "/opt/cantian/image/cantian_connector/cantian-connector-mysql/scripts/my.cnf"
+MY_CNF_FILE = (
+ "/opt/cantian/image/cantian_connector/cantian-connector-mysql/scripts/my.cnf"
+)
MYSQL_CONFIG_FILE = os.path.join(INIT_CONFIG_PATH, "mysql_config.json")
MYSQL_PARAMS = ["max_connections", "table_open_cache", "table_open_cache_instances"]
@@ -115,5 +118,7 @@ if __name__ == "__main__":
LOG.error(f"An error occurred while executing '{action}': {e}")
sys.exit(1)
else:
- LOG.error(f"Invalid action '{action}'. Available options: {', '.join(func_dict.keys())}")
- sys.exit(1)
\ No newline at end of file
+ LOG.error(
+ f"Invalid action '{action}'. Available options: {', '.join(func_dict.keys())}"
+ )
+ sys.exit(1)
diff --git a/pkg/deploy/action/docker/update_policy_params.py b/pkg/deploy/action/docker/update_policy_params.py
index 0865e0a6b4199e059768c4197391d9f1809d923f..7d0dba27a4d9b438f10b21467b062ac4762cb6c1 100644
--- a/pkg/deploy/action/docker/update_policy_params.py
+++ b/pkg/deploy/action/docker/update_policy_params.py
@@ -2,20 +2,22 @@ import os
import json
import stat
import sys
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, ".."))
from om_log import LOGGER as LOG
+
dir_name, _ = os.path.split(os.path.abspath(__file__))
def parse_policy_config_file():
policy_path = os.path.join(dir_name, "../deploy_policy_config.json")
try:
- with open(policy_path, 'r', encoding='utf8') as file_path:
+ with open(policy_path, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
return json_data
except Exception as error:
- LOG.error('load %s error, error: %s', policy_path, str(error))
+ LOG.error("load %s error, error: %s", policy_path, str(error))
return False
@@ -23,11 +25,11 @@ def parse_cantian_config_file():
cantian_config_dir = os.path.join(dir_name, "../../config/container_conf/init_conf")
cantian_config_path = os.path.join(cantian_config_dir, "deploy_param.json")
try:
- with open(cantian_config_path, 'r', encoding='utf8') as file_path:
+ with open(cantian_config_path, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
return json_data
except Exception as error:
- LOG.error('load %s error, error: %s', cantian_config_path, str(error))
+ LOG.error("load %s error, error: %s", cantian_config_path, str(error))
return False
@@ -36,7 +38,7 @@ def write_back_to_json(cantian_config_json):
cantian_config_path = os.path.join(cantian_config_dir, "deploy_param.json")
modes = stat.S_IRWXU | stat.S_IROTH | stat.S_IRGRP
flag = os.O_RDWR | os.O_CREAT | os.O_TRUNC
- with os.fdopen(os.open(cantian_config_path, flag, modes), 'w') as file_path:
+ with os.fdopen(os.open(cantian_config_path, flag, modes), "w") as file_path:
config_params = json.dumps(cantian_config_json, indent=4)
file_path.write(config_params)
@@ -57,7 +59,7 @@ def main():
source_config_json["deploy_policy"] = "default"
write_back_to_json(source_config_json)
return
- LOG.info("deploy policy is %s" % deploy_policy_key)
+ LOG.info("deploy policy is %s" % deploy_policy_key)
    # If the deploy policy is not configured, fail and end the installation
deploy_policy_value = source_deploy_policy_json.get(deploy_policy_key, {})
@@ -72,8 +74,8 @@ def main():
write_back_to_json(source_config_json)
-if __name__ == '__main__':
+if __name__ == "__main__":
try:
main()
except Exception as e:
- exit(str(e))
\ No newline at end of file
+ exit(str(e))
diff --git a/pkg/deploy/action/docker/upgrade_version_check.py b/pkg/deploy/action/docker/upgrade_version_check.py
index b808a2efbdbd3ac288b9b043b7b41d35f59c3f1a..d25c0c9be971212aed0329e5af5922b10f6a6ce8 100644
--- a/pkg/deploy/action/docker/upgrade_version_check.py
+++ b/pkg/deploy/action/docker/upgrade_version_check.py
@@ -3,25 +3,34 @@ import os
import re
from pathlib import Path
from get_config_info import get_value
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, ".."))
from om_log import LOGGER as LOG
METADATA_FS = get_value("storage_metadata_fs")
-VERSION_PREFIX = 'Version:'
-SUB_VERSION_PREFIX = ('B', 'SP')
+VERSION_PREFIX = "Version:"
+SUB_VERSION_PREFIX = ("B", "SP")
class UpgradeVersionCheck:
def __init__(self):
self.white_list_file = str(Path(os.path.join(CUR_PATH, "white_list.txt")))
- self.source_version_file = str(Path(os.path.join("/mnt/dbdata/remote/metadata_" + METADATA_FS, "versions.yml")))
+ self.source_version_file = str(
+ Path(
+ os.path.join(
+ "/mnt/dbdata/remote/metadata_" + METADATA_FS, "versions.yml"
+ )
+ )
+ )
self.white_list_dict = {}
- self.source_version = ''
- self.err = {'read_failed': 'white list or source version read failed',
- 'match_failed': 'source version not in white list',
- 'updata_system_version_failed': 'update system version failed'}
+ self.source_version = ""
+ self.err = {
+ "read_failed": "white list or source version read failed",
+ "match_failed": "source version not in white list",
+ "updata_system_version_failed": "update system version failed",
+ }
@staticmethod
def update_system_version():
@@ -31,10 +40,10 @@ class UpgradeVersionCheck:
@staticmethod
def execption_handler(err_msg):
LOG.error(err_msg)
- return 'False {}'.format(err_msg)
-
+ return "False {}".format(err_msg)
+
def process_white_list(self):
- with open(self.white_list_file, 'r', encoding='utf-8') as file:
+ with open(self.white_list_file, "r", encoding="utf-8") as file:
white_list_info = file.readlines()
for white_list_detail in white_list_info[1:]:
@@ -44,8 +53,8 @@ class UpgradeVersionCheck:
self.white_list_dict[details[0]] = [details[1], details[2]]
def read_source_version_info(self):
- version = ''
- with open(self.source_version_file, 'r', encoding='utf-8') as file:
+ version = ""
+ with open(self.source_version_file, "r", encoding="utf-8") as file:
source_version_info = file.readlines()
for line in source_version_info:
@@ -56,21 +65,22 @@ class UpgradeVersionCheck:
def source_version_check(self):
for white_list_version, white_list_detail in self.white_list_dict.items():
- *white_main_version, white_sub_version = white_list_version.split('.')
- *source_main_version, source_sub_version = self.source_version.split('.')
+ *white_main_version, white_sub_version = white_list_version.split(".")
+ *source_main_version, source_sub_version = self.source_version.split(".")
if source_main_version != white_main_version:
continue
- if white_sub_version == '*' or white_sub_version == source_sub_version:
+ if white_sub_version == "*" or white_sub_version == source_sub_version:
if "rollup" in white_list_detail[0]:
- return 'True {} rollup'.format(white_list_detail[1])
- return 'True {} offline'.format(white_list_detail[1])
+ return "True {} rollup".format(white_list_detail[1])
+ return "True {} offline".format(white_list_detail[1])
err_msg = "source version '{}' not in white list.".format(self.source_version)
return self.execption_handler(err_msg)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
version_check = UpgradeVersionCheck()
version_check.read_source_version_info()
version_check.process_white_list()
- print(version_check.source_version_check())
\ No newline at end of file
+ print(version_check.source_version_check())
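
# A self-contained sketch of source_version_check() above: the main version
# segments must match exactly, while the white-list sub-version may be a
# literal or the "*" wildcard. The version strings are made up.
def version_allowed(white_list_version, source_version):
    *white_main, white_sub = white_list_version.split(".")
    *source_main, source_sub = source_version.split(".")
    if source_main != white_main:
        return False
    return white_sub == "*" or white_sub == source_sub

print(version_allowed("2.0.0.*", "2.0.0.B230"))     # True: wildcard sub-version
print(version_allowed("2.0.0.B100", "2.0.0.B230"))  # False: sub-version differs
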
diff --git a/pkg/deploy/action/dss/config.py b/pkg/deploy/action/dss/config.py
index f55b931bab349779b1d0da4db5d7bb7c5bc97180..899d285f8c86c83b78d33604c849a67540b42c1e 100644
--- a/pkg/deploy/action/dss/config.py
+++ b/pkg/deploy/action/dss/config.py
@@ -1,8 +1,4 @@
-VG_CONFIG = {
- "vg1": "/dev/dss-disk1",
- "vg2": "/dev/dss-disk2",
- "vg3": "/dev/dss-disk3"
-}
+VG_CONFIG = {"vg1": "/dev/dss-disk1", "vg2": "/dev/dss-disk2", "vg3": "/dev/dss-disk3"}
INST_CONFIG = {
"INST_ID": 0,
@@ -12,5 +8,5 @@ INST_CONFIG = {
"LSNR_PATH": "",
"_LOG_BACKUP_FILE_COUNT": "40",
"_LOG_MAX_FILE_SIZE": "120M",
- "DSS_CM_SO_NAME": "libdsslock.so"
-}
\ No newline at end of file
+ "DSS_CM_SO_NAME": "libdsslock.so",
+}
diff --git a/pkg/deploy/action/dss/dssctl.py b/pkg/deploy/action/dss/dssctl.py
index 9b52af8b5af77eae6ad36d34df4c32fedb335798..f064bae34569556e0be9b5ceaf6bdd1a2320c5a9 100644
--- a/pkg/deploy/action/dss/dssctl.py
+++ b/pkg/deploy/action/dss/dssctl.py
@@ -45,9 +45,7 @@ def setup():
log = logging.getLogger("dss")
for handler in list(log.handlers):
log.removeHandler(handler)
- file_log = handlers.RotatingFileHandler(
- LOG_FILE, maxBytes=6291456,
- backupCount=5)
+ file_log = handlers.RotatingFileHandler(LOG_FILE, maxBytes=6291456, backupCount=5)
log.addHandler(file_log)
log.addHandler(console)
@@ -55,8 +53,10 @@ def setup():
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s]"
- " [tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S"))
+ " [tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+ )
log.setLevel(logging.INFO)
return log
@@ -72,13 +72,13 @@ class ComOpt:
content.append("{}{}{}".format(key, split, contents[key]))
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.write("\n".join(content))
os.chmod(file_path, 0o640)
@staticmethod
def read_ini(file_path: str) -> str:
- with open(file_path, 'r', encoding="utf-8") as file:
+ with open(file_path, "r", encoding="utf-8") as file:
return file.read()
@@ -105,14 +105,15 @@ class DssCtl(object):
:param action: add/delete
:return:
"""
- home_directory = os.path.expanduser('~')
- bashrc_path = os.path.join(home_directory, '.bashrc')
- with open(bashrc_path, 'r') as bashrc_file:
+ home_directory = os.path.expanduser("~")
+ bashrc_path = os.path.join(home_directory, ".bashrc")
+ with open(bashrc_path, "r") as bashrc_file:
bashrc_content = bashrc_file.readlines()
env = [
"export DSS_HOME=%s\n" % DSS_HOME,
- "export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n" % os.path.join(DSS_HOME, "lib"),
- "export PATH=%s:$PATH\n" % os.path.join(DSS_HOME, "bin")
+ "export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n"
+ % os.path.join(DSS_HOME, "lib"),
+ "export PATH=%s:$PATH\n" % os.path.join(DSS_HOME, "bin"),
]
for line in env:
if action == "add":
@@ -124,7 +125,9 @@ class DssCtl(object):
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(bashrc_path, flags, modes), 'w', encoding='utf-8') as bashrc_file:
+ with os.fdopen(
+ os.open(bashrc_path, flags, modes), "w", encoding="utf-8"
+ ) as bashrc_file:
bashrc_file.writelines(bashrc_content)
@staticmethod
@@ -136,12 +139,15 @@ class DssCtl(object):
LOG.info(err_msg)
if stdout:
LOG.info("dss server pid is[%s].", stdout)
- for line in re.split(r'\n\s', stdout):
+ for line in re.split(r"\n\s", stdout):
kill_cmd = "kill -9 %s" % line.strip()
return_code, stdout, stderr = exec_popen(kill_cmd, timeout=TIMEOUT)
if return_code:
output = stdout + stderr
- err_msg = "Exec kill cmd[%s] failed, details: %s" % (cmd, str(output))
+ err_msg = "Exec kill cmd[%s] failed, details: %s" % (
+ cmd,
+ str(output),
+ )
LOG.error(err_msg)
def wait_dss_instance_started(self):
@@ -151,16 +157,26 @@ class DssCtl(object):
time.sleep(5)
timeout = timeout - 5
if os.path.exists(self.log_file):
- with open(self.log_file, 'r', errors='ignore') as f:
+ with open(self.log_file, "r", errors="ignore") as f:
all_the_text = f.read()
- succ_pattern = re.compile(r'.*(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?DSS SERVER STARTED.*?',
- re.IGNORECASE)
- fail_pattern = re.compile(r'.*(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?dss failed to startup.*?',
- re.IGNORECASE)
+ succ_pattern = re.compile(
+ r".*(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?DSS SERVER STARTED.*?",
+ re.IGNORECASE,
+ )
+ fail_pattern = re.compile(
+ r".*(\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}\:\d{2}).*?dss failed to startup.*?",
+ re.IGNORECASE,
+ )
succ_timestamps = re.findall(succ_pattern, all_the_text)
fail_timestamps = re.findall(fail_pattern, all_the_text)
- is_instance_started = len(succ_timestamps) != 0 and max(succ_timestamps) >= self.begin_time
- is_instance_failed = len(fail_timestamps) != 0 and max(fail_timestamps) >= self.begin_time
+ is_instance_started = (
+ len(succ_timestamps) != 0
+ and max(succ_timestamps) >= self.begin_time
+ )
+ is_instance_failed = (
+ len(fail_timestamps) != 0
+ and max(fail_timestamps) >= self.begin_time
+ )
if is_instance_started:
LOG.info("DSS server started successfully.")
return
@@ -182,10 +198,15 @@ class DssCtl(object):
LOG.info("Start to exec dsscmd cv.")
dsscmd = "source ~/.bashrc && dsscmd cv -g %s -v %s"
for key, value in VG_CONFIG.items():
- return_code, stdout, stderr = exec_popen(dsscmd % (key, value), timeout=TIMEOUT)
+ return_code, stdout, stderr = exec_popen(
+ dsscmd % (key, value), timeout=TIMEOUT
+ )
if return_code:
output = stdout + stderr
- err_msg = "Dsscmd cv cmd[%s] exec failed, details: %s" % (dsscmd % (key, value), str(output))
+ err_msg = "Dsscmd cv cmd[%s] exec failed, details: %s" % (
+ dsscmd % (key, value),
+ str(output),
+ )
raise Exception(err_msg)
LOG.info("Success to exec dsscmd cv.")
else:
@@ -200,10 +221,15 @@ class DssCtl(object):
LOG.info("start to init lun.")
init_cmd = "dd if=/dev/zero of=%s bs=1M count=1 conv=notrunc"
for key, value in VG_CONFIG.items():
- return_code, stdout, stderr = exec_popen(init_cmd % value, timeout=TIMEOUT)
+ return_code, stdout, stderr = exec_popen(
+ init_cmd % value, timeout=TIMEOUT
+ )
if return_code:
output = stdout + stderr
- err_msg = "Init lun cmd[%s] exec failed, details: %s" % (init_cmd % value, str(output))
+ err_msg = "Init lun cmd[%s] exec failed, details: %s" % (
+ init_cmd % value,
+ str(output),
+ )
raise Exception(err_msg)
LOG.info("Init lun cmd[] exec success.", init_cmd)
LOG.info("Success to init lun.")
@@ -221,7 +247,9 @@ class DssCtl(object):
os.makedirs(DSS_CFG, exist_ok=True)
ComOpt.write_ini(self.dss_vg_cfg, VG_CONFIG, split=":")
INST_CONFIG["INST_ID"] = self.node_id
- INST_CONFIG["DSS_NODES_LIST"] = "0:{}:1811,1:{}:1811".format(self.cms_ip.split(";")[0], self.cms_ip.split(";")[1])
+ INST_CONFIG["DSS_NODES_LIST"] = "0:{}:1811,1:{}:1811".format(
+ self.cms_ip.split(";")[0], self.cms_ip.split(";")[1]
+ )
INST_CONFIG["LSNR_PATH"] = DSS_HOME
INST_CONFIG["LOG_HOME"] = DSS_LOG
ComOpt.write_ini(self.dss_inst_cfg, INST_CONFIG)
@@ -251,8 +279,10 @@ class DssCtl(object):
os.chmod(dss_contrl_path, 0o700)
if self.node_id == "0":
LOG.info("Start to add dss res.")
- cmd = ("source ~/.bashrc && %s/bin/cms res -add dss -type dss -attr \"script=%s\""
- % (CMS_HOME, DSS_CTRL_SCRIPTS))
+ cmd = (
+ 'source ~/.bashrc && %s/bin/cms res -add dss -type dss -attr "script=%s"'
+ % (CMS_HOME, DSS_CTRL_SCRIPTS)
+ )
return_code, stdout, stderr = exec_popen(cmd, timeout=TIMEOUT)
if return_code:
output = stdout + stderr
@@ -269,7 +299,7 @@ class DssCtl(object):
LOG.info("Start to config perctrl permission.")
cap_mode = f"{CAP_ADM},{CAP_WIO}"
path = f"{DSS_HOME}/bin/perctrl"
- cmd = f'sudo setcap {cap_mode}+ep {path}'
+ cmd = f"sudo setcap {cap_mode}+ep {path}"
return_code, stdout, stderr = exec_popen(cmd, timeout=TIMEOUT)
if return_code:
output = stdout + stderr
@@ -282,7 +312,11 @@ class DssCtl(object):
check current node is reg.
:return:
"""
- kick_cmd = "source ~/.bashrc && %s/bin/dsscmd inq_reg -i %s -D %s" % (DSS_HOME, self.node_id, DSS_HOME)
+ kick_cmd = "source ~/.bashrc && %s/bin/dsscmd inq_reg -i %s -D %s" % (
+ DSS_HOME,
+ self.node_id,
+ DSS_HOME,
+ )
return_code, stdout, stderr = exec_popen(kick_cmd, timeout=TIMEOUT)
if return_code:
output = stdout + stderr
@@ -296,7 +330,10 @@ class DssCtl(object):
:return:
"""
LOG.info("Start to kick node.")
- kick_cmd = "source ~/.bashrc && %s/bin/dsscmd unreghl -D %s" % (DSS_HOME, DSS_HOME)
+ kick_cmd = "source ~/.bashrc && %s/bin/dsscmd unreghl -D %s" % (
+ DSS_HOME,
+ DSS_HOME,
+ )
return_code, stdout, stderr = exec_popen(kick_cmd, timeout=TIMEOUT)
if return_code:
output = stdout + stderr
@@ -360,14 +397,14 @@ class DssCtl(object):
self.prepare_source()
self.cms_add_dss_res()
self.config_perctrl_permission()
-
+
with open(INSTALL_FILE, encoding="utf-8") as f:
_tmp = f.read()
info = json.loads(_tmp)
dss_install_type = info.get("install_type", "")
-
+
LOG.info("dss_install_type is %s", dss_install_type)
-
+
if dss_install_type != "reserve":
self.prepare_dss_dick()
self.reghl_dss_disk()
@@ -377,10 +414,14 @@ class DssCtl(object):
LOG.info("Start backup.")
if not os.path.exists(BACKUP_NAME):
os.makedirs(BACKUP_NAME, exist_ok=True)
- shutil.copytree(DSS_CFG, BACKUP_NAME,
- symlinks=False, ignore=None,
- copy_function=shutil.copy2,
- ignore_dangling_symlinks=False)
+ shutil.copytree(
+ DSS_CFG,
+ BACKUP_NAME,
+ symlinks=False,
+ ignore=None,
+ copy_function=shutil.copy2,
+ ignore_dangling_symlinks=False,
+ )
if not os.path.exists(os.path.join(BACKUP_NAME, "scripts")):
os.makedirs(os.path.join(BACKUP_NAME, "scripts"))
shutil.copytree(SCRIPTS_DIR, os.path.join(BACKUP_NAME, "scripts"))
@@ -412,7 +453,9 @@ class DssCtl(object):
self.reghl_dss_disk()
self.begin_time = str(datetime.datetime.now()).split(".")[0]
dssserver_cmd = "source ~/.bashrc && nohup dssserver -D %s &" % DSS_HOME
- subprocess.Popen(dssserver_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ subprocess.Popen(
+ dssserver_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
self.wait_dss_instance_started()
if self.check_status():
LOG.info("Success to start dss server.")
@@ -451,9 +494,20 @@ class DssCtl(object):
def main():
parse = argparse.ArgumentParser()
- parse.add_argument("--action", type=str,
- choices=["install", "uninstall", "start", "stop", "pre_install",
- "upgrade", "rollback", "pre_upgrade"])
+ parse.add_argument(
+ "--action",
+ type=str,
+ choices=[
+ "install",
+ "uninstall",
+ "start",
+ "stop",
+ "pre_install",
+ "upgrade",
+ "rollback",
+ "pre_upgrade",
+ ],
+ )
parse.add_argument("--mode", required=False, dest="mode", default="")
arg = parse.parse_args()
act = arg.action
@@ -467,4 +521,4 @@ if __name__ == "__main__":
except Exception as err:
LOG.error(str(err))
exit(str(err))
- exit(0)
\ No newline at end of file
+ exit(0)
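
# wait_dss_instance_started() above decides success by log scraping. A compact
# sketch of that decision: pull the timestamps next to the success/failure
# markers and compare the newest one against the start time (string comparison
# works for zero-padded timestamps). The log text below is faked.
import re

begin_time = "2024-01-01 10:00:00"
log_text = "... 2024-01-01 10:00:05 DSS SERVER STARTED ..."

succ = re.findall(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*?DSS SERVER STARTED", log_text, re.IGNORECASE)
fail = re.findall(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*?dss failed to startup", log_text, re.IGNORECASE)

started = bool(succ) and max(succ) >= begin_time
failed = bool(fail) and max(fail) >= begin_time
print(started, failed)  # True False
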
diff --git a/pkg/deploy/action/fetch_cls_stat.py b/pkg/deploy/action/fetch_cls_stat.py
index ed289ad08bbfb4ccdd834358a03e00b08c8ce161..fc4ed2900b54e5e134ed217be3151135e3f81e86 100644
--- a/pkg/deploy/action/fetch_cls_stat.py
+++ b/pkg/deploy/action/fetch_cls_stat.py
@@ -21,8 +21,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -53,17 +58,17 @@ def _exec_popen(cmd, values=None):
def get_user():
config_path = Path(CONFIG_PATH)
config_list = json.loads(config_path.read_text())
- return config_list["deploy_user"].split(':')[0]
+ return config_list["deploy_user"].split(":")[0]
def parse_node_stat(node_stat):
- keys = ['NODE_ID', 'NAME', 'STAT', 'PRE_STAT']
+ keys = ["NODE_ID", "NAME", "STAT", "PRE_STAT"]
values = node_stat.split()
stat_json = {}
- for (idx, key) in enumerate(keys):
+ for idx, key in enumerate(keys):
stat_json[key] = values[idx]
online = False
- if stat_json.get('STAT') == 'ONLINE':
+ if stat_json.get("STAT") == "ONLINE":
online = True
return online, stat_json
@@ -72,7 +77,7 @@ def fetch_cms_stat():
user = CANTIAN_USER
cmd = 'su - %s -s /bin/bash -c "source ~/.bashrc && cms stat" | tail -n +2' % user
_, output, _ = _exec_popen(cmd)
- output = output.split('\n')
+ output = output.split("\n")
cms_stat_json = {}
if len(output) <= 1:
return (False, cms_stat_json)
@@ -83,19 +88,19 @@ def fetch_cms_stat():
detail_json.append(stat_json)
if online:
online_cnt += 1
- cms_stat_json['DETAIL'] = detail_json
+ cms_stat_json["DETAIL"] = detail_json
if online_cnt == 0:
- cms_stat_json['STATUS'] = 'OFFLINE'
+ cms_stat_json["STATUS"] = "OFFLINE"
elif online_cnt == len(output):
- cms_stat_json['STATUS'] = 'ONLINE'
+ cms_stat_json["STATUS"] = "ONLINE"
else:
- cms_stat_json['STATUS'] = 'PARTIALLY_ONLINE'
+ cms_stat_json["STATUS"] = "PARTIALLY_ONLINE"
return (True, cms_stat_json)
def gen_fault_result():
result_json = {}
- result_json['RESULT'] = -1
+ result_json["RESULT"] = -1
return json.dumps(result_json)
@@ -104,10 +109,10 @@ def fetch_cls_stat():
if not success:
return gen_fault_result()
status_json = {}
- status_json['CMS_STAT'] = cms_stat_json
- status_json['RESULT'] = 0
+ status_json["CMS_STAT"] = cms_stat_json
+ status_json["RESULT"] = 0
return json.dumps(status_json)
-if __name__ == '__main__':
+if __name__ == "__main__":
print(fetch_cls_stat())
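
# A sketch of the row parsing in parse_node_stat() above: zip a fixed key list
# with the whitespace-separated columns of one "cms stat" row. The sample row
# is invented; real rows come from the cms CLI.
def parse_node_stat_sketch(node_stat):
    keys = ["NODE_ID", "NAME", "STAT", "PRE_STAT"]
    values = node_stat.split()
    stat_json = {key: values[idx] for idx, key in enumerate(keys)}
    return stat_json.get("STAT") == "ONLINE", stat_json

online, stat = parse_node_stat_sketch("0 node0 ONLINE OFFLINE")
print(online, stat["STAT"])  # True ONLINE
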
diff --git a/pkg/deploy/action/find_next_page.py b/pkg/deploy/action/find_next_page.py
index baa6373078d516619cb3e012a6e988125063b2f4..bc3851364d7e3d79b8b0122b8f0cd63f35f64a1b 100644
--- a/pkg/deploy/action/find_next_page.py
+++ b/pkg/deploy/action/find_next_page.py
@@ -3,7 +3,6 @@ import sys
class Finder:
-
"""
information of page 4
page head info {
@@ -59,15 +58,15 @@ class Finder:
def get_next(line_string):
next_page = line_string.split(" ")[2]
return next_page
-
+
def get_line_array(self):
- with open(self.file_name, 'r') as f:
+ with open(self.file_name, "r") as f:
self.line_array = f.readlines()
-
+
def get_hard_damage_and_checksum(self):
- if(self.get_hard_damage(self.line_array[self.line_num]) == "0"):
+ if self.get_hard_damage(self.line_array[self.line_num]) == "0":
self.line_num += 2
- if (self.get_checksum(self.line_array[self.line_num]) == "success"):
+ if self.get_checksum(self.line_array[self.line_num]) == "success":
return 1
return 0
@@ -82,21 +81,28 @@ class Finder:
tmp_next = "0-0"
tmp_page_type_corr_and_not_damage = 0
while self.line_num < len(self.line_array):
- if (self.line_array[self.line_num].startswith(self.page_id_start)):
+ if self.line_array[self.line_num].startswith(self.page_id_start):
tmp_page_id = self.get_page_id(self.line_array[self.line_num])
- if (self.get_type(self.line_array[self.line_num]) == self.page_pcr_heap and
- self.get_hard_damage_and_checksum() == 1):
+ if (
+ self.get_type(self.line_array[self.line_num]) == self.page_pcr_heap
+ and self.get_hard_damage_and_checksum() == 1
+ ):
tmp_page_type_corr_and_not_damage = 1
-
- if (tmp_page_type_corr_and_not_damage == 1 and
- self.line_array[self.line_num].startswith(self.page_next_start)):
+
+ if tmp_page_type_corr_and_not_damage == 1 and self.line_array[
+ self.line_num
+ ].startswith(self.page_next_start):
tmp_next = self.get_next(self.line_array[self.line_num])
- if (tmp_next == self.target_page_id):
+ if tmp_next == self.target_page_id:
self.prev_page_id = tmp_page_id
- print_message("find the prev corr page: " + str(self.prev_page_id) + ", whose next is " +
- str(self.target_page_id))
+ print_message(
+ "find the prev corr page: "
+ + str(self.prev_page_id)
+ + ", whose next is "
+ + str(self.target_page_id)
+ )
return
- if (self.line_array[self.line_num].startswith(self.page_start)):
+ if self.line_array[self.line_num].startswith(self.page_start):
return
self.line_num += 1
return
@@ -110,20 +116,33 @@ class Finder:
tmp_page_id = "0-0"
tmp_find_next = 0
while self.line_num < len(self.line_array):
- if (self.line_array[self.line_num].startswith(self.page_id_start) and
- self.get_page_id(self.line_array[self.line_num]) == self.target_page_id):
+ if (
+ self.line_array[self.line_num].startswith(self.page_id_start)
+ and self.get_page_id(self.line_array[self.line_num])
+ == self.target_page_id
+ ):
tmp_find_next = 1
tmp_page_id = self.get_page_id(self.line_array[self.line_num])
- if (self.get_hard_damage_and_checksum() == 1):
+ if self.get_hard_damage_and_checksum() == 1:
self.correct_page_id = tmp_page_id
- print_message("the target page[" + str(tmp_page_id) + "] is correct!")
+ print_message(
+ "the target page[" + str(tmp_page_id) + "] is correct!"
+ )
return
- if (tmp_find_next == 1 and self.line_array[self.line_num].startswith(self.page_next_start)):
+ if tmp_find_next == 1 and self.line_array[self.line_num].startswith(
+ self.page_next_start
+ ):
self.target_page_id = self.get_next(self.line_array[self.line_num])
- print_message("the bad page[" + str(tmp_page_id) + "] is bad, continue to find [" +
- str(tmp_page_id) + "]'s next: " + str(self.target_page_id))
+ print_message(
+ "the bad page["
+ + str(tmp_page_id)
+ + "] is bad, continue to find ["
+ + str(tmp_page_id)
+ + "]'s next: "
+ + str(self.target_page_id)
+ )
return
- if (self.line_array[self.line_num].startswith(self.page_start)):
+ if self.line_array[self.line_num].startswith(self.page_start):
return
self.line_num += 1
return
@@ -131,12 +150,12 @@ class Finder:
def get(self):
self.get_line_array()
while self.line_num < len(self.line_array):
- if (self.line_array[self.line_num].startswith(self.page_id_start)):
+ if self.line_array[self.line_num].startswith(self.page_id_start):
if self.prev_page_id == "0-0":
self.handle_page()
else:
self.handle_bad_page()
- if (self.correct_page_id != "0-0"):
+ if self.correct_page_id != "0-0":
print_message("find the correct next: " + str(self.correct_page_id))
break
self.line_num += 1
@@ -148,6 +167,7 @@ class Options(object):
self.file_name = ""
self.target_page_id = ""
+
g_opts = Options()
@@ -170,12 +190,12 @@ def parse_parameter():
g_opts.target_page_id = _value
else:
raise Exception("error")
-
+
except getopt.GetoptError as err:
raise Exception("Parameter input error: " + err.msg) from err
- if (g_opts.file_name == ""):
+ if g_opts.file_name == "":
raise Exception("please input the correct file_name.")
- if (g_opts.target_page_id == ""):
+ if g_opts.target_page_id == "":
raise Exception("please input the correct page_id.")
@@ -191,18 +211,25 @@ def main():
parse_parameter()
finder = Finder(g_opts.file_name, g_opts.target_page_id)
finder.get()
- if (finder.correct_page_id != "0-0"):
- print_message("please set [" + str(finder.prev_page_id) + "]'s next to [" +
- str(finder.correct_page_id) + "].")
- elif (finder.prev_page_id == "0-0"):
- print_message("the bad page [" + str(finder.target_page_id) + "] has no prev_page.")
+ if finder.correct_page_id != "0-0":
+ print_message(
+ "please set ["
+ + str(finder.prev_page_id)
+ + "]'s next to ["
+ + str(finder.correct_page_id)
+ + "]."
+ )
+ elif finder.prev_page_id == "0-0":
+ print_message(
+ "the bad page [" + str(finder.target_page_id) + "] has no prev_page."
+ )
else:
print_message("Cannot find the next correct page.")
-
+
if __name__ == "__main__":
try:
main()
except Exception as ex:
print_message("Error: " + str(ex))
- exit(1)
\ No newline at end of file
+ exit(1)
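
# The pointer-chasing idea behind Finder above, reduced to a dict walk:
# starting from a damaged page, follow "next" links until a page passes the
# damage/checksum test, then relink prev -> the first good page. The page
# table below is a toy stand-in for the parsed dump file.
pages = {
    "1-10": {"ok": True, "next": "1-11"},   # prev page, points at the bad one
    "1-11": {"ok": False, "next": "1-12"},  # damaged target page
    "1-12": {"ok": True, "next": "1-13"},   # first correct next
}

target = "1-11"
while not pages[target]["ok"]:
    target = pages[target]["next"]
print("please set [1-10]'s next to [%s]." % target)  # 1-12
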
diff --git a/pkg/deploy/action/get_config_info.py b/pkg/deploy/action/get_config_info.py
index 751ed2551b558486d10f230d77e72337b6501e9b..92e645c4c6242faeba43b2d7f0d8e4879581111a 100644
--- a/pkg/deploy/action/get_config_info.py
+++ b/pkg/deploy/action/get_config_info.py
@@ -15,19 +15,21 @@ def get_value(param):
_tmp = f.read()
info = json.loads(_tmp)
    # deploy_user format: user:group
- if param == 'deploy_user':
- return info.get('deploy_user').split(':')[0]
+ if param == "deploy_user":
+ return info.get("deploy_user").split(":")[0]
- if param == 'deploy_group':
- return info.get('deploy_user').split(':')[1]
+ if param == "deploy_group":
+ return info.get("deploy_user").split(":")[1]
if param == "cluster_scale":
return len(info.get("cms_ip").split(";"))
if param == "deploy_mode":
- if info.get('deploy_mode', ""):
- return info.get('deploy_mode')
- return "dbstor" if info.get('deploy_policy', "") in ["ModeB", "ModeC"] else "file"
+ if info.get("deploy_mode", ""):
+ return info.get("deploy_mode")
+ return (
+ "dbstor" if info.get("deploy_policy", "") in ["ModeB", "ModeC"] else "file"
+ )
return info.get(param, "")
diff --git a/pkg/deploy/action/get_source_version.py b/pkg/deploy/action/get_source_version.py
index 759cbca35e7c2f0048b8be04cdb5f5b4fedb5fd5..3c44a6b62c4e3f5b7662461ed78716b4b8eb78a3 100644
--- a/pkg/deploy/action/get_source_version.py
+++ b/pkg/deploy/action/get_source_version.py
@@ -2,12 +2,12 @@ from upgrade_version_check import UpgradeVersionCheck
from om_log import LOGGER as LOG
-if __name__ == '__main__':
+if __name__ == "__main__":
version_check = UpgradeVersionCheck()
try:
version_check.read_source_version_info()
except Exception as err:
- LOG.error(f'obtain source version failed with error: {str(err)}')
- exit('')
+ LOG.error(f"obtain source version failed with error: {str(err)}")
+ exit("")
print(version_check.source_version)
diff --git a/pkg/deploy/action/implement/certificate_update_and_revocation.py b/pkg/deploy/action/implement/certificate_update_and_revocation.py
index 9e6bd3ddfabddf14549d2801d43bbb2a2f2ee7dc..1ce13ac77289690c3da7eaead1d72738910db2c4 100644
--- a/pkg/deploy/action/implement/certificate_update_and_revocation.py
+++ b/pkg/deploy/action/implement/certificate_update_and_revocation.py
@@ -14,11 +14,12 @@ CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../"))
from logic.common_func import exec_popen
from om_log import LOGGER as LOG
+
ENV_FILE = "/opt/cantian/action/env.sh"
def get_param_value(param):
- with open(ENV_FILE, 'r', encoding='utf-8') as file:
+ with open(ENV_FILE, "r", encoding="utf-8") as file:
env_config = file.readlines()
if param == "cantian_user":
for line in env_config:
@@ -120,11 +121,13 @@ class CertificateUpdateAndRevocation(object):
"""
        Update the certificate password.
"""
- cmd = "su -s /bin/bash - cantian -c \""
+ cmd = 'su -s /bin/bash - cantian -c "'
cmd += "tmp_path=${LD_LIBRARY_PATH};export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH};"
- cmd += f"echo -e '{passwd}' | python3 -B /opt/cantian/action/implement" \
- f"/update_cantian_passwd.py update_mes_key_pwd;"
- cmd += "export LD_LIBRARY_PATH=${tmp_path}\""
+ cmd += (
+ f"echo -e '{passwd}' | python3 -B /opt/cantian/action/implement"
+ f"/update_cantian_passwd.py update_mes_key_pwd;"
+ )
+ cmd += 'export LD_LIBRARY_PATH=${tmp_path}"'
ret_code, _, stderr = exec_popen(cmd)
stderr = str(stderr)
        stderr = stderr.replace(passwd, "****")
@@ -208,6 +211,7 @@ class CertificateUpdateAndRevocation(object):
raise Exception("query certifcate info failed, output:%s" % str(stderr))
print(str(stdout))
+
if __name__ == "__main__":
cert_update_and_revocation = CertificateUpdateAndRevocation()
_args = []
@@ -217,13 +221,15 @@ if __name__ == "__main__":
try:
getattr(cert_update_and_revocation, action)
except AttributeError as err:
- err_msg = "Currently, you can modify the certificate revocation list,"\
- " update certificates, and query certificate information.\n"\
- "example:\n"\
- "query_crt_info\n"\
- "update_crt_key cert_file_path, key_file_path\n"\
- "update_ca_crt_key ca_file_path, cert_file_path, key_file_path\n"\
- "update_certificate_crl crl_file_path"
+ err_msg = (
+ "Currently, you can modify the certificate revocation list,"
+ " update certificates, and query certificate information.\n"
+ "example:\n"
+ "query_crt_info\n"
+ "update_crt_key cert_file_path, key_file_path\n"
+ "update_ca_crt_key ca_file_path, cert_file_path, key_file_path\n"
+ "update_certificate_crl crl_file_path"
+ )
exit(err_msg)
try:
getattr(cert_update_and_revocation, action)(*_args)
diff --git a/pkg/deploy/action/implement/check_deploy_param.py b/pkg/deploy/action/implement/check_deploy_param.py
index c2129363381ade8f61fa74a164d4267e5c215632..600373496412d83e6ad209727e0b7dbec323b293 100644
--- a/pkg/deploy/action/implement/check_deploy_param.py
+++ b/pkg/deploy/action/implement/check_deploy_param.py
@@ -3,7 +3,7 @@ import os
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
-DEPLOY_PARAM_FILE = os.path.join(CUR_PATH, '../../config/deploy_param.json')
+DEPLOY_PARAM_FILE = os.path.join(CUR_PATH, "../../config/deploy_param.json")
CHECK_LIST = [
"cluster_id",
"cluster_name",
@@ -16,7 +16,7 @@ CHECK_LIST = [
"mes_ssl_switch",
"link_type",
"db_type",
- "mysql_metadata_in_cantian"
+ "mysql_metadata_in_cantian",
]
@@ -29,11 +29,16 @@ def read_file(file_path):
def check_deploy_param():
local_deploy_params = read_file(DEPLOY_PARAM_FILE)
storage_metadata_fs = local_deploy_params.get("storage_metadata_fs")
- remote_deploy_file = f"/mnt/dbdata/remote/metadata_{storage_metadata_fs}/deploy_param.json"
+ remote_deploy_file = (
+ f"/mnt/dbdata/remote/metadata_{storage_metadata_fs}/deploy_param.json"
+ )
if not os.path.exists(remote_deploy_file):
- err_msg = "%s is not exists, please check:\n" \
- "\t1、node 0 has been successfully installed.\n" \
- "\t2、storage_metadata_fs field in the configuration file same as node 0." % remote_deploy_file
+ err_msg = (
+ "%s is not exists, please check:\n"
+ "\t1、node 0 has been successfully installed.\n"
+ "\t2、storage_metadata_fs field in the configuration file same as node 0."
+ % remote_deploy_file
+ )
raise Exception(err_msg)
remote_deploy_params = read_file(remote_deploy_file)
check_failed_list = []
@@ -41,8 +46,10 @@ def check_deploy_param():
if local_deploy_params.get(check_key) != remote_deploy_params.get(check_key):
check_failed_list.append(check_key)
if check_failed_list:
- err_msg = "The configuration items of the current node are different from " \
- "those of node 0, details:%s" % check_failed_list
+ err_msg = (
+ "The configuration items of the current node are different from "
+ "those of node 0, details:%s" % check_failed_list
+ )
raise Exception(err_msg)
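# Editor's note (not part of the patch): check_deploy_param boils down to a
# key-by-key comparison of two JSON dicts. A minimal sketch under the same
# assumptions (CHECK_LIST abridged; diff_params is a hypothetical name):
import json

CHECK_LIST = ["cluster_id", "cluster_name", "db_type"]

def diff_params(local: dict, remote: dict) -> list:
    """Return the CHECK_LIST keys whose values differ between the two nodes."""
    return [key for key in CHECK_LIST if local.get(key) != remote.get(key)]

local = json.loads('{"cluster_id": 1, "cluster_name": "a", "db_type": "0"}')
remote = json.loads('{"cluster_id": 1, "cluster_name": "b", "db_type": "0"}')
assert diff_params(local, remote) == ["cluster_name"]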
diff --git a/pkg/deploy/action/implement/check_nfs4_mandatory_lock_switch.py b/pkg/deploy/action/implement/check_nfs4_mandatory_lock_switch.py
index f40f43cc2f279369b44482c6c59389a581158df8..94343a438f2c2f5a36905ced94192267558eaf88 100644
--- a/pkg/deploy/action/implement/check_nfs4_mandatory_lock_switch.py
+++ b/pkg/deploy/action/implement/check_nfs4_mandatory_lock_switch.py
@@ -10,7 +10,9 @@ from utils.client.ssh_client import SshClient
from utils.client.rest_client import read_helper
-DEPLOY_PARAMS = os.path.join(str(pathlib.Path(CUR_PATH).parent.parent), "config/deploy_param.json")
+DEPLOY_PARAMS = os.path.join(
+ str(pathlib.Path(CUR_PATH).parent.parent), "config/deploy_param.json"
+)
class CheckLockSwitch(object):
@@ -42,7 +44,9 @@ class CheckLockSwitch(object):
if "NFSV4 Mandatory Lock Switch" in key and "Enabled" in value:
break
else:
- err_msg = "Current NFSV4 Mandatory Lock Switch is disabled, details:%s" % res
+ err_msg = (
+ "Current NFSV4 Mandatory Lock Switch is disabled, details:%s" % res
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
LOGGER.info("Success to check NFSV4 Mandatory Lock Switch")
diff --git a/pkg/deploy/action/implement/check_pwd.py b/pkg/deploy/action/implement/check_pwd.py
index d112036ecf71a09d299a84465a6b9ab46b1752c1..5dc36845cda72abe0e302b54978920a975ac0dca 100644
--- a/pkg/deploy/action/implement/check_pwd.py
+++ b/pkg/deploy/action/implement/check_pwd.py
@@ -1,6 +1,7 @@
import json
import sys
import os
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../"))
from logic.common_func import exec_popen
@@ -12,7 +13,7 @@ PWD_LEN = 8
class PassWordChecker:
def __init__(self, pwd):
self.pwd = pwd
- self.user = 'ctcliuser'
+ self.user = "ctcliuser"
@staticmethod
def check_key_passwd(key_file_path, passwd):
@@ -25,7 +26,7 @@ class PassWordChecker:
stderr = stderr.replace(passwd, "****")
if ret_code:
raise Exception("The password is incorrect.")
-
+
@staticmethod
def get_crt_modulus(cert_file_path):
"""
@@ -36,7 +37,7 @@ class PassWordChecker:
if ret_code:
raise Exception("Failed to get crt modulus, output:%s" % str(stderr))
return str(stdout)
-
+
@staticmethod
def get_key_modulus(key_file_path, passwd):
"""
@@ -72,14 +73,19 @@ class PassWordChecker:
if passwd_set & cases:
types += 1
if types < 3:
- LOG.error("Error: Password must contains at least three different types of characters.")
+ LOG.error(
+ "Error: Password must contains at least three different types of characters."
+ )
return 1
# Only can contains enumerated cases
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- LOG.error("Error: There are characters that are not allowed in the password: '%s'", "".join(un_cases))
+ LOG.error(
+ "Error: There are characters that are not allowed in the password: '%s'",
+ "".join(un_cases),
+ )
return 1
return 0
@@ -103,13 +109,13 @@ class PassWordChecker:
raise Exception("The certificate and private key do not match.")
-if __name__ == '__main__':
+if __name__ == "__main__":
pwd_checker = PassWordChecker(input())
action = "check_pwd"
if len(sys.argv) > 1:
action = sys.argv[1]
operator = {
"check_pwd": pwd_checker.verify_new_passwd,
- "check_cert_pwd": pwd_checker.check_cert_passwd
+ "check_cert_pwd": pwd_checker.check_cert_passwd,
}
exit(operator.get(action)())
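# Editor's note (not part of the patch): verify_new_passwd enforces a
# "three of four character classes" rule through set intersections. A minimal
# sketch of that rule (check_classes is a hypothetical name, and the special
# character set here is an assumption, not the project's exact list):
import string

def check_classes(pwd: str) -> bool:
    """True if pwd draws from at least three of the four character classes."""
    classes = [set(string.ascii_uppercase), set(string.ascii_lowercase),
               set(string.digits), set("~!@#$%^&*()-_=+[{}];:,<.>/?")]
    return sum(1 for cls in classes if set(pwd) & cls) >= 3

assert check_classes("Ab1xxxxx") and not check_classes("abcdefgh")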
diff --git a/pkg/deploy/action/implement/config_opt.py b/pkg/deploy/action/implement/config_opt.py
index 8516d18d902fddfb1ad114e0bb65045a505349e9..a8a1b5c9cd3dd9235706327ede3be2d1780ceca8 100644
--- a/pkg/deploy/action/implement/config_opt.py
+++ b/pkg/deploy/action/implement/config_opt.py
@@ -4,13 +4,14 @@ import json
import argparse
import traceback
+
def opt_ini_conf(file_path, action, key, value):
- with open(file_path, "r", encoding="utf-8")as fp:
+ with open(file_path, "r", encoding="utf-8") as fp:
config = fp.readlines()
for i, item in enumerate(config):
if "=" not in item:
continue
- _key, _value = item.split('=', maxsplit=1)
+ _key, _value = item.split("=", maxsplit=1)
if key == _key.strip(" "):
if action == "modify":
config[i] = f"{key} = {value}\n"
@@ -40,11 +41,12 @@ def cms_opt_ini_conf(action, key, value):
def main():
update_parse = argparse.ArgumentParser()
- update_parse.add_argument("-c", "--component", dest="component",
- choices=["cms", "cantian"],
- required=True)
- update_parse.add_argument("-a", "--action", dest="action", choices=["query", "modify"],
- required=True)
+ update_parse.add_argument(
+ "-c", "--component", dest="component", choices=["cms", "cantian"], required=True
+ )
+ update_parse.add_argument(
+ "-a", "--action", dest="action", choices=["query", "modify"], required=True
+ )
update_parse.add_argument("-k", "--key", dest="key", required=True)
update_parse.add_argument("-v", "--value", dest="value", required=False)
args = update_parse.parse_args()
@@ -63,4 +65,4 @@ if __name__ == "__main__":
try:
main()
except Exception as err:
- exit(str(traceback.format_exc(limit=1)))
\ No newline at end of file
+ exit(str(traceback.format_exc(limit=1)))
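# Editor's note (not part of the patch): config_opt.py is driven via argparse,
# so invocations would look like the following (key names illustrative):
#   python3 config_opt.py -c cms -a query -k GCC_HOME
#   python3 config_opt.py -c cantian -a modify -k LOG_LEVEL -v 7
# A minimal sketch of the "key = value" rewrite that opt_ini_conf performs:
def set_ini_key(lines: list, key: str, value: str) -> list:
    """Return a copy of lines with the 'key = value' entry rewritten."""
    out = []
    for line in lines:
        current = line.split("=", maxsplit=1)[0].strip() if "=" in line else None
        out.append(f"{key} = {value}\n" if current == key else line)
    return out

assert set_ini_key(["A = 1\n", "B = 2\n"], "B", "3") == ["A = 1\n", "B = 3\n"]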
diff --git a/pkg/deploy/action/implement/get_source_version.py b/pkg/deploy/action/implement/get_source_version.py
index 19c9e68ac8a7c68b72ff0786239483bf03d5d5af..564131843c5981c609c9a91fb75b7956e29b3508 100644
--- a/pkg/deploy/action/implement/get_source_version.py
+++ b/pkg/deploy/action/implement/get_source_version.py
@@ -1,17 +1,18 @@
import sys
import os
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../"))
from implement.upgrade_version_check import UpgradeVersionCheck
from om_log import LOGGER as LOG
-if __name__ == '__main__':
+if __name__ == "__main__":
version_check = UpgradeVersionCheck()
try:
version_check.read_source_version_info()
except Exception as err:
- LOG.error('obtain source version failed with error: %s', str(err))
- exit('')
+ LOG.error("obtain source version failed with error: %s", str(err))
+ exit("")
print(version_check.source_version)
diff --git a/pkg/deploy/action/implement/update_cantian_passwd.py b/pkg/deploy/action/implement/update_cantian_passwd.py
index 55a2d1f8d5a72052b697c537d7c8010be5e4f521..b583935738e6314f48422fdaf82a871e217c73e2 100644
--- a/pkg/deploy/action/implement/update_cantian_passwd.py
+++ b/pkg/deploy/action/implement/update_cantian_passwd.py
@@ -3,7 +3,9 @@ import sys
import os
import json
-sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dbstor"))
+sys.path.append(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dbstor")
+)
from kmc_adapter import CApiWrapper
@@ -37,7 +39,9 @@ def update_mes_key_pwd(plain_text):
try:
ret_pwd = kmc_adapter.encrypt(plain_text)
except Exception as error:
- raise Exception("Failed to encrypt password of user [sys]. Error: %s" % str(error)) from error
+ raise Exception(
+ "Failed to encrypt password of user [sys]. Error: %s" % str(error)
+ ) from error
cantian_config = "/mnt/dbdata/local/cantian/tmp/data/cfg/cantiand.ini"
cms_config = "/opt/cantian/cms/cfg/cms.ini"
@@ -49,7 +53,5 @@ def update_mes_key_pwd(plain_text):
if __name__ == "__main__":
passwd = input()
action = sys.argv[1]
- options = {
- "update_mes_key_pwd": update_mes_key_pwd
- }
+ options = {"update_mes_key_pwd": update_mes_key_pwd}
options.get(action)(passwd.strip())
diff --git a/pkg/deploy/action/implement/upgrade_version_check.py b/pkg/deploy/action/implement/upgrade_version_check.py
index 96933b3d9a0346a8058f2d146457e9c9506eb684..d93a1399e77e41a33314c7992dae0f615173bf65 100644
--- a/pkg/deploy/action/implement/upgrade_version_check.py
+++ b/pkg/deploy/action/implement/upgrade_version_check.py
@@ -7,8 +7,8 @@ CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../"))
from om_log import LOGGER as LOG
-VERSION_PREFIX = 'Version:'
-SUB_VERSION_PREFIX = ('B', 'SP')
+VERSION_PREFIX = "Version:"
+SUB_VERSION_PREFIX = ("B", "SP")
class UpgradeVersionCheck:
@@ -16,12 +16,14 @@ class UpgradeVersionCheck:
def __init__(self, white_list=None, upgrade_mode=None):
self.white_list_file = white_list
self.upgrade_mode = upgrade_mode
- self.source_version_file = str(Path('/opt/cantian/versions.yml'))
- self.white_list_dict = {} # 格式:{SOURCE-VERSION: [UPGRADE-MODE, CHANGE-SYSTEM]}
- self.source_version = ''
+ self.source_version_file = str(Path("/opt/cantian/versions.yml"))
+ self.white_list_dict = (
+ {}
+ ) # format: {SOURCE-VERSION: [UPGRADE-MODE, CHANGE-SYSTEM]}
+ self.source_version = ""
def process_white_list(self):
- with open(self.white_list_file, 'r', encoding='utf-8') as file:
+ with open(self.white_list_file, "r", encoding="utf-8") as file:
white_list_info = file.readlines()
for white_list_detail in white_list_info[1:]:
@@ -31,8 +33,8 @@ class UpgradeVersionCheck:
self.white_list_dict[details[0]] = [details[1], details[2]]
def read_source_version_info(self):
- version = ''
- with open(self.source_version_file, 'r', encoding='utf-8') as file:
+ version = ""
+ with open(self.source_version_file, "r", encoding="utf-8") as file:
source_version_info = file.readlines()
for line in source_version_info:
@@ -46,52 +48,62 @@ class UpgradeVersionCheck:
for white_list_version, white_list_detail in self.white_list_dict.items():
if self.upgrade_mode not in white_list_detail[0]:
continue
- *white_main_version, white_sub_version = white_list_version.split('.')
- *source_main_version, source_sub_version = self.source_version.split('.')
+ *white_main_version, white_sub_version = white_list_version.split(".")
+ *source_main_version, source_sub_version = self.source_version.split(".")
if source_main_version != white_main_version:
continue
- if white_sub_version == '*' or white_sub_version == source_sub_version:
- result = "{} {} {}".format(self.source_version, white_list_detail[0], white_list_detail[1])
+ if white_sub_version == "*" or white_sub_version == source_sub_version:
+ result = "{} {} {}".format(
+ self.source_version, white_list_detail[0], white_list_detail[1]
+ )
break
- if '-' in white_sub_version:
- min_version, max_version = white_sub_version.split('-')
- trans_map = str.maketrans('', '', digits)
+ if "-" in white_sub_version:
+ min_version, max_version = white_sub_version.split("-")
+ trans_map = str.maketrans("", "", digits)
source_pre_fix = source_sub_version.translate(trans_map)
- if source_pre_fix not in SUB_VERSION_PREFIX: # 源版本号开头不是B或者SPH返回结果为空
+ if (
+ source_pre_fix not in SUB_VERSION_PREFIX
+ ): # return an empty result if the source sub-version prefix is not B or SP
break
- sub_version_min_num = min_version.replace(source_pre_fix, '')
- sub_version_max_num = max_version.replace(source_pre_fix, '')
- sub_source_version_num = source_sub_version.replace(source_pre_fix, '')
- if sub_version_min_num.isdigit() \
- and sub_version_max_num.isdigit() \
- and int(sub_version_max_num) >= int(sub_source_version_num) >= int(sub_version_min_num):
- result = "{} {} {}".format(self.source_version, white_list_detail[0], white_list_detail[1])
+ sub_version_min_num = min_version.replace(source_pre_fix, "")
+ sub_version_max_num = max_version.replace(source_pre_fix, "")
+ sub_source_version_num = source_sub_version.replace(source_pre_fix, "")
+ if (
+ sub_version_min_num.isdigit()
+ and sub_version_max_num.isdigit()
+ and int(sub_version_max_num)
+ >= int(sub_source_version_num)
+ >= int(sub_version_min_num)
+ ):
+ result = "{} {} {}".format(
+ self.source_version, white_list_detail[0], white_list_detail[1]
+ )
break
return result
-if __name__ == '__main__':
+if __name__ == "__main__":
white_list_input = sys.argv[1]
upgrade_mode_input = sys.argv[2]
version_check = UpgradeVersionCheck(white_list_input, upgrade_mode_input)
try:
version_check.process_white_list()
except Exception as err:
- LOG.error('obtain source version white list failed with error: %s', str(err))
- exit('')
+ LOG.error("obtain source version white list failed with error: %s", str(err))
+ exit("")
try:
version_check.read_source_version_info()
except Exception as err:
- LOG.error('obtain source version failed with error: %s', str(err))
- exit('')
+ LOG.error("obtain source version failed with error: %s", str(err))
+ exit("")
try:
print(version_check.source_version_check())
except Exception as err:
- LOG.error('source version check failed with error: %s', str(err))
- exit('')
+ LOG.error("source version check failed with error: %s", str(err))
+ exit("")
diff --git a/pkg/deploy/action/inspection/declear_env.py b/pkg/deploy/action/inspection/declear_env.py
index 3c0a2d90a9f3476f81baa2701dd87ddc4cbf7769..2a55d0f0396289d3d67702df73d2df17ffc5a633 100644
--- a/pkg/deploy/action/inspection/declear_env.py
+++ b/pkg/deploy/action/inspection/declear_env.py
@@ -26,9 +26,9 @@ class DeclearEnv:
string: cantian or mysql
"""
if os.path.exists(self.version_file):
- return 'cantian'
+ return "cantian"
- return 'mysql'
+ return "mysql"
def get_executor(self):
"""
diff --git a/pkg/deploy/action/inspection/generate_html_results.py b/pkg/deploy/action/inspection/generate_html_results.py
index 7b4770c4363203edd38d5903c158278850745bff..2b367f42df2fb60fac18b53c78dde920c8b406eb 100644
--- a/pkg/deploy/action/inspection/generate_html_results.py
+++ b/pkg/deploy/action/inspection/generate_html_results.py
@@ -3,7 +3,7 @@ import os
import stat
from pathlib import Path
-TEMPLETE_BODY_EN = '''
+TEMPLETE_BODY_EN = """
@@ -15,9 +15,9 @@ TEMPLETE_BODY_EN = '''
{}
-'''
+"""
-TEMPLETE_BODY_ZH = '''
+TEMPLETE_BODY_ZH = """
@@ -29,8 +29,8 @@ TEMPLETE_BODY_ZH = '''
{}
-'''
-TEMPLETE_DEV = '''
+"""
+TEMPLETE_DEV = """
{inspection_name}
@@ -86,7 +86,7 @@ TEMPLETE_DEV = '''
-'''
+"""
class GenHtmlRes(object):
@@ -97,10 +97,10 @@ class GenHtmlRes(object):
def write_file(self, content, lang="zh"):
file_name = "{}_inspection_result_{}.html".format(self.node_info, lang)
- file_path = str(Path(self.file_path + '/' + file_name))
+ file_path = str(Path(self.file_path + "/" + file_name))
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.write(content)
def generate_html_zh(self):
@@ -108,50 +108,72 @@ class GenHtmlRes(object):
for item in self.inspect_res:
resource_zh = item.get("resource_zh")
result = "成功" if item.get("inspection_result") == "success" else "失败"
- display = "none" if item.get("inspection_result") == "success" else 'contents'
- inspection_detail = json.dumps(item.get("inspection_detail"), indent=4, ensure_ascii=False).strip("\"")
- method_detail = json.dumps(resource_zh.get("检查步骤"), indent=4, ensure_ascii=False).strip("\"")
- criteria = json.dumps(resource_zh.get("检查方法"), indent=4, ensure_ascii=False).strip("\"")
- suggestion = json.dumps(resource_zh.get("修复建议"), indent=4, ensure_ascii=False).strip("\"")
- dev_one = TEMPLETE_DEV.format(inspection_name=item.get("description_zn"),
- information="原始信息",
- information_detail=inspection_detail.replace('\\n', "
"),
- method="检查步骤",
- method_detail=method_detail.replace('\\n', "
"),
- criteria="检查方法",
- criteria_detail=criteria.replace('\\n', "
"),
- suggestion="修复建议",
- suggestion_detail=suggestion.replace('\\n', "
"),
- result="检查结果",
- result_detail=result,
- display=display
- )
+ display = (
+ "none" if item.get("inspection_result") == "success" else "contents"
+ )
+ inspection_detail = json.dumps(
+ item.get("inspection_detail"), indent=4, ensure_ascii=False
+ ).strip('"')
+ method_detail = json.dumps(
+ resource_zh.get("检查步骤"), indent=4, ensure_ascii=False
+ ).strip('"')
+ criteria = json.dumps(
+ resource_zh.get("检查方法"), indent=4, ensure_ascii=False
+ ).strip('"')
+ suggestion = json.dumps(
+ resource_zh.get("修复建议"), indent=4, ensure_ascii=False
+ ).strip('"')
+ dev_one = TEMPLETE_DEV.format(
+ inspection_name=item.get("description_zn"),
+ information="原始信息",
+ information_detail=inspection_detail.replace("\\n", "
"),
+ method="检查步骤",
+ method_detail=method_detail.replace("\\n", "
"),
+ criteria="检查方法",
+ criteria_detail=criteria.replace("\\n", "
"),
+ suggestion="修复建议",
+ suggestion_detail=suggestion.replace("\\n", "
"),
+ result="检查结果",
+ result_detail=result,
+ display=display,
+ )
dev_list += dev_one
zh_html_res = TEMPLETE_BODY_ZH.format(dev_list)
- self.write_file(zh_html_res, 'zh')
+ self.write_file(zh_html_res, "zh")
def generate_html_en(self):
dev_list = ""
for item in self.inspect_res:
resource_en = item.get("resource_en")
- display = "none" if item.get("inspection_result") == "success" else 'contents'
- inspection_detail = json.dumps(item.get("inspection_detail"), indent=4, ensure_ascii=False).strip("\"")
- method_detail = json.dumps(resource_en.get("method"), indent=4, ensure_ascii=False).strip("\"")
- criteria = json.dumps(resource_en.get("description"), indent=4, ensure_ascii=False).strip("\"")
- suggestion = json.dumps(resource_en.get("suggestion"), indent=4, ensure_ascii=False).strip("\"")
- dev_one = TEMPLETE_DEV.format(inspection_name=item.get("description_en"),
- information="information",
- information_detail=inspection_detail.replace('\\n', "
"),
- method="method",
- method_detail=method_detail.replace('\\n', "
"),
- criteria="description",
- criteria_detail=criteria.replace('\\n', "
"),
- suggestion="suggestion",
- suggestion_detail=suggestion.replace('\\n', "
"),
- result="result",
- result_detail=item.get("inspection_result"),
- display=display
- )
+ display = (
+ "none" if item.get("inspection_result") == "success" else "contents"
+ )
+ inspection_detail = json.dumps(
+ item.get("inspection_detail"), indent=4, ensure_ascii=False
+ ).strip('"')
+ method_detail = json.dumps(
+ resource_en.get("method"), indent=4, ensure_ascii=False
+ ).strip('"')
+ criteria = json.dumps(
+ resource_en.get("description"), indent=4, ensure_ascii=False
+ ).strip('"')
+ suggestion = json.dumps(
+ resource_en.get("suggestion"), indent=4, ensure_ascii=False
+ ).strip('"')
+ dev_one = TEMPLETE_DEV.format(
+ inspection_name=item.get("description_en"),
+ information="information",
+ information_detail=inspection_detail.replace("\\n", "
"),
+ method="method",
+ method_detail=method_detail.replace("\\n", "
"),
+ criteria="description",
+ criteria_detail=criteria.replace("\\n", "
"),
+ suggestion="suggestion",
+ suggestion_detail=suggestion.replace("\\n", "
"),
+ result="result",
+ result_detail=item.get("inspection_result"),
+ display=display,
+ )
dev_list += dev_one
en_html_res = TEMPLETE_BODY_EN.format(dev_list)
- self.write_file(en_html_res, 'en')
+ self.write_file(en_html_res, "en")
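# Editor's note (not part of the patch): write_file uses os.open + os.fdopen
# so the HTML report is created with owner-only permissions rather than the
# umask default. A minimal sketch of that pattern (the path is illustrative):
import os
import stat

def write_private(path: str, content: str) -> None:
    """Create or truncate path with owner read/write bits and write content."""
    modes = stat.S_IWRITE | stat.S_IRUSR
    flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
    with os.fdopen(os.open(path, flags, modes), "w", encoding="utf-8") as file:
        file.write(content)

write_private("/tmp/inspection_result_demo.html", "<html></html>")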
diff --git a/pkg/deploy/action/inspection/inspection_cantian.py b/pkg/deploy/action/inspection/inspection_cantian.py
index 7805a2527460467461109ebbab71fccb04ba8cbd..7ea06e328ea942e747961242731842b253046102 100644
--- a/pkg/deploy/action/inspection/inspection_cantian.py
+++ b/pkg/deploy/action/inspection/inspection_cantian.py
@@ -8,9 +8,9 @@ from pathlib import Path
from declear_env import DeclearEnv
from inspection_task import InspectionTask
-DEPLOY_CONFIG_FILE = str(Path('/opt/cantian/config/deploy_param.json'))
+DEPLOY_CONFIG_FILE = str(Path("/opt/cantian/config/deploy_param.json"))
DIR_NAME, _ = os.path.split(os.path.abspath(__file__))
-INSPECTION_JSON_FILE = str(Path('{}/inspection_config.json'.format(DIR_NAME)))
+INSPECTION_JSON_FILE = str(Path("{}/inspection_config.json".format(DIR_NAME)))
class CantianInspection(InspectionTask):
@@ -38,10 +38,10 @@ class CantianInspection(InspectionTask):
:return:
string: node id
"""
- with open(DEPLOY_CONFIG_FILE, encoding='utf-8') as file:
+ with open(DEPLOY_CONFIG_FILE, encoding="utf-8") as file:
deploy_info = json.load(file)
- node_id = deploy_info.get('node_id').split(':')[0]
- cms_ip = deploy_info.get('cms_ip').split(';')
+ node_id = deploy_info.get("node_id").split(":")[0]
+ cms_ip = deploy_info.get("cms_ip").split(";")
node_ip = cms_ip[int(node_id)]
return "cantian_" + node_ip
@@ -51,13 +51,13 @@ class CantianInspection(InspectionTask):
raise ValueError(f"inspection must be executed by {self.deply_user}")
def task_execute_single(self, inspection_detail, name_pwd, ip_port):
- echo_sentence = ''
- _ip = ''
- _port = ''
- inspection_item_file = inspection_detail.get('inspection_file_path')
- inspection_item_input = inspection_detail.get('script_input')
- component_belong = inspection_detail.get('component')
- time_out = int(inspection_detail.get('time_out'))
+ echo_sentence = ""
+ _ip = ""
+ _port = ""
+ inspection_item_file = inspection_detail.get("inspection_file_path")
+ inspection_item_input = inspection_detail.get("script_input")
+ component_belong = inspection_detail.get("component")
+ time_out = int(inspection_detail.get("time_out"))
if name_pwd:
echo_sentence = f'echo -e "{name_pwd[0]}\n{name_pwd[1]}"'
@@ -67,31 +67,59 @@ class CantianInspection(InspectionTask):
_port = ip_port[1]
if component_belong not in self.user_map.keys():
- raise Exception(f'Module {component_belong} not exist')
+ raise Exception(f"Module {component_belong} not exist")
if inspection_item_input:
if echo_sentence:
echo_cmd = shlex.split(echo_sentence)
- echo_popen = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE, shell=False)
- single_inspection_popen = subprocess.Popen([f'/usr/bin/python3', inspection_item_file,
- inspection_item_input, _ip, _port], stdin=echo_popen.stdout,
- stdout=subprocess.PIPE, shell=False)
+ echo_popen = subprocess.Popen(
+ echo_cmd, stdout=subprocess.PIPE, shell=False
+ )
+ single_inspection_popen = subprocess.Popen(
+ [
+ f"/usr/bin/python3",
+ inspection_item_file,
+ inspection_item_input,
+ _ip,
+ _port,
+ ],
+ stdin=echo_popen.stdout,
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
else:
- single_inspection_popen = subprocess.Popen([f'{echo_sentence}/usr/bin/python3', inspection_item_file,
- inspection_item_input, _ip, _port],
- stdout=subprocess.PIPE, shell=False)
+ single_inspection_popen = subprocess.Popen(
+ [
+ f"{echo_sentence}/usr/bin/python3",
+ inspection_item_file,
+ inspection_item_input,
+ _ip,
+ _port,
+ ],
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
else:
if echo_sentence:
echo_cmd = shlex.split(echo_sentence)
- echo_popen = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE, shell=False)
- single_inspection_popen = subprocess.Popen([f'/usr/bin/python3', inspection_item_file,
- _ip, _port], stdin=echo_popen.stdout,
- stdout=subprocess.PIPE, shell=False)
+ echo_popen = subprocess.Popen(
+ echo_cmd, stdout=subprocess.PIPE, shell=False
+ )
+ single_inspection_popen = subprocess.Popen(
+ [f"/usr/bin/python3", inspection_item_file, _ip, _port],
+ stdin=echo_popen.stdout,
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
else:
- single_inspection_popen = subprocess.Popen([f'/usr/bin/python3', inspection_item_file,
- _ip, _port],
- stdout=subprocess.PIPE, shell=False)
+ single_inspection_popen = subprocess.Popen(
+ [f"/usr/bin/python3", inspection_item_file, _ip, _port],
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
- single_inspection_result = single_inspection_popen.communicate(timeout=time_out)[0].decode('utf-8')
+ single_inspection_result = single_inspection_popen.communicate(
+ timeout=time_out
+ )[0].decode("utf-8")
- return single_inspection_result
\ No newline at end of file
+ return single_inspection_result
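# Editor's note (not part of the patch): every branch above builds the same
# two-stage pipeline: an echo child whose stdout feeds credentials into the
# inspection script's stdin. A minimal sketch of that plumbing (the child
# command here is illustrative, not an actual inspection script):
import shlex
import subprocess

echo = subprocess.Popen(shlex.split('echo -e "user\\npass"'),
                        stdout=subprocess.PIPE, shell=False)
child = subprocess.Popen(
    ["/usr/bin/python3", "-c", "import sys; print(sys.stdin.read().split()[0])"],
    stdin=echo.stdout, stdout=subprocess.PIPE, shell=False)
echo.stdout.close()  # let echo receive SIGPIPE if the child exits early
print(child.communicate(timeout=60)[0].decode("utf-8").strip())  # -> user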
diff --git a/pkg/deploy/action/inspection/inspection_mysql.py b/pkg/deploy/action/inspection/inspection_mysql.py
index 51eb1b552e6a87f291b8d031fe414b11b288f7e9..b4dcc508c0d1eb4b9fe0fe367a08f87f505931f0 100644
--- a/pkg/deploy/action/inspection/inspection_mysql.py
+++ b/pkg/deploy/action/inspection/inspection_mysql.py
@@ -9,7 +9,7 @@ from inspection_task import InspectionTask
DIR_NAME, _ = os.path.split(os.path.abspath(__file__))
-INSPECTION_JSON_FILE = str(Path('{}/mysql_inspection_config.json'.format(DIR_NAME)))
+INSPECTION_JSON_FILE = str(Path("{}/mysql_inspection_config.json".format(DIR_NAME)))
DEPLOY_UID = 5000
LOCALHOST = "127.0.0.1"
@@ -44,7 +44,7 @@ class MysqlInspection(InspectionTask):
manage_ip = socket.gethostbyname(socket.gethostname())
except Exception as err:
manage_ip = LOCALHOST
- return 'mysql_' + str(manage_ip)
+ return "mysql_" + str(manage_ip)
@staticmethod
def check_executor():
@@ -52,21 +52,29 @@ class MysqlInspection(InspectionTask):
raise ValueError(f"inspection must be executed by root")
def task_execute_single(self, inspection_detail, name_pwd=None, ip_port=None):
- inspection_item_file = inspection_detail.get('inspection_file_path')
- inspection_item_input = inspection_detail.get('script_input')
- component_belong = inspection_detail.get('component')
- time_out = int(inspection_detail.get('time_out'))
+ inspection_item_file = inspection_detail.get("inspection_file_path")
+ inspection_item_input = inspection_detail.get("script_input")
+ component_belong = inspection_detail.get("component")
+ time_out = int(inspection_detail.get("time_out"))
if component_belong not in self.user_map.keys():
- raise ValueError(f'Module {component_belong} not exist')
+ raise ValueError(f"Module {component_belong} not exist")
if inspection_item_input:
- single_inspection_popen = subprocess.Popen(['/usr/bin/python3', inspection_item_file,
- inspection_item_input], stdout=subprocess.PIPE, shell=False)
+ single_inspection_popen = subprocess.Popen(
+ ["/usr/bin/python3", inspection_item_file, inspection_item_input],
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
else:
- single_inspection_popen = subprocess.Popen(['/usr/bin/python3', inspection_item_file],
- stdout=subprocess.PIPE, shell=False)
+ single_inspection_popen = subprocess.Popen(
+ ["/usr/bin/python3", inspection_item_file],
+ stdout=subprocess.PIPE,
+ shell=False,
+ )
- single_inspection_result = single_inspection_popen.communicate(timeout=time_out)[0].decode('utf-8')
+ single_inspection_result = single_inspection_popen.communicate(
+ timeout=time_out
+ )[0].decode("utf-8")
return single_inspection_result
diff --git a/pkg/deploy/action/inspection/inspection_scripts/cms/cms_res_check.py b/pkg/deploy/action/inspection/inspection_scripts/cms/cms_res_check.py
index 5376dba04696523844c0d769a1a745d86f31b113..7c4a5106c0a853b62b204937ad7a1777c509c775 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/cms/cms_res_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/cms/cms_res_check.py
@@ -5,33 +5,42 @@ import os
import json
import sys
from pathlib import Path
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
from common_func import _exec_popen
def parse_cmd_result(cmd_res):
- keys = ['RESOURCE_NAME', 'RESOURCE_TYPE', 'RESOURCE_GROUP_NAME', 'START_TIMEOUT(ms)', 'STOP_TIMEOUT(ms)',
- 'CHECK_TIMEOUT(ms)', 'CHECK_INTERVAL(ms)', 'HB_TIMEOUT(ms)']
+ keys = [
+ "RESOURCE_NAME",
+ "RESOURCE_TYPE",
+ "RESOURCE_GROUP_NAME",
+ "START_TIMEOUT(ms)",
+ "STOP_TIMEOUT(ms)",
+ "CHECK_TIMEOUT(ms)",
+ "CHECK_INTERVAL(ms)",
+ "HB_TIMEOUT(ms)",
+ ]
values = cmd_res.split()
stat_json = {}
- for (idx, key) in enumerate(keys):
+ for idx, key in enumerate(keys):
stat_json[key] = values[idx]
res = True
- if stat_json.get('HB_TIMEOUT(ms)') != '10000':
- res = False
- return (res)
+ if stat_json.get("HB_TIMEOUT(ms)") != "10000":
+ res = False
+ return res
def fetch_cms_hbtime(logger):
logger.info("cms res check start!")
- cmd = 'source ~/.bashrc && cms res -list | tail -n +2'
+ cmd = "source ~/.bashrc && cms res -list | tail -n +2"
ret_code, output, stderr = _exec_popen(cmd)
- output = output.split('\n')
+ output = output.split("\n")
if ret_code:
logger.error("get cms res failed, std_err: %s", stderr)
result_json = {}
- result_json['data'] = {}
+ result_json["data"] = {}
result_json["error"] = {}
result_json["error"]["code"] = 0
result_json["error"]["description"] = ""
@@ -39,28 +48,30 @@ def fetch_cms_hbtime(logger):
result_json["error"]["code"] = -1
result_json["error"]["description"] = "cms res check error!"
logger.error("cms res check error!")
- return (result_json)
+ return result_json
for cmd_res in output:
(res) = parse_cmd_result(cmd_res)
if res:
- result_json['data']["RESULT"] = 'HB_TIMEOUT is 10 seconds'
+ result_json["data"]["RESULT"] = "HB_TIMEOUT is 10 seconds"
else:
result_json["error"]["code"] = -1
- result_json['error']["description"] = '[WAR]: HB_TIMEOUT greater than 10 seconds'
- result_json['data']["RESULT"] = output
+ result_json["error"][
+ "description"
+ ] = "[WAR]: HB_TIMEOUT greater than 10 seconds"
+ result_json["data"]["RESULT"] = output
logger.info("cms res check succ!")
- return (result_json)
+ return result_json
def fetch_cls_stat():
# check if user is root
- cantian_log = setup('cantian')
- if(os.getuid() == 0):
+ cantian_log = setup("cantian")
+ if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
(result_json) = fetch_cms_hbtime(cantian_log)
return json.dumps(result_json, indent=1)
-if __name__ == '__main__':
+if __name__ == "__main__":
print(fetch_cls_stat())
diff --git a/pkg/deploy/action/inspection/inspection_scripts/cms/cms_stat_check.py b/pkg/deploy/action/inspection/inspection_scripts/cms/cms_stat_check.py
index a23151e73f6aa193273428e6618f814ed7013e2d..45a7b52ebf95fca0297942a9056b76b4dd824617 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/cms/cms_stat_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/cms/cms_stat_check.py
@@ -6,39 +6,49 @@ import json
import sys
from pathlib import Path
-sys.path.append('/opt/cantian/action/inspection')
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
from common_func import _exec_popen
def parse_node_stat(node_stat):
- keys = ['NODE_ID', 'NAME', 'STAT', 'PRE_STAT', 'TARGET_STAT', 'WORK_STAT', 'SESSION_ID', 'INSTANCE_ID', 'ROLE']
+ keys = [
+ "NODE_ID",
+ "NAME",
+ "STAT",
+ "PRE_STAT",
+ "TARGET_STAT",
+ "WORK_STAT",
+ "SESSION_ID",
+ "INSTANCE_ID",
+ "ROLE",
+ ]
values = node_stat.split()
stat_json = {}
- node_info = {'NODE STAT': 'OFFLINE', 'NODE ROLE': 'NULL'}
- for (idx, key) in enumerate(keys):
+ node_info = {"NODE STAT": "OFFLINE", "NODE ROLE": "NULL"}
+ for idx, key in enumerate(keys):
stat_json[key] = values[idx]
online = False
reformer = False
- if stat_json.get('STAT') == 'ONLINE':
+ if stat_json.get("STAT") == "ONLINE":
online = True
- node_info['NODE STAT'] = 'ONLINE'
- if stat_json.get('ROLE') == 'REFORMER':
+ node_info["NODE STAT"] = "ONLINE"
+ if stat_json.get("ROLE") == "REFORMER":
reformer = True
- node_info['NODE ROLE'] = 'REFORMER'
+ node_info["NODE ROLE"] = "REFORMER"
return online, reformer, node_info
def fetch_cms_stat(logger):
logger.info("cms stat check start!")
- cmd = 'source ~/.bashrc && cms stat | tail -n +2'
+ cmd = "source ~/.bashrc && cms stat | tail -n +2"
ret_code, output, stderr = _exec_popen(cmd)
- output = output.split('\n')
+ output = output.split("\n")
if ret_code:
logger.error("get cms res information failed, std_err: %s", stderr)
cluster_stat = {}
result_json = {}
- result_json['data'] = {}
+ result_json["data"] = {}
result_json["error"] = {}
result_json["error"]["code"] = 0
result_json["error"]["description"] = ""
@@ -57,20 +67,20 @@ def fetch_cms_stat(logger):
if reformer:
refomer_stat = True
if online_cnt == len(output) and refomer_stat is True:
- result_json['data']['RESULT'] = 'CLUSTER STAT NORMAL'
+ result_json["data"]["RESULT"] = "CLUSTER STAT NORMAL"
else:
result_json["error"]["code"] = 1
result_json["error"]["description"] = detail_json
return result_json
detail_json.append(cluster_stat)
- result_json['data']['DETAIL'] = detail_json
+ result_json["data"]["DETAIL"] = detail_json
logger.info("cms stat check succ!")
return result_json
def fetch_cls_stat():
# check if user is root
- cantian_log = setup('cantian')
+ cantian_log = setup("cantian")
if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
@@ -78,5 +88,5 @@ def fetch_cls_stat():
return json.dumps(result_json, indent=1)
-if __name__ == '__main__':
+if __name__ == "__main__":
print(fetch_cls_stat())
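# Editor's note (not part of the patch): parse_node_stat pairs a fixed header
# list with whitespace-split columns; dict(zip(...)) expresses the same idea
# directly. A minimal sketch with an abridged column set:
keys = ["NODE_ID", "NAME", "STAT", "ROLE"]
row = "0 db1 ONLINE REFORMER"
stat_json = dict(zip(keys, row.split()))
assert stat_json["STAT"] == "ONLINE" and stat_json["ROLE"] == "REFORMER"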
diff --git a/pkg/deploy/action/inspection/inspection_scripts/cms/cms_version_check.py b/pkg/deploy/action/inspection/inspection_scripts/cms/cms_version_check.py
index db44c08af911d279c29dfbc19cabf32707faefcd..8e09abc74b4ec33bfff6252273e8bbfeea9ef678 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/cms/cms_version_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/cms/cms_version_check.py
@@ -5,14 +5,15 @@ import os
import json
import sys
from pathlib import Path
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
from common_func import _exec_popen
def fetch_cms_version(logger):
logger.info("cms version check start!")
- cmd = "source ~/.bashrc && cms -help |head -n 1 | grep -oP \"\K\d+\.\d+\""
+ cmd = r'source ~/.bashrc && cms -help | head -n 1 | grep -oP "\K\d+\.\d+"'
ret_code, output, stderr = _exec_popen(cmd)
if ret_code:
logger.error("get cms help information failed, std_err: %s", stderr)
@@ -23,7 +24,7 @@ def fetch_cms_version(logger):
raise Exception("Get cms version failed.")
version = output.strip()
result_json = {}
- result_json['data'] = {}
+ result_json["data"] = {}
result_json["error"] = {}
result_json["error"]["code"] = 0
result_json["error"]["description"] = ""
@@ -31,21 +32,23 @@ def fetch_cms_version(logger):
if version != output_yml:
logger.error("cms version is different from the version.yml")
result_json["error"]["code"] = 1
- result_json["error"]["description"] = "get cms help information failed, std_err " \
- "or cms version is different from the version.yml"
+ result_json["error"]["description"] = (
+ "get cms help information failed, std_err "
+ "or cms version is different from the version.yml"
+ )
logger.info("cms version check succ!")
- return (result_json)
+ return result_json
def fetch_cls_stat():
# check if user is root
- cantian_log = setup('cantian')
- if(os.getuid() == 0):
+ cantian_log = setup("cantian")
+ if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
(result_json) = fetch_cms_version(cantian_log)
return json.dumps(result_json, indent=1)
-if __name__ == '__main__':
+if __name__ == "__main__":
print(fetch_cls_stat())
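# Editor's note (not part of the patch): "\K" and "\d" are not recognized
# Python escape sequences, so grep -oP patterns embedded in command strings
# are safest as raw strings (as fixed above). A minimal check:
pattern = r'grep -oP "\K\d+\.\d+"'
assert "\\K" in pattern  # the backslash reaches the shell intact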
diff --git a/pkg/deploy/action/inspection/inspection_scripts/cms/common_func.py b/pkg/deploy/action/inspection/inspection_scripts/cms/common_func.py
index 633f8522220020fbf82ae63a2b221e429065a627..f93d227dc242134cb0a17c6ed9c79f81b0f9b333 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/cms/common_func.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/cms/common_func.py
@@ -15,8 +15,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -41,4 +46,4 @@ def _exec_popen(cmd, values=None):
if stderr[-1:] == os.linesep:
stderr = stderr[:-1]
- return pobj.returncode, stdout, stderr
\ No newline at end of file
+ return pobj.returncode, stdout, stderr
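# Editor's note (not part of the patch): _exec_popen starts a bare "bash"
# with shell=False and writes the command to its stdin, so shell syntax like
# "source ~/.bashrc && ..." still works without shell=True. A minimal sketch:
import subprocess

def run_in_bash(cmd: str):
    """Run one shell command line by feeding it to a bash child on stdin."""
    pobj = subprocess.Popen(["bash"], shell=False, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = pobj.communicate(cmd.encode("utf-8"))
    return pobj.returncode, stdout.decode().strip(), stderr.decode().strip()

code, out, _ = run_in_bash("echo hello | tr a-z A-Z")
assert code == 0 and out == "HELLO"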
diff --git a/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_check.py b/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_check.py
index f6563749565037d4f5d1923e08e07e8f09153ef8..d411070c58331d1a96083e87c46c59b326e073d3 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_check.py
@@ -8,114 +8,140 @@ from pathlib import Path
class OmChecker:
def __init__(self):
- self.decode_mod = 'utf-8'
- self.component_check_order = ['cms', 'cantian', 'cantian_exporter']
- self.check_file_parent_path = '/opt/cantian/action'
- self.check_file = 'check_status.sh'
- self.check_daemon_cmd = 'pgrep -f cantian_daemon'
- self.check_timer_cmd = 'systemctl is-active cantian.timer'
+ self.decode_mod = "utf-8"
+ self.component_check_order = ["cms", "cantian", "cantian_exporter"]
+ self.check_file_parent_path = "/opt/cantian/action"
+ self.check_file = "check_status.sh"
+ self.check_daemon_cmd = "pgrep -f cantian_daemon"
+ self.check_timer_cmd = "systemctl is-active cantian.timer"
self.check_res_flag = True
self.check_note = {
- 'cms': 'unknown',
- 'cantian': 'unknown',
- 'ct_om': 'unknown',
- 'cantian_exporter': 'unknown',
- 'cantian_daemon': 'unknown',
- 'cantian_timer': 'unknown'
- }
- self.format_output = {
- 'data': {},
- 'error': {
- 'code': 0,
- 'description': ''
- }
+ "cms": "unknown",
+ "cantian": "unknown",
+ "ct_om": "unknown",
+ "cantian_exporter": "unknown",
+ "cantian_daemon": "unknown",
+ "cantian_timer": "unknown",
}
+ self.format_output = {"data": {}, "error": {"code": 0, "description": ""}}
def check_ctom(self):
- key_file = 'ctmgr/uds_server.py'
- check_popen = subprocess.Popen(['/usr/bin/pgrep', '-f', key_file],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
+ key_file = "ctmgr/uds_server.py"
+ check_popen = subprocess.Popen(
+ ["/usr/bin/pgrep", "-f", key_file],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
ct_om_pid, _ = check_popen.communicate(timeout=60)
if ct_om_pid.decode(self.decode_mod):
- self.check_note['ct_om'] = 'online'
+ self.check_note["ct_om"] = "online"
else:
- self.check_note['ct_om'] = 'offline'
+ self.check_note["ct_om"] = "offline"
self.check_res_flag = False
def check_components(self):
for component in self.component_check_order:
- script_path = str(Path(os.path.join(self.check_file_parent_path, component, self.check_file)))
- check_popen = subprocess.Popen(['/usr/bin/bash', script_path],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
+ script_path = str(
+ Path(
+ os.path.join(
+ self.check_file_parent_path, component, self.check_file
+ )
+ )
+ )
+ check_popen = subprocess.Popen(
+ ["/usr/bin/bash", script_path],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
_, err = check_popen.communicate(timeout=60)
if err.decode(self.decode_mod):
continue
check_result = check_popen.returncode
if check_result:
- self.check_note[component] = 'offline'
+ self.check_note[component] = "offline"
self.check_res_flag = False
else:
- self.check_note[component] = 'online'
+ self.check_note[component] = "online"
def check_daemon(self):
- daemon = subprocess.Popen(shlex.split(self.check_daemon_cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- shell=False)
+ daemon = subprocess.Popen(
+ shlex.split(self.check_daemon_cmd),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
output, err = daemon.communicate(timeout=60)
if not err.decode(self.decode_mod):
if output.decode(self.decode_mod):
- self.check_note['cantian_daemon'] = 'online'
+ self.check_note["cantian_daemon"] = "online"
else:
- self.check_note['cantian_daemon'] = 'offline'
+ self.check_note["cantian_daemon"] = "offline"
self.check_res_flag = False
def check_cantian_timer(self):
- daemon = subprocess.Popen(shlex.split(self.check_timer_cmd), stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, shell=False)
+ daemon = subprocess.Popen(
+ shlex.split(self.check_timer_cmd),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
output, err = daemon.communicate(timeout=60)
if not err.decode(self.decode_mod):
- if output.decode(self.decode_mod).strip() == 'active':
- self.check_note['cantian_timer'] = 'active'
+ if output.decode(self.decode_mod).strip() == "active":
+ self.check_note["cantian_timer"] = "active"
- if output.decode(self.decode_mod).strip() == 'inactive':
- self.check_note['cantian_timer'] = 'inactive'
+ if output.decode(self.decode_mod).strip() == "inactive":
+ self.check_note["cantian_timer"] = "inactive"
self.check_res_flag = False
def get_format_output(self):
try:
self.check_components()
except Exception as err:
- self.format_output['error']['code'] = 1
- self.format_output['error']['description'] = "check components failed with err: {}".format(str(err))
+ self.format_output["error"]["code"] = 1
+ self.format_output["error"]["description"] = (
+ "check components failed with err: {}".format(str(err))
+ )
return self.format_output
try:
self.check_ctom()
except Exception as err:
- self.format_output['error']['code'] = 1
- self.format_output['error']['description'] = "check ct_om status failed with err: {}".format(str(err))
+ self.format_output["error"]["code"] = 1
+ self.format_output["error"]["description"] = (
+ "check ct_om status failed with err: {}".format(str(err))
+ )
return self.format_output
try:
self.check_daemon()
except Exception as err:
- self.format_output['error']['code'] = 1
- self.format_output['error']['description'] = "check cantian_daemon failed with err: {}".format(str(err))
+ self.format_output["error"]["code"] = 1
+ self.format_output["error"]["description"] = (
+ "check cantian_daemon failed with err: {}".format(str(err))
+ )
return self.format_output
try:
self.check_cantian_timer()
except Exception as err:
- self.format_output['error']['code'] = 1
- self.format_output['error']['description'] = "check cantian timer fained with err: {}".format(str(err))
+ self.format_output["error"]["code"] = 1
+ self.format_output["error"]["description"] = (
+ "check cantian timer fained with err: {}".format(str(err))
+ )
return self.format_output
if not self.check_res_flag:
- self.format_output['error']['code'] = 1
- self.format_output['error']['description'] = "check cantian status failed, details: %s" % self.check_note
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = 1
+ self.format_output["error"]["description"] = (
+ "check cantian status failed, details: %s" % self.check_note
+ )
+ self.format_output["data"] = self.check_note
return self.format_output
-if __name__ == '__main__':
+if __name__ == "__main__":
oc = OmChecker()
print(json.dumps(oc.get_format_output()))
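# Editor's note (not part of the patch): each checker in this directory
# reports through the same envelope, {"data": ..., "error": {"code": ...,
# "description": ...}}, with code 0 meaning success. A minimal sketch
# (make_result is a hypothetical helper):
import json

def make_result(data=None, code=0, description=""):
    """Build the shared inspection result envelope."""
    return {"data": data or {}, "error": {"code": code, "description": description}}

print(json.dumps(make_result({"cms": "online"})))
print(json.dumps(make_result(code=1, description="check components failed")))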
diff --git a/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_logicrep_check.py b/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_logicrep_check.py
index 067e684ba3aba9e76550d1f9d116758399e28a85..64781c5b48c11fb17b57949535735d069823c20e 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_logicrep_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_logicrep_check.py
@@ -2,7 +2,7 @@ import json
import os
import sys
-sys.path.append('/opt/cantian/action/inspection')
+sys.path.append("/opt/cantian/action/inspection")
from om_common_function import exec_popen
@@ -13,21 +13,15 @@ FLAG_FILE = "/opt/software/tools/logicrep/enable.success"
class LogicrepChecker:
def __init__(self):
- self.check_process_cmd = 'ps -ef | grep ZLogCatcherMain | grep -v grep'
+ self.check_process_cmd = "ps -ef | grep ZLogCatcherMain | grep -v grep"
self.collect_cmd = f'grep -E "{CHECK_PATTERN}" {LOG_FILE_PATH}'
self.flag = 0
self.check_note = {
- 'Logicrep master node': 'unknown',
- 'Logicrep process': 'unknown',
- 'Warning log': "unknown",
- }
- self.format_output = {
- 'data': dict(),
- 'error': {
- 'code': 0,
- 'description': ''
- }
+ "Logicrep master node": "unknown",
+ "Logicrep process": "unknown",
+ "Warning log": "unknown",
}
+ self.format_output = {"data": dict(), "error": {"code": 0, "description": ""}}
@staticmethod
def select_error_info(info_list):
@@ -57,18 +51,18 @@ class LogicrepChecker:
def check_node(self):
if os.path.exists(FLAG_FILE):
- self.check_note['Logicrep master node'] = 'true'
+ self.check_note["Logicrep master node"] = "true"
else:
- self.check_note['Logicrep master node'] = 'false'
+ self.check_note["Logicrep master node"] = "false"
self.flag += 1
def check_process(self):
- self.check_note['Logicrep process'] = 'online'
+ self.check_note["Logicrep process"] = "online"
try:
_ = self._run_cmd(self.check_process_cmd)
except Exception as err:
if self.check_process_cmd in str(err):
- self.check_note['Logicrep process'] = 'offline'
+ self.check_note["Logicrep process"] = "offline"
self.flag += 1
else:
raise
@@ -81,53 +75,60 @@ class LogicrepChecker:
if self.collect_cmd in str(err):
pass
if not stdout:
- self.check_note['Warning log'] = 'none'
+ self.check_note["Warning log"] = "none"
return
stdout_list = self.select_error_info(stdout.split("\n"))
if not stdout_list:
- self.check_note['Warning log'] = 'none'
+ self.check_note["Warning log"] = "none"
return
- self.check_note['Warning log'] = "\n".join(stdout_list)
+ self.check_note["Warning log"] = "\n".join(stdout_list)
raise Exception(str(self.check_note))
def get_format_output(self):
try:
self.check_node()
except Exception as err:
- self.format_output['error']['code'] = -1
- self.format_output['error']['description'] = "check logicrep master node failed with err: {}".format(
- str(err))
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = -1
+ self.format_output["error"]["description"] = (
+ "check logicrep master node failed with err: {}".format(str(err))
+ )
+ self.format_output["data"] = self.check_note
return self.format_output
try:
self.check_process()
except Exception as err:
- self.format_output['error']['code'] = -1
- self.format_output['error']['description'] = "check logicrep process failed with err: {}".format(str(err))
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = -1
+ self.format_output["error"]["description"] = (
+ "check logicrep process failed with err: {}".format(str(err))
+ )
+ self.format_output["data"] = self.check_note
return self.format_output
if self.flag == 2:
- self.check_note['Warning log'] = 'none'
- self.format_output['data'] = self.check_note
+ self.check_note["Warning log"] = "none"
+ self.format_output["data"] = self.check_note
return self.format_output
elif self.flag:
- self.format_output['error']['code'] = -1
- self.format_output['error']['description'] = f"directory or process not found\n{str(self.check_note)}"
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = -1
+ self.format_output["error"][
+ "description"
+ ] = f"directory or process not found\n{str(self.check_note)}"
+ self.format_output["data"] = self.check_note
return self.format_output
try:
self.collect_log()
except Exception as err:
- self.format_output['error']['code'] = -1
- self.format_output['error']['description'] = "collect warning info failed with err: {}".format(str(err))
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = -1
+ self.format_output["error"]["description"] = (
+ "collect warning info failed with err: {}".format(str(err))
+ )
+ self.format_output["data"] = self.check_note
return self.format_output
- self.format_output['data'] = self.check_note
+ self.format_output["data"] = self.check_note
return self.format_output
-if __name__ == '__main__':
+if __name__ == "__main__":
far = LogicrepChecker()
print(json.dumps(far.get_format_output()))
diff --git a/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_ntp_check.py b/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_ntp_check.py
index 83623982611447b6f383dbeb9ce6927e44482a25..58d7205adc1b276ce385191d162107053196bb1c 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_ntp_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/ct_om/cantian_om_ntp_check.py
@@ -3,7 +3,7 @@ import re
import sys
from datetime import datetime
-sys.path.append('/opt/cantian/action/inspection')
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
from om_common_function import exec_popen
@@ -20,21 +20,15 @@ LOG_ZIP_FILE_PATH = "/opt/cantian/log/cantian/run/*tar.gz"
class NtpChecker:
def __init__(self):
- self.check_ntp_cmd = 'timedatectl'
+ self.check_ntp_cmd = "timedatectl"
self.check_cmd = f'grep -E "%s" {LOG_FILE_PATH}'
self.check_zip_cmd = f'tar -Oxzf {LOG_ZIP_FILE_PATH} | grep -E "%s"'
self.check_note = {
- 'System clock synchronized': 'unknown',
- 'NTP service': "unknown",
- 'pitr warning': "unknown",
- }
- self.format_output = {
- 'data': dict(),
- 'error': {
- 'code': 0,
- 'description': ''
- }
+ "System clock synchronized": "unknown",
+ "NTP service": "unknown",
+ "pitr warning": "unknown",
}
+ self.format_output = {"data": dict(), "error": {"code": 0, "description": ""}}
@staticmethod
def get_ntp_result(output):
@@ -59,9 +53,9 @@ class NtpChecker:
stdout = self._run_cmd(self.check_ntp_cmd)
flag = self.get_ntp_result(stdout)
if flag:
- self.check_note['System clock synchronized'] = 'yes'
+ self.check_note["System clock synchronized"] = "yes"
else:
- self.check_note['System clock synchronized'] = 'no'
+ self.check_note["System clock synchronized"] = "no"
raise Exception("System clock synchronized is no")
if "NTP service: active" in str(stdout):
self.check_note["NTP service"] = "active"
@@ -69,7 +63,6 @@ class NtpChecker:
self.check_note["NTP service"] = "inactive"
raise Exception("NTP service is inactive.")
-
def check_time_interval(self):
check_list = [self.check_cmd, self.check_zip_cmd]
check_result = []
@@ -87,7 +80,9 @@ class NtpChecker:
timestamp = self._get_log_time(item)
check_result.append((timestamp, item))
if check_result:
- self.check_note["pitr warning"] = "\n".join([item[1] for item in sorted(check_result)][-20:])
+ self.check_note["pitr warning"] = "\n".join(
+ [item[1] for item in sorted(check_result)][-20:]
+ )
raise Exception(str(self.check_note))
else:
self.check_note["pitr warning"] = "success"
@@ -96,22 +91,25 @@ class NtpChecker:
try:
self.check_ntp()
except Exception as err:
- self.format_output['error']['code'] = -1
- self.format_output['error']['description'] = "check ntp server failed with err: {}".format(str(err))
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = -1
+ self.format_output["error"]["description"] = (
+ "check ntp server failed with err: {}".format(str(err))
+ )
+ self.format_output["data"] = self.check_note
return self.format_output
try:
self.check_time_interval()
except Exception as err:
- self.format_output['error']['code'] = -1
- self.format_output['error']['description'] = "check time interval failed with err: {}".format(str(err))
- self.format_output['data'] = self.check_note
+ self.format_output["error"]["code"] = -1
+ self.format_output["error"]["description"] = (
+ "check time interval failed with err: {}".format(str(err))
+ )
+ self.format_output["data"] = self.check_note
return self.format_output
- self.format_output['data'] = self.check_note
+ self.format_output["data"] = self.check_note
return self.format_output
-if __name__ == '__main__':
+if __name__ == "__main__":
far = NtpChecker()
print(json.dumps(far.get_format_output()))
-
diff --git a/pkg/deploy/action/inspection/inspection_scripts/ct_om/om_common_function.py b/pkg/deploy/action/inspection/inspection_scripts/ct_om/om_common_function.py
index 947f76c1d4871584557462471310946610594c82..5e4fde4c1a347ccc8cdcdbbc919bc08752e3956a 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/ct_om/om_common_function.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/ct_om/om_common_function.py
@@ -12,8 +12,13 @@ def exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -39,4 +44,3 @@ def exec_popen(cmd, values=None):
stderr = stderr[:-1]
return pobj.returncode, stdout, stderr
-
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_archive_status.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_archive_status.py
index e28a296f8eb38de8ff8ffde4172fab612b37f80e..a4a9e679b1e98d0e8457393051a9d8942907b959 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_archive_status.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_archive_status.py
@@ -6,23 +6,23 @@ import re
from pathlib import Path
from ct_check import CheckContext, BaseItem, ResultStatus
-sys.path.append('/opt/cantian/action/inspection')
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
-DEPLOY_CONFIG_FILE = str(Path('/opt/cantian/config/deploy_param.json'))
+DEPLOY_CONFIG_FILE = str(Path("/opt/cantian/config/deploy_param.json"))
UNIT_CONVERSION_MAP = {
"P": 1024 * 1024 * 1024 * 1024,
"T": 1024 * 1024 * 1024,
"G": 1024 * 1024,
"M": 1024,
- "K": 1
+ "K": 1,
}
class CheckArchiveStatus(BaseItem):
- '''
+ """
check version of database
- '''
+ """
def __init__(self):
super(CheckArchiveStatus, self).__init__(self.__class__.__name__)
@@ -38,7 +38,9 @@ class CheckArchiveStatus(BaseItem):
values = {}
if self.db_type == 0 or self.db_type == "0":
self.result.rst = ResultStatus.WARNING
- self.result.sug = "The backup function is invalid. You are advised to enable it"
+ self.result.sug = (
+ "The backup function is invalid. You are advised to enable it"
+ )
values["result"] = "db_type is %s" % self.db_type
self.result.val = json.dumps(values)
return False
@@ -52,9 +54,11 @@ class CheckArchiveStatus(BaseItem):
return
if self.deploy_mode == "dbstor":
self.result.rst = ResultStatus.NG
- values["result"] = "Deploy mode is %s, please check whether the "\
- "remaining capacity of the file system meets "\
- "the requirements by self." % self.deploy_mode
+ values["result"] = (
+ "Deploy mode is %s, please check whether the "
+ "remaining capacity of the file system meets "
+ "the requirements by self." % self.deploy_mode
+ )
self.result.val = json.dumps(values)
return
@@ -62,8 +66,10 @@ class CheckArchiveStatus(BaseItem):
ret_code, str_out = self.get_cmd_result(cmd)
if ret_code:
self.result.rst = ResultStatus.ERROR
- self.result.sug = "1) Check whether the network link is normal\n " \
- "2) If the link is normal, contact technical support engineers"
+ self.result.sug = (
+ "1) Check whether the network link is normal\n "
+ "2) If the link is normal, contact technical support engineers"
+ )
values["except"] = "can not connect to %s" % self.archive_ip
self.result.val = json.dumps(values)
return
@@ -90,37 +96,49 @@ class CheckArchiveStatus(BaseItem):
used_capacity_number_str = re.sub("[A-z]", "", used_capacity)
max_capacity_number_str = re.sub("[A-z]", "", max_capacity)
- used_capacity_num = float(used_capacity_number_str) * UNIT_CONVERSION_MAP.get(used_capacity_unit_str, 0)
- max_capacity_num = float(max_capacity_number_str) * UNIT_CONVERSION_MAP.get(max_capacity_unit_str, 0)
+ used_capacity_num = float(used_capacity_number_str) * UNIT_CONVERSION_MAP.get(
+ used_capacity_unit_str, 0
+ )
+ max_capacity_num = float(max_capacity_number_str) * UNIT_CONVERSION_MAP.get(
+ max_capacity_unit_str, 0
+ )
if self.max_archive_size > max_capacity_num * 0.45:
self.result.rst = ResultStatus.ERROR
- values["except"] = "The archive configuration capacity must be less than or " \
- "equal to 45% of the maximum archive file system capacity"
+ values["except"] = (
+ "The archive configuration capacity must be less than or "
+ "equal to 45% of the maximum archive file system capacity"
+ )
self.result.sug = "Please modify the archive file"
self.result.val = json.dumps(values)
return
if used_capacity_num > self.max_archive_size * 0.95:
self.result.rst = ResultStatus.WARNING
- values["result"] = "The used archive file capacity exceeds the threshold, causing archive failure"
+ values["result"] = (
+ "The used archive file capacity exceeds the threshold, causing archive failure"
+ )
self.result.sug = "Contact technical support engineers"
self.result.val = json.dumps(values)
-if __name__ == '__main__':
- '''
+if __name__ == "__main__":
+ """
main
- '''
+ """
# check if user is root
- cantian_log = setup('cantian')
+ cantian_log = setup("cantian")
if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
archive_object = CheckArchiveStatus()
- with os.fdopen(os.open(DEPLOY_CONFIG_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r") \
- as file_handle:
+ with os.fdopen(
+ os.open(
+ DEPLOY_CONFIG_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR
+ ),
+ "r",
+ ) as file_handle:
json_data = json.load(file_handle)
archive_object.max_archive_size_str = json_data.get("MAX_ARCH_FILES_SIZE", "")
archive_object.db_type = json_data.get("db_type", 0)
@@ -130,7 +148,9 @@ if __name__ == '__main__':
unit_str = re.compile("[A-z]").findall(archive_object.max_archive_size_str)[0]
number_str = re.sub("[A-z]", "", archive_object.max_archive_size_str)
if UNIT_CONVERSION_MAP.get(unit_str, 0) != 0:
- archive_object.max_archive_size = float(number_str) * UNIT_CONVERSION_MAP.get(unit_str)
+ archive_object.max_archive_size = float(
+ number_str
+ ) * UNIT_CONVERSION_MAP.get(unit_str)
checker_context = CheckContext()
archive_object.run_check(checker_context, cantian_log)
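
For reference, the unit handling above can be exercised in isolation. A minimal sketch, assuming the same kilobyte-based map (K == 1, so every result is in KB); the helper name parse_capacity_kb is illustrative, and "[A-Za-z]" is used here because the original "[A-z]" class also matches the characters between "Z" and "a" ("[", "]", "^", "_", and the backtick):

import re

UNIT_CONVERSION_MAP = {
    "P": 1024**4,
    "T": 1024**3,
    "G": 1024**2,
    "M": 1024,
    "K": 1,
}

def parse_capacity_kb(text):
    # "500G" -> 524288000.0 KB; units missing from the map fall back to 0
    unit = re.findall("[A-Za-z]", text)[0].upper()
    number = float(re.sub("[A-Za-z]", "", text))
    return number * UNIT_CONVERSION_MAP.get(unit, 0)

assert parse_capacity_kb("500G") == 500 * 1024 * 1024
assert parse_capacity_kb("1.5T") == 1.5 * 1024**3
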
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_db_version.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_db_version.py
index ee0ac82fa7c247856d7a598b5bee09e72227ff5c..198b49b256c7a07bc8d721a4c77a01f92e5a454c 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_db_version.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_db_version.py
@@ -6,33 +6,37 @@ import sys
from ct_check import CheckContext
from ct_check import BaseItem
from ct_check import ResultStatus
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
class CheckDBVersion(BaseItem):
- '''
+ """
check version of database
- '''
+ """
+
def __init__(self):
super(CheckDBVersion, self).__init__(self.__class__.__name__)
self.title = "Check the database version"
def do_check(self):
- '''
+ """
function : Check version of database
input : NA
output : NA
- '''
+ """
vals = {}
self.result.rst = ResultStatus.OK
- cmd = "source ~/.bashrc && %s/cantiand -v | grep -oP \"\K\d+\.\d+\"" % \
- os.path.join(self.context.app_path, "bin")
+ cmd = (
+            r'source ~/.bashrc && %s/cantiand -v | grep -oP "\K\d+\.\d+"'
+ % os.path.join(self.context.app_path, "bin")
+ )
self.result.raw = cmd
status, output = self.get_cmd_result(cmd, self.user)
- if (status == 0):
+ if status == 0:
vals["db_version"] = output
cmd = "cat /opt/cantian/versions.yml | grep -oP 'Version: \K\d+\.\d+'"
status, output = self.get_cmd_result(cmd, self.user)
@@ -49,12 +53,12 @@ class CheckDBVersion(BaseItem):
self.result.val = json.dumps(vals)
-if __name__ == '__main__':
- '''
+if __name__ == "__main__":
+ """
main
- '''
+ """
# check if user is root
- cantian_log = setup('cantian')
+ cantian_log = setup("cantian")
if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
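
Both version commands above rely on grep -P's \K, which discards everything matched so far so that -o prints only the version digits. A pure-Python equivalent of the same extraction, run on made-up sample text:

import re

sample = "cantiand Version: 2.0.1234 Release"  # illustrative -v output
match = re.search(r"(?<=Version: )\d+\.\d+", sample)
print(match.group(0) if match else "no version found")  # -> 2.0
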
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_drc_res_ratio.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_drc_res_ratio.py
index a3303b814b9fdf2c519155fe5d6034f4b4f0ea0d..32f85de61575e9e2df25c534757299842c860879 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_drc_res_ratio.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_drc_res_ratio.py
@@ -6,13 +6,16 @@ import sys
from ct_check import CheckContext
from ct_check import BaseItem
from ct_check import ResultStatus
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
+
class CheckDRCResRatio(BaseItem):
- '''
+ """
check DRC res ratio
- '''
+ """
+
def __init__(self):
super(CheckDRCResRatio, self).__init__(self.__class__.__name__)
self.suggestion = "If DRC resource ratio is too high, try checkpoint"
@@ -21,11 +24,11 @@ class CheckDRCResRatio(BaseItem):
self.epv = 0.9375
def do_check(self):
- '''
+ """
function : Check for DRC res ratio
input : NA
output : NA
- '''
+ """
self.result.epv = self.epv
self.result.rst = ResultStatus.OK
@@ -36,16 +39,16 @@ class CheckDRCResRatio(BaseItem):
# Execute sql command
status, records = self.get_sql_result(sql)
- if (status == 0):
- res_ratio_dict['PAGE_BUF'] = records["records"][0][0]
- res_ratio_dict['GLOBAL_LOCK'] = records["records"][1][0]
- res_ratio_dict['LOCAL_LOCK'] = records["records"][2][0]
- res_ratio_dict['LOCAL_TXN'] = records["records"][3][0]
- res_ratio_dict['GLOBAL_TXN'] = records["records"][4][0]
- res_ratio_dict['LOCK_ITEM'] = records["records"][5][0]
+ if status == 0:
+ res_ratio_dict["PAGE_BUF"] = records["records"][0][0]
+ res_ratio_dict["GLOBAL_LOCK"] = records["records"][1][0]
+ res_ratio_dict["LOCAL_LOCK"] = records["records"][2][0]
+ res_ratio_dict["LOCAL_TXN"] = records["records"][3][0]
+ res_ratio_dict["GLOBAL_TXN"] = records["records"][4][0]
+ res_ratio_dict["LOCK_ITEM"] = records["records"][5][0]
self.result.rst = ResultStatus.OK
for value in res_ratio_dict.values():
- if (float(value) >= self.result.epv) :
+ if float(value) >= self.result.epv:
self.result.rst = ResultStatus.NG
else:
self.result.rst = ResultStatus.ERROR
@@ -55,13 +58,13 @@ class CheckDRCResRatio(BaseItem):
self.result.val = json.dumps(res_ratio_dict)
-if __name__ == '__main__':
- '''
+if __name__ == "__main__":
+ """
main
- '''
+ """
# check if user is root
- cantian_log = setup('cantian')
- if(os.getuid() == 0):
+ cantian_log = setup("cantian")
+ if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
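
The pass/fail rule above reduces to: the item is NG as soon as any of the six DRC resource ratios reaches epv (0.9375). A standalone sketch with made-up values:

EPV = 0.9375
res_ratio_dict = {
    "PAGE_BUF": "0.12",
    "GLOBAL_LOCK": "0.95",  # at or above the threshold
    "LOCAL_LOCK": "0.30",
}

status = "NG" if any(float(v) >= EPV for v in res_ratio_dict.values()) else "OK"
print(status)  # -> NG
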
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_redundant_links.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_redundant_links.py
index 2759c831e6048ac7411c25b39c43f8f5bef38732..633dda249302b347d55a818caa919a1b0abf1c63 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_redundant_links.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_redundant_links.py
@@ -6,52 +6,56 @@ import sys
from ct_check import CheckContext
from ct_check import BaseItem
from ct_check import ResultStatus
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
-
-
+
+
class CheckRedundantLinks(BaseItem):
- '''
+ """
check redundant links
- '''
+ """
+
def __init__(self):
super(CheckRedundantLinks, self).__init__(self.__class__.__name__)
self.title = "Check redundant links"
-
+
def do_check(self):
- '''
+ """
function : Check redundant links
input : NA
output : NA
- '''
-
+ """
+
vals = {}
self.result.rst = ResultStatus.OK
-
+
cmd = "sh /opt/cantian/action/inspection/inspection_scripts/kernal/check_link_cnt.sh"
self.result.raw = cmd
status, output = self.get_cmd_result(cmd, self.user)
- if (status == 0):
+ if status == 0:
vals["success"] = "Have redundant link."
else:
self.result.rst = ResultStatus.ERROR
- vals["except"] = "Do not have redundant link, for details, see the /opt/cantian/dbstor/cgwshowdev.log ."
-
+ vals["except"] = (
+ "Do not have redundant link, for details, see the /opt/cantian/dbstor/cgwshowdev.log ."
+ )
+
# add result to json
self.result.val = json.dumps(vals)
-
-
-if __name__ == '__main__':
- '''
+
+
+if __name__ == "__main__":
+ """
main
- '''
+ """
# check if user is root
- cantian_log = setup('cantian')
- if(os.getuid() == 0):
+ cantian_log = setup("cantian")
+ if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
-
+
# main function
checker = CheckRedundantLinks()
checker_context = CheckContext()
- checker.run_check(checker_context, cantian_log)
\ No newline at end of file
+ checker.run_check(checker_context, cantian_log)
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_session.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_session.py
index ff1e344573bdf6980d6abdd8b1af3c31d39d0a3c..680810bb4ee84d09b3f5203f226012bb5d29939d 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_session.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_session.py
@@ -6,65 +6,65 @@ import sys
from ct_check import CheckContext
from ct_check import BaseItem
from ct_check import ResultStatus
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
class CheckSession(BaseItem):
- '''
+ """
check session count of database
- '''
+ """
+
def __init__(self):
super(CheckSession, self).__init__(self.__class__.__name__)
- self.suggestion = \
- "The SQL client connection is less than or equal to 80% of the configured value."
+ self.suggestion = "The SQL client connection is less than or equal to 80% of the configured value."
self.standard = "The SQL client connection is less than or equal to 80% of the configured value."
self.title = "Check the number of database connections"
self.epv = "[0, 80]"
def execute_sql(self, sql, vals):
- '''
+ """
function : excute sql
input : NA
output : NA
- '''
- if not (self.context.db_user and self.context.db_passwd and
- self.context.port):
+ """
+ if not (self.context.db_user and self.context.db_passwd and self.context.port):
vals["except"] = "Database connection failed"
self.result.rst = ResultStatus.ERROR
self.result.val = json.dumps(vals)
return -1, 0
status, records = self.get_sql_result(sql)
- if status :
+ if status:
self.result.rst = ResultStatus.ERROR
vals["except"] = records
return status, records
def get_db_session(self, vals):
- '''
+ """
function : get session count of database
input : NA
output : int
- '''
+ """
if self.copyright:
sql = "SELECT COUNT(1) FROM DV_SESSIONS;"
else:
sql = "SELECT COUNT(1) FROM V\$SESSION;"
self.result.raw += "SESSION: " + sql.replace("\$", "$") + "\n"
status, records = self.execute_sql(sql, vals)
- if (status == 0):
+ if status == 0:
session_count = int(records["records"][0][0])
else:
session_count = 0
return status, session_count
def get_conf_session(self, vals):
- '''
+ """
function : get session value of confFile
input : dict
output : int
- '''
+ """
session_value = 1500
status = 0
conf_file = "%s/cfg/cantiand.ini" % self.data_path
@@ -74,7 +74,7 @@ class CheckSession(BaseItem):
return status, 0
content = ""
- with open(conf_file, 'r') as fp:
+ with open(conf_file, "r") as fp:
content = fp.readlines()
for line in content:
@@ -88,11 +88,11 @@ class CheckSession(BaseItem):
return status, session_value - 5
def check_session(self, vals):
- '''
+ """
function : check session
input : dict
output : NA
- '''
+ """
status, db_session = self.get_db_session(vals)
if status:
return
@@ -103,16 +103,16 @@ class CheckSession(BaseItem):
if usage > 80.0:
self.result.rst = ResultStatus.NG
- vals['db_session'] = db_session
- vals['conf_session'] = conf_session
- vals['usage'] = "%.2f%%" % usage
+ vals["db_session"] = db_session
+ vals["conf_session"] = conf_session
+ vals["usage"] = "%.2f%%" % usage
def do_check(self):
- '''
+ """
function : Check for status
input : NA
output : NA
- '''
+ """
vals = {}
self.result.rst = ResultStatus.OK
@@ -122,13 +122,13 @@ class CheckSession(BaseItem):
self.result.val = json.dumps(vals)
-if __name__ == '__main__':
- '''
+if __name__ == "__main__":
+ """
main
- '''
+ """
# check if user is root
- cantian_log = setup('cantian')
- if(os.getuid() == 0):
+ cantian_log = setup("cantian")
+ if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
@@ -144,5 +144,5 @@ if __name__ == '__main__':
for argv in sys.argv[1:]:
setattr(checker_context, context_attr[item_index], argv)
item_index += 1
-
+
checker.run_check(checker_context, cantian_log)
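
The usage rule above: the SESSIONS value read from cantiand.ini is reduced by 5 reserved connections, and the check goes NG once current sessions exceed 80% of the remainder. A standalone sketch with made-up numbers, assuming usage is the plain percentage of the configured limit in use:

db_session = 1300        # e.g. result of SELECT COUNT(1) FROM DV_SESSIONS
conf_session = 1500 - 5  # SESSIONS from cantiand.ini minus 5 reserved

usage = db_session * 100.0 / conf_session
print("usage = %.2f%%" % usage)        # -> usage = 86.96%
print("NG" if usage > 80.0 else "OK")  # -> NG
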
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_transaction.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_transaction.py
index 177183e5a9e98acf5e1ee24084f978737694da69..c506b77e3fd245d4c6f459fe997965deb1c51d1e 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/check_transaction.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/check_transaction.py
@@ -6,7 +6,8 @@ import sys
from ct_check import CheckContext
from ct_check import BaseItem
from ct_check import ResultStatus
-sys.path.append('/opt/cantian/action/inspection')
+
+sys.path.append("/opt/cantian/action/inspection")
from log_tool import setup
# minutes converted to microseconds
@@ -14,24 +15,26 @@ MAX_MICRO_TIME = 3 * 60 * 1000000
class CheckTransaction(BaseItem):
- '''
+ """
check transaction of database
- '''
+ """
+
def __init__(self):
super(CheckTransaction, self).__init__(self.__class__.__name__)
- self.suggestion = \
- "Configurable time, if there is a long transaction, \
+ self.suggestion = "Configurable time, if there is a long transaction, \
it is recommended that the user modify the SQL statement."
- self.standard = "Check transactions greater than 3 minutes, check if they do not exist."
+ self.standard = (
+ "Check transactions greater than 3 minutes, check if they do not exist."
+ )
self.title = "check for long time transactions"
self.epv = 0
def do_check(self):
- '''
+ """
function : Check for transaction of long time
input : NA
output : NA
- '''
+ """
vals = {}
self.result.epv = self.epv
@@ -48,7 +51,7 @@ it is recommended that the user modify the SQL statement."
# Execute sql command
status, records = self.get_sql_result(sql)
- if (status == 0):
+ if status == 0:
count = records["records"][0][0]
# expect value : 0
if int(count) == self.result.epv:
@@ -64,14 +67,13 @@ it is recommended that the user modify the SQL statement."
self.result.val = json.dumps(vals)
-
-if __name__ == '__main__':
- '''
+if __name__ == "__main__":
+ """
main
- '''
+ """
# check if user is root
- cantian_log = setup('cantian')
- if(os.getuid() == 0):
+ cantian_log = setup("cantian")
+ if os.getuid() == 0:
cantian_log.error("Cannot use root user for this operation!")
sys.exit(1)
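
The threshold above is MAX_MICRO_TIME, i.e. 3 minutes expressed in microseconds, and the expected value is zero long transactions. The core decision, sketched with an illustrative query result:

MAX_MICRO_TIME = 3 * 60 * 1000000  # 3 minutes in microseconds

count = "0"  # illustrative: rows whose runtime exceeds MAX_MICRO_TIME
print("OK" if int(count) == 0 else "NG")  # -> OK
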
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/ct_check.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/ct_check.py
index 140bac6ad562aa79f64e5ee20e2195a594f06f1f..143c258853d224e447953fe9bbd87eb3b8550367 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/ct_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/ct_check.py
@@ -33,7 +33,7 @@ gPyVersion = platform.python_version()
if gPyVersion[0] == "3":
import importlib
-if CURRENT_OS != 'Linux':
+if CURRENT_OS != "Linux":
raise ValueError("Error:Check os failed:current os is not linux")
@@ -48,7 +48,7 @@ class CheckContext:
"""
# Initialize the self.clusterInfo variable
curr_path = os.path.realpath(__file__)
- self.base_path = os.path.join(os.path.split(curr_path)[0], 'inspection')
+ self.base_path = os.path.join(os.path.split(curr_path)[0], "inspection")
self.user = None
self.support_items = {}
self.support_scenes = {}
@@ -82,9 +82,9 @@ class CheckContext:
# Exception class
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class CheckException(Exception):
- '''
+ """
base class of exception
- '''
+ """
def __init__(self, content):
super(CheckException, self).__init__(self)
@@ -96,9 +96,9 @@ class CheckException(Exception):
class CheckNAException(CheckException):
- '''
+ """
NA checkItem exception
- '''
+ """
def __init__(self, item):
super(CheckNAException, self).__init__(self.__class__.__name__)
@@ -108,9 +108,9 @@ class CheckNAException(CheckException):
class TimeoutException(CheckException):
- '''
+ """
timeout exception
- '''
+ """
def __init__(self, second):
super(TimeoutException, self).__init__(self.__class__.__name__)
@@ -120,20 +120,41 @@ class TimeoutException(CheckException):
def get_current_user():
- '''
+ """
get current user
- '''
+ """
# Get the current user
return pwd.getpwuid(os.getuid())[0]
def check_legality(parameter_string):
- '''
+ """
Check for illegal characters
- '''
+ """
# the list of invalid characters
value_check_list = [
- "|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", "{", "}", "(", ")", "[", "]", "~", "*", "?", "!", "\n", " "
+ "|",
+ ";",
+ "&",
+ "$",
+ "<",
+ ">",
+ "`",
+ "\\",
+ "'",
+ '"',
+ "{",
+ "}",
+ "(",
+ ")",
+ "[",
+ "]",
+ "~",
+ "*",
+ "?",
+ "!",
+ "\n",
+ " ",
]
# judge illegal characters
for ch in value_check_list:
@@ -143,9 +164,9 @@ def check_legality(parameter_string):
class SharedFuncs:
- '''
+ """
defined tools for executing cmd and sql
- '''
+ """
def __init__(self):
pass
@@ -163,37 +184,40 @@ class SharedFuncs:
ssh_base = "%s -o NumberOfPasswordPrompts=1 %s@%s" % (ssh, user, ip)
process = Execution(ssh_base)
- idx = process.expect(['(P|p)assword:'])
+ idx = process.expect(["(P|p)assword:"])
if idx == 0:
process.sendLine(passwd)
- done_flag = 'ct_check done'
+ done_flag = "ct_check done"
process.sendLine("%s; echo 'ct_check done'" % ssh_sql)
- escape = 'unicode-escape'
+ escape = "unicode-escape"
while True:
- idx = process.expect([done_flag, "Please enter password"],
- timeout=50)
+ idx = process.expect([done_flag, "Please enter password"], timeout=50)
if idx == 0:
- process.sendLine('exit')
+ process.sendLine("exit")
status = 0
- output = str(process.context_before.decode(escape).split(done_flag)[0])
+ output = str(
+ process.context_before.decode(escape).split(done_flag)[0]
+ )
break
elif idx == 1:
process.sendLine(db_passwd)
else:
- process.sendLine('exit')
+ process.sendLine("exit")
status = 1
- output = str(process.context_buffer.decode(escape).split(done_flag)[0])
+ output = str(
+ process.context_buffer.decode(escape).split(done_flag)[0]
+ )
break
except Exception as err:
if process:
- process.sendLine('exit')
+ process.sendLine("exit")
status = 1
output = f"{err}\n{process.context_buffer.decode('unicode-escape').split(done_flag)[0]}"
return status, output
@staticmethod
def get_abs_path(self, _file):
- for _path in os.environ["PATH"].split(':'):
+ for _path in os.environ["PATH"].split(":"):
abs_file = os.path.normpath(os.path.join(_path, _file))
if os.path.exists(abs_file):
return 0, abs_file
@@ -209,8 +233,13 @@ class SharedFuncs:
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -253,9 +282,9 @@ class SharedFuncs:
return 0, "\n".join(result)
def run_shell_cmd(self, cmd, user=None, mpprc_file="", array=(0,)):
- '''
+ """
defined tools for cmd
- '''
+ """
if not isinstance(array, tuple):
return -1, "parameter [array] is illegal"
@@ -267,12 +296,12 @@ class SharedFuncs:
if max(array) > len(cmd.split("|")) - 1:
return -1, "parameter [array] is illegal"
- if (mpprc_file):
+ if mpprc_file:
cmd = "source '%s'; %s" % (mpprc_file, cmd)
cmd = cmd + "; echo ${PIPESTATUS[*]}"
# change user but can not be root user
- if (user and user != get_current_user()):
+ if user and user != get_current_user():
cmd = "su -s /bin/bash - %s -c 'source ~/.bashrc; %s'" % (user, cmd)
returncode, stdout, stderr = self.exec_open(cmd)
@@ -299,21 +328,24 @@ class SharedFuncs:
return 1, "CantianDB 100 V300R001"
ctsql_path = os.path.join(app_path, "bin")
- sql_cmd = "source ~/.bashrc && echo -e %s| %s/ctsql %s@%s:%s -q -c \"%s\"" % (db_passwd, ctsql_path,
- db_user,
- db_addr,
- port,
- sql)
+ sql_cmd = 'source ~/.bashrc && echo -e %s| %s/ctsql %s@%s:%s -q -c "%s"' % (
+ db_passwd,
+ ctsql_path,
+ db_user,
+ db_addr,
+ port,
+ sql,
+ )
returncode, stdout, stderr = self.exec_open(sql_cmd)
output = stdout + stderr
return returncode, output
def get_sql_result(self, *args):
- '''
+ """
get records of sql output
success:state, {'records':[[list1], [list2]], "title":[list_title]}
failed: state, string_errormsg
- '''
+ """
sql, db_user, db_passwd, db_addr, port, app_path = args
params = [sql, db_user, db_passwd, db_addr, port, app_path]
status, output = self.run_sql_cmd(*params)
@@ -326,18 +358,18 @@ class SharedFuncs:
num_records = int(re.findall(r"(\d+) rows fetched.", output)[0])
end_index = result_list.index(f"{num_records} rows fetched.")
result_records["title"] = result_list[end_index - 2 - num_records].split()
- for result_line in result_list[end_index - num_records: end_index]:
+ for result_line in result_list[end_index - num_records : end_index]:
result_records["records"].append(result_line.split())
return status, result_records
else:
return status, "\n".join([line for line in output.split("\n") if line])
def verify_conn(self, *args_):
- '''
+ """
function : get ip type
input :string, string, string, string, string, string
output : iptype
- '''
+ """
sql, dbuser, dbpwd, db_addr, port, app_path = args_
params = [sql, dbuser, dbpwd, db_addr, port, app_path]
if not (dbuser and dbpwd):
@@ -364,9 +396,10 @@ def get_validity(path, path_desc):
class ResultStatus(object):
- '''
+ """
define result status
- '''
+ """
+
OK = "OK"
NA = "NA"
WARNING = "WARNING"
@@ -375,9 +408,9 @@ class ResultStatus(object):
class LocalItemResult(object):
- '''
+ """
the check result running on one host
- '''
+ """
def __init__(self, name, host):
self.name = name
@@ -409,29 +442,32 @@ class LocalItemResult(object):
" {sug}\n"
"[REFER ]\n"
" {raw}\n"
-
)
val = self.val if self.val else ""
raw = self.raw if self.raw else ""
try:
- content = output_doc.format(name=self.name,
- rst=self.rst,
- host=self.host,
- val=val,
- epv=self.epv,
- des=self.des,
- sug=self.sug,
- raw=raw)
+ content = output_doc.format(
+ name=self.name,
+ rst=self.rst,
+ host=self.host,
+ val=val,
+ epv=self.epv,
+ des=self.des,
+ sug=self.sug,
+ raw=raw,
+ )
except Exception:
- output_utf8 = output_doc.encode('utf-8')
- content = output_utf8.format(name=self.name,
- rst=self.rst,
- host=self.host,
- val=val,
- epv=self.epv,
- des=self.des,
- sug=self.sug,
- raw=raw.decode('utf-8'))
+ output_utf8 = output_doc.encode("utf-8")
+ content = output_utf8.format(
+ name=self.name,
+ rst=self.rst,
+ host=self.host,
+ val=val,
+ epv=self.epv,
+ des=self.des,
+ sug=self.sug,
+ raw=raw.decode("utf-8"),
+ )
return content
def to_json(self):
@@ -443,45 +479,49 @@ class LocalItemResult(object):
"EXPECT": self.epv,
"SUGGEST": self.sug,
"DESCRIPT_EN": self.des,
- "REFER": [raw.strip() for raw in self.raw.split("\n") if raw]
+ "REFER": [raw.strip() for raw in self.raw.split("\n") if raw],
},
- "error": {
- "code": 0,
- "description": ""
- }
+ "error": {"code": 0, "description": ""},
}
- if (self.rst == ResultStatus.NA):
+ if self.rst == ResultStatus.NA:
rst = "\033[0;37m%s\033[0m" % "NONE"
- elif (self.rst == ResultStatus.WARNING or
- self.rst == ResultStatus.ERROR or
- self.rst == ResultStatus.NG):
+ elif (
+ self.rst == ResultStatus.WARNING
+ or self.rst == ResultStatus.ERROR
+ or self.rst == ResultStatus.NG
+ ):
rst = "\033[0;31m%s\033[0m" % self.rst
else:
rst = "\033[0;32m%s\033[0m" % ResultStatus.OK
val = json.loads(self.val)
- if (self.rst == ResultStatus.NG):
+ if self.rst == ResultStatus.NG:
detail_result["error"]["code"] = 1
- detail_result["error"]["description"] = \
- "{} is failed, expect val: {} current val: {}".format(self.des, self.epv, val)
- elif (self.rst == ResultStatus.ERROR):
+ detail_result["error"]["description"] = (
+ "{} is failed, expect val: {} current val: {}".format(
+ self.des, self.epv, val
+ )
+ )
+ elif self.rst == ResultStatus.ERROR:
detail_result["error"]["code"] = -1
- detail_result["error"]["description"] = "{} is failed, error msg: \"{}\"".format(self.des, val["except"])
+ detail_result["error"]["description"] = (
+ '{} is failed, error msg: "{}"'.format(self.des, val["except"])
+ )
json_dump = json.dumps(detail_result, ensure_ascii=False, indent=2)
print(json_dump)
class BaseItem(object):
- '''
+ """
base class of check item
- '''
+ """
def __init__(self, name):
- '''
+ """
Constructor
- '''
+ """
self.name = name
self.title = None
self.suggestion = None
@@ -489,10 +529,10 @@ class BaseItem(object):
self.time = int(time.time())
self.standard = None
self.threshold = {}
- self.category = 'other'
- self.permission = 'user'
- self.analysis = 'default'
- self.scope = 'all'
+ self.category = "other"
+ self.permission = "user"
+ self.analysis = "default"
+ self.scope = "all"
self.cluster = None
self.user = None
self.nodes = None
@@ -514,15 +554,15 @@ class BaseItem(object):
@abstractmethod
def do_check(self):
- '''
+ """
check script for each item
- '''
+ """
pass
def init_form(self, context):
- '''
+ """
initialize the check item from context
- '''
+ """
self.context = context
self.user = context.user
self.nodes = context.nodes
@@ -543,38 +583,40 @@ class BaseItem(object):
self.result.epv = self.epv
self.result.des = self.title
# new host without cluster installed
- if (not self.user):
+ if not self.user:
self.host = socket.gethostname()
self.result.host = socket.gethostname()
def get_cmd_result(self, cmd, user=None):
- '''
+ """
get cmd result
- '''
+ """
if not user:
user = self.user
status, output = SharedFuncs().run_shell_cmd(cmd, user)
return status, output
def get_sql_result(self, sql):
- '''
+ """
get sql result
- '''
+ """
if self.context.port:
- status, output = SharedFuncs().get_sql_result(sql,
- self.context.db_user,
- self.context.db_passwd,
- self.context.db_addr,
- self.context.port,
- self.context.app_path)
+ status, output = SharedFuncs().get_sql_result(
+ sql,
+ self.context.db_user,
+ self.context.db_passwd,
+ self.context.db_addr,
+ self.context.port,
+ self.context.app_path,
+ )
return status, output
else:
return "-1", "miss parameter [-P]"
def run_check(self, context, logger):
- '''
+ """
main process for checking
- '''
+ """
content = ""
except_val = {}
self.init_form(context)
@@ -590,8 +632,12 @@ class BaseItem(object):
except_val["except"] = str(e)
self.result.rst = ResultStatus.ERROR
self.result.val = json.dumps(except_val)
- logger.error("Exception occur when running %s:\n%s:traceback%s", self.name, str(e),
- traceback.format_exc())
+ logger.error(
+ "Exception occur when running %s:\n%s:traceback%s",
+ self.name,
+ str(e),
+ traceback.format_exc(),
+ )
finally:
# output result
content = self.result.output(context.out_path)
@@ -600,9 +646,9 @@ class BaseItem(object):
class ItemResult(object):
- '''
+ """
inspection inspection framework
- '''
+ """
def __init__(self, name):
self.name = name
@@ -612,26 +658,26 @@ class ItemResult(object):
self.suggestion = ""
self.epv = ""
self.des = ""
- self.category = 'other'
+ self.category = "other"
self.analysis = ""
def __iter__(self):
- '''
+ """
make iterable
- '''
+ """
return iter(self._items)
def __getitem__(self, idx):
- '''
+ """
get item
- '''
+ """
return self._items[idx]
@staticmethod
def parse(output):
- '''
+ """
parse output
- '''
+ """
item_result = None
local_item_result = None
lines = output.splitlines()
@@ -644,43 +690,49 @@ class ItemResult(object):
if not line:
continue
- if line.startswith('[HOST ]'):
+ if line.startswith("[HOST ]"):
host = line.split()[-1]
- if line.startswith('[NAME ]'):
+ if line.startswith("[NAME ]"):
name = line.split()[-1]
if item_result is None:
item_result = ItemResult(name)
else:
item_result.append(local_item_result)
local_item_result = LocalItemResult(name, host)
- if line.startswith('[RESULT ]'):
+ if line.startswith("[RESULT ]"):
local_item_result.rst = line.split()[-1]
value = ItemResult.__parse_multi_line(lines, idx)
- if line.startswith('[VALUE ]'):
+ if line.startswith("[VALUE ]"):
local_item_result.val = value
- if line.startswith('[EXPECT ]'):
+ if line.startswith("[EXPECT ]"):
local_item_result.epv = value
- if line.startswith('[DESCRIPT_EN]'):
+ if line.startswith("[DESCRIPT_EN]"):
local_item_result.des = value
- if line.startswith('[SUGGEST ]'):
+ if line.startswith("[SUGGEST ]"):
local_item_result.sug = value
- if line.startswith('[REFER ]'):
+ if line.startswith("[REFER ]"):
local_item_result.raw = value
return item_result
@staticmethod
def __parse_multi_line(lines, start_idx):
- '''
+ """
parse line by line
- '''
+ """
vals = []
starter = (
- '[HOST ]', '[NAME ]', '[RESULT ]', '[VALUE ]', '[REFER ]', '[EXPECT ]',
- '[DESCRIPT_EN]', '[SUGGEST ]'
+ "[HOST ]",
+ "[NAME ]",
+ "[RESULT ]",
+ "[VALUE ]",
+ "[REFER ]",
+ "[EXPECT ]",
+ "[DESCRIPT_EN]",
+ "[SUGGEST ]",
)
- for line in lines[start_idx + 1:]:
+ for line in lines[start_idx + 1 :]:
if line.strip().startswith(starter):
break
else:
@@ -688,7 +740,7 @@ class ItemResult(object):
return "\n".join(vals)
def append(self, val):
- '''
+ """
append item
- '''
+ """
self._items.append(val)
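
run_shell_cmd above appends echo ${PIPESTATUS[*]} so that the exit status of every stage of a shell pipeline lands on the last output line, not just the status of the final command. A minimal demonstration:

import subprocess

cmd = "false | true; echo ${PIPESTATUS[*]}"
out = subprocess.run(["bash", "-c", cmd], capture_output=True, text=True).stdout
print(out.split())  # -> ['1', '0']: the first stage failed, the second succeeded
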
diff --git a/pkg/deploy/action/inspection/inspection_scripts/kernal/gs_check.py b/pkg/deploy/action/inspection/inspection_scripts/kernal/gs_check.py
index ccee21863cd61ec5889fae2d14b5619c5a4f43b9..0bf5f201d8ef9d4ebad4a19eee9b35ee6c0475b0 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/kernal/gs_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/kernal/gs_check.py
@@ -11,6 +11,7 @@ import socket
import pwd
import json
from abc import abstractmethod
+
sys.path.append("/opt/cantian/action/cantian")
from cantian_funclib import Execution
from cantian_funclib import get_abs_path
@@ -30,7 +31,7 @@ gPyVersion = platform.python_version()
if gPyVersion[0] == "3":
import importlib
-if CURRENT_OS != 'Linux':
+if CURRENT_OS != "Linux":
print("Error:Check os failed:current os is not linux")
raise ValueError("Error:Check os failed:current os is not linux")
@@ -46,7 +47,7 @@ class CheckContext:
"""
# Initialize the self.clusterInfo variable
curr_path = os.path.realpath(__file__)
- self.base_path = os.path.join(os.path.split(curr_path)[0], 'inspection')
+ self.base_path = os.path.join(os.path.split(curr_path)[0], "inspection")
self.user = None
self.support_items = {}
self.support_scenes = {}
@@ -80,9 +81,10 @@ class CheckContext:
# Exception class
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class CheckException(Exception):
- '''
+ """
base class of exception
- '''
+ """
+
def __init__(self, content):
super(CheckException, self).__init__(self)
self.code = "CANTIAN-53000"
@@ -93,9 +95,10 @@ class CheckException(Exception):
class CheckNAException(CheckException):
- '''
+ """
NA checkItem exception
- '''
+ """
+
def __init__(self, item):
super(CheckNAException, self).__init__(self.__class__.__name__)
self.code = "CANTIAN-53033"
@@ -104,9 +107,10 @@ class CheckNAException(CheckException):
class TimeoutException(CheckException):
- '''
+ """
timeout exception
- '''
+ """
+
def __init__(self, second):
super(TimeoutException, self).__init__(self.__class__.__name__)
self.code = "CANTIAN-53028"
@@ -115,21 +119,42 @@ class TimeoutException(CheckException):
def get_current_user():
- '''
+ """
get current user
- '''
+ """
# Get the current user
return pwd.getpwuid(os.getuid())[0]
def check_legality(parameter_string):
- '''
+ """
Check for illegal characters
- '''
+ """
# the list of invalid characters
- value_check_list = ["|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"",
- "{", "}", "(", ")", "[", "]", "~", "*",
- "?", "!", "\n", " "]
+ value_check_list = [
+ "|",
+ ";",
+ "&",
+ "$",
+ "<",
+ ">",
+ "`",
+ "\\",
+ "'",
+ '"',
+ "{",
+ "}",
+ "(",
+ ")",
+ "[",
+ "]",
+ "~",
+ "*",
+ "?",
+ "!",
+ "\n",
+ " ",
+ ]
# judge illegal characters
for ch in value_check_list:
if parameter_string.find(ch) >= 0:
@@ -138,17 +163,18 @@ def check_legality(parameter_string):
class SharedFuncs:
- '''
+ """
defined tools for executing cmd and sql
- '''
+ """
+
def __init__(self):
pass
@staticmethod
def run_shell_cmd(cmd, user=None, mpprc_file="", array=(0,)):
- '''
+ """
defined tools for cmd
- '''
+ """
if not isinstance(array, tuple):
return -1, "parameter [array] is illegal"
@@ -160,18 +186,18 @@ class SharedFuncs:
if max(array) > len(cmd.split("|")) - 1:
return -1, "parameter [array] is illegal"
- if (mpprc_file):
+ if mpprc_file:
cmd = "source '%s'; %s" % (mpprc_file, cmd)
cmd = cmd + "; echo ${PIPESTATUS[*]}"
# change user but can not be root user
- if (user and user != get_current_user()):
+ if user and user != get_current_user():
cmd = "su -s /bin/bash - %s -c 'source ~/.bashrc; %s'" % (user, cmd)
# execute cmd
- p = subprocess.Popen(['bash', '-c', cmd],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ ["bash", "-c", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
# get normal and abnormal output
(stdoutdata, stderrdata) = p.communicate()
@@ -179,11 +205,11 @@ class SharedFuncs:
status = p.returncode
if gPyVersion[0] == "3":
- '''
+ """
python3's Popen returned Byte
python2's Popen returned str
convert to str if python3
- '''
+ """
stdoutdata = stdoutdata.decode()
stderrdata = stderrdata.decode()
@@ -207,7 +233,7 @@ class SharedFuncs:
return status, stdoutdata + stderrdata
def get_abs_path(self, _file):
- for _path in os.environ["PATH"].split(':'):
+ for _path in os.environ["PATH"].split(":"):
abs_file = os.path.normpath(os.path.join(_path, _file))
if os.path.exists(abs_file):
return abs_file
@@ -223,20 +249,24 @@ class SharedFuncs:
return 1, "CantianDB 100 V300R001"
ctsql_path = os.path.join(app_path, "bin")
- sql_cmd = "source ~/.bashrc && %s/ctsql %s@%s:%s -q -c \"%s\"" % (ctsql_path,
- db_user,
- db_addr,
- port,
- sql)
+ sql_cmd = 'source ~/.bashrc && %s/ctsql %s@%s:%s -q -c "%s"' % (
+ ctsql_path,
+ db_user,
+ db_addr,
+ port,
+ sql,
+ )
if gPyVersion[0] == "3":
sql_cmd = sql_cmd.encode()
- p = subprocess.Popen(['bash', '-c', sql_cmd],
- shell=False,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ ["bash", "-c", sql_cmd],
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
db_passwd += "\n"
@@ -262,22 +292,19 @@ class SharedFuncs:
process = None
ssh = get_abs_path("ssh")
try:
- ssh_base = "%s -o NumberOfPasswordPrompts=1 %s@%s" % (ssh,
- user,
- ip)
+ ssh_base = "%s -o NumberOfPasswordPrompts=1 %s@%s" % (ssh, user, ip)
process = Execution(ssh_base)
- idx = process.expect(['(P|p)assword:'])
+ idx = process.expect(["(P|p)assword:"])
if idx == 0:
process.sendLine(passwd)
- done_flag = 'gs_check done'
+ done_flag = "gs_check done"
process.sendLine("%s; echo 'gs_check done'" % ssh_sql)
- escape = 'unicode-escape'
+ escape = "unicode-escape"
while True:
- idx = process.expect([done_flag, "Please enter password"],
- timeout=50)
+ idx = process.expect([done_flag, "Please enter password"], timeout=50)
if idx == 0:
- process.sendLine('exit')
+ process.sendLine("exit")
status = 0
output = str(process.context_before.decode(escape))
output = output.split(done_flag)[0]
@@ -285,35 +312,32 @@ class SharedFuncs:
elif idx == 1:
process.sendLine(db_passwd)
else:
- process.sendLine('exit')
+ process.sendLine("exit")
status = 1
context_buffer = process.context_buffer.decode(escape)
output = str(context_buffer).split(done_flag)[0]
break
except Exception as err:
if process:
- process.sendLine('exit')
+ process.sendLine("exit")
status = 1
output = str(err)
output += os.linesep
- context_buffer = process.context_buffer.decode('unicode-escape')
+ context_buffer = process.context_buffer.decode("unicode-escape")
output += context_buffer.split(done_flag)[0]
return status, output
def get_sql_result(self, sql, db_user, db_passwd, db_addr, port, app_path):
- '''
+ """
get records of sql output
success:state, {'records':[[list1], [list2]], "title":[list_title]}
failed: state, string_errormsg
- '''
- status, output = self.run_sql_cmd(sql,
- db_user,
- db_passwd,
- db_addr,
- port,
- app_path)
+ """
+ status, output = self.run_sql_cmd(
+ sql, db_user, db_passwd, db_addr, port, app_path
+ )
if not status:
- if (output.find("CT-") != -1) :
+ if output.find("CT-") != -1:
status = -1
return status, output.split("\n")[2]
result_records = {"title": [], "records": []}
@@ -328,21 +352,16 @@ class SharedFuncs:
return status, "\n".join([_f for _f in output.split("\n") if _f])
def verify_conn(self, sql, dbuser, dbpwd, db_addr, port, app_path):
- '''
+ """
function : get ip type
input :string, string, string, string, string, string
output : iptype
- '''
+ """
if not (dbuser and dbpwd):
raise Exception("[ERROR]: Username and password cannot be empty")
check_legality(dbuser)
# execute the command to verify the database connection
- status, output = self.run_sql_cmd(sql,
- dbuser,
- dbpwd,
- db_addr,
- port,
- app_path)
+ status, output = self.run_sql_cmd(sql, dbuser, dbpwd, db_addr, port, app_path)
if status:
raise Exception("[ERROR]: %s" % output)
@@ -357,8 +376,7 @@ def get_validity(path, path_desc):
if not os.path.exists(real_path):
raise Exception("The %s %s is not exists." % (path_desc, real_path))
if not os.path.isdir(real_path):
- raise Exception("The %s %s is not directory type." % (path_desc,
- real_path))
+ raise Exception("The %s %s is not directory type." % (path_desc, real_path))
return real_path
@@ -370,9 +388,10 @@ def __print_on_screen(msg):
class ResultStatus(object):
- '''
+ """
define result status
- '''
+ """
+
OK = "OK"
NA = "NA"
WARNING = "WARNING"
@@ -381,9 +400,9 @@ class ResultStatus(object):
class LocalItemResult(object):
- '''
+ """
the check result running on one host
- '''
+ """
def __init__(self, name, host):
self.name = name
@@ -419,24 +438,28 @@ class LocalItemResult(object):
val = self.val if self.val else ""
raw = self.raw if self.raw else ""
try:
- content = output_doc.format(name=self.name,
- rst=self.rst,
- host=self.host,
- val=val,
- epv=self.epv,
- des=self.des,
- sug=self.sug,
- raw=raw)
+ content = output_doc.format(
+ name=self.name,
+ rst=self.rst,
+ host=self.host,
+ val=val,
+ epv=self.epv,
+ des=self.des,
+ sug=self.sug,
+ raw=raw,
+ )
except Exception:
- output_utf8 = output_doc.encode('utf-8')
- content = output_utf8.format(name=self.name,
- rst=self.rst,
- host=self.host,
- val=val,
- epv=self.epv,
- des=self.des,
- sug=self.sug,
- raw=raw.decode('utf-8'))
+ output_utf8 = output_doc.encode("utf-8")
+ content = output_utf8.format(
+ name=self.name,
+ rst=self.rst,
+ host=self.host,
+ val=val,
+ epv=self.epv,
+ des=self.des,
+ sug=self.sug,
+ raw=raw.decode("utf-8"),
+ )
return content
def to_json(self):
@@ -445,11 +468,13 @@ class LocalItemResult(object):
detail_result["error"] = {}
detail_result["error"]["code"] = 0
detail_result["error"]["description"] = ""
- if (self.rst == ResultStatus.NA):
+ if self.rst == ResultStatus.NA:
rst = "\033[0;37m%s\033[0m" % "NONE"
- elif (self.rst == ResultStatus.WARNING or
- self.rst == ResultStatus.ERROR or
- self.rst == ResultStatus.NG):
+ elif (
+ self.rst == ResultStatus.WARNING
+ or self.rst == ResultStatus.ERROR
+ or self.rst == ResultStatus.NG
+ ):
rst = "\033[0;31m%s\033[0m" % self.rst
else:
rst = "\033[0;32m%s\033[0m" % ResultStatus.OK
@@ -468,13 +493,18 @@ class LocalItemResult(object):
detail_result["data"]["REFER"] = []
for raw in self.raw.split("\n"):
self.add_refer(raw, detail_result["data"]["REFER"])
- if (self.rst == ResultStatus.NG) :
+ if self.rst == ResultStatus.NG:
detail_result["error"]["code"] = 1
- detail_result["error"]["description"] = \
- "{} is failed, expect val: {} current val: {}".format(self.des, self.epv, val)
- elif (self.rst == ResultStatus.ERROR) :
+ detail_result["error"]["description"] = (
+ "{} is failed, expect val: {} current val: {}".format(
+ self.des, self.epv, val
+ )
+ )
+ elif self.rst == ResultStatus.ERROR:
detail_result["error"]["code"] = -1
- detail_result["error"]["description"] = "{} is failed, error msg: \"{}\"".format(self.des, val["except"])
+ detail_result["error"]["description"] = (
+ '{} is failed, error msg: "{}"'.format(self.des, val["except"])
+ )
json_dump = json.dumps(detail_result, ensure_ascii=False, indent=2)
print(json_dump)
@@ -488,14 +518,14 @@ class LocalItemResult(object):
class BaseItem(object):
- '''
+ """
base class of check item
- '''
+ """
def __init__(self, name):
- '''
+ """
Constructor
- '''
+ """
self.name = name
self.title = None
self.suggestion = None
@@ -503,10 +533,10 @@ class BaseItem(object):
self.time = int(time.time())
self.standard = None
self.threshold = {}
- self.category = 'other'
- self.permission = 'user'
- self.analysis = 'default'
- self.scope = 'all'
+ self.category = "other"
+ self.permission = "user"
+ self.analysis = "default"
+ self.scope = "all"
self.cluster = None
self.user = None
self.nodes = None
@@ -528,15 +558,15 @@ class BaseItem(object):
@abstractmethod
def do_check(self):
- '''
+ """
check script for each item
- '''
+ """
pass
def init_form(self, context):
- '''
+ """
initialize the check item from context
- '''
+ """
self.context = context
self.user = context.user
self.nodes = context.nodes
@@ -557,38 +587,40 @@ class BaseItem(object):
self.result.epv = self.epv
self.result.des = self.title
# new host without cluster installed
- if (not self.user):
+ if not self.user:
self.host = socket.gethostname()
self.result.host = socket.gethostname()
def get_cmd_result(self, cmd, user=None):
- '''
+ """
get cmd result
- '''
+ """
if not user:
user = self.user
status, output = SharedFuncs().run_shell_cmd(cmd, user)
return status, output
def get_sql_result(self, sql):
- '''
+ """
get sql result
- '''
+ """
if self.context.port:
- status, output = SharedFuncs().get_sql_result(sql,
- self.context.db_user,
- self.context.db_passwd,
- self.context.db_addr,
- self.context.port,
- self.context.app_path)
+ status, output = SharedFuncs().get_sql_result(
+ sql,
+ self.context.db_user,
+ self.context.db_passwd,
+ self.context.db_addr,
+ self.context.port,
+ self.context.app_path,
+ )
return status, output
else:
return "-1", "miss prameter [-P]"
def run_check(self, context, logger):
- '''
+ """
main process for checking
- '''
+ """
content = ""
except_val = {}
self.init_form(context)
@@ -604,7 +636,9 @@ class BaseItem(object):
except_val["except"] = str(Exception)
self.result.rst = ResultStatus.ERROR
self.result.val = json.dumps(except_val)
- logger.error("Exception occur when running {}:\n{}".format(self.name, str(Exception)))
+ logger.error(
+ "Exception occur when running {}:\n{}".format(self.name, str(Exception))
+ )
finally:
# output result
content = self.result.output(context.out_path)
@@ -613,9 +647,10 @@ class BaseItem(object):
class ItemResult(object):
- '''
+ """
inspection inspection framework
- '''
+ """
+
def __init__(self, name):
self.name = name
self._items = []
@@ -624,86 +659,91 @@ class ItemResult(object):
self.suggestion = ""
self.epv = ""
self.des = ""
- self.category = 'other'
+ self.category = "other"
self.analysis = ""
def __iter__(self):
- '''
+ """
make iterable
- '''
+ """
return iter(self._items)
def __getitem__(self, idx):
- '''
+ """
get item
- '''
+ """
return self._items[idx]
def append(self, val):
- '''
+ """
append item
- '''
+ """
self._items.append(val)
@staticmethod
def parse(output):
- '''
+ """
parse output
- '''
+ """
item_result = None
local_item_result = None
host = None
idx = 0
for line in output.splitlines():
idx += 1
- if (idx == len(output.splitlines()) and
- local_item_result is not None):
+ if idx == len(output.splitlines()) and local_item_result is not None:
item_result.append(local_item_result)
current = line.strip()
- if (not current):
+ if not current:
continue
- if (current.startswith('[HOST ]')):
+ if current.startswith("[HOST ]"):
host = current.split()[-1].strip()
- if (current.startswith('[NAME ]')):
+ if current.startswith("[NAME ]"):
name = current.split()[-1].strip()
- if (item_result is None):
+ if item_result is None:
item_result = ItemResult(name)
- if (local_item_result is not None):
+ if local_item_result is not None:
item_result.append(local_item_result)
- local_item_result = LocalItemResult(current.split()[-1].strip(),
- host)
- if (current.startswith('[RESULT ]')):
+ local_item_result = LocalItemResult(current.split()[-1].strip(), host)
+ if current.startswith("[RESULT ]"):
local_item_result.rst = current.split()[-1].strip()
- if (current.startswith('[VALUE ]')):
+ if current.startswith("[VALUE ]"):
value = ItemResult.__parse_multi_line(output.splitlines()[idx:])
local_item_result.val = value
- if (current.startswith('[EXPECT ]')):
+ if current.startswith("[EXPECT ]"):
exp = ItemResult.__parse_multi_line(output.splitlines()[idx:])
local_item_result.epv = exp
- if (current.startswith('[DESCRIPT_EN]')):
+ if current.startswith("[DESCRIPT_EN]"):
des_en = ItemResult.__parse_multi_line(output.splitlines()[idx:])
local_item_result.des = des_en
- if (current.startswith('[SUGGEST ]')):
+ if current.startswith("[SUGGEST ]"):
sug = ItemResult.__parse_multi_line(output.splitlines()[idx:])
local_item_result.sug = sug
- if (current.startswith('[REFER ]')):
+ if current.startswith("[REFER ]"):
refer = ItemResult.__parse_multi_line(output.splitlines()[idx:])
local_item_result.raw = refer
return item_result
@staticmethod
def __parse_multi_line(lines):
- '''
+ """
parse line by line
- '''
+ """
vals = []
- starter = ('[HOST ]', '[NAME ]', '[RESULT ]',
- '[VALUE ]', '[REFER ]', '[EXPECT ]',
- '[DESCRIPT_EN]', '[SUGGEST ]')
+ starter = (
+ "[HOST ]",
+ "[NAME ]",
+ "[RESULT ]",
+ "[VALUE ]",
+ "[REFER ]",
+ "[EXPECT ]",
+ "[DESCRIPT_EN]",
+ "[SUGGEST ]",
+ )
for line in lines:
current = line.strip()
- if (current.startswith(starter)):
+ if current.startswith(starter):
break
else:
vals.append(current)
- return "\n".join(vals)
\ No newline at end of file
+ return "\n".join(vals)
diff --git a/pkg/deploy/action/inspection/inspection_scripts/mysql/common_func.py b/pkg/deploy/action/inspection/inspection_scripts/mysql/common_func.py
index 633f8522220020fbf82ae63a2b221e429065a627..f93d227dc242134cb0a17c6ed9c79f81b0f9b333 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/mysql/common_func.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/mysql/common_func.py
@@ -15,8 +15,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
py_version = platform.python_version()
if py_version[0] == "3":
@@ -41,4 +46,4 @@ def _exec_popen(cmd, values=None):
if stderr[-1:] == os.linesep:
stderr = stderr[:-1]
- return pobj.returncode, stdout, stderr
\ No newline at end of file
+ return pobj.returncode, stdout, stderr
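
_exec_popen above drives a bash child over stdin: the command is written first, then any extra input lines from values (e.g. passwords), and the pipe is closed so bash runs to completion. A hedged sketch of the same pattern; run_via_bash is an illustrative name, and text=True stands in for the module's manual encode/decode:

import subprocess

def run_via_bash(cmd, values=()):
    pobj = subprocess.Popen(
        ["bash"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    # bash reads the command, then any follow-up input lines, from stdin
    stdout, stderr = pobj.communicate("\n".join([cmd, *values]) + "\n")
    return pobj.returncode, stdout.strip(), stderr.strip()

print(run_via_bash("read pw; echo got $pw", ["secret"]))  # -> (0, 'got secret', '')
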
diff --git a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_connection_check.py b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_connection_check.py
index 57f78fd3843aaf10a9e99d8fb15f76ef633b429d..0a238174107b035508740505fdc35a471258c3a4 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_connection_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_connection_check.py
@@ -4,21 +4,16 @@ import json
import os
import subprocess
import sys
-sys.path.append('/mf_connector/inspection')
+
+sys.path.append("/mf_connector/inspection")
from log_tool import setup
from inspection_scripts.mysql.mysql_diskspace_check import DiskSpaceCheck
-class ConnectionCheck():
+class ConnectionCheck:
def __init__(self):
- self.result_json = {
- 'data': {},
- 'error': {
- 'code': 0,
- 'description': ''
- }
- }
+ self.result_json = {"data": {}, "error": {"code": 0, "description": ""}}
def connection_check(self, logger):
logger.info("connection check start!")
@@ -30,27 +25,28 @@ class ConnectionCheck():
if status:
logger.error("can not get engines information")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = output
+ self.result_json["error"]["description"] = output
else:
res = output.split("\n")
res = res[1:]
lines = [line.split("\t") for line in res]
for line in lines:
if line[0] == "CTC" and line[1] == "DEFAULT":
- self.result_json["data"]["RESULT"] = 'check connection succ!'
+ self.result_json["data"]["RESULT"] = "check connection succ!"
logger.info("check connection succ!")
break
else:
logger.error("the connection check failed")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = "the connection check failed, CTC is not default"
+ self.result_json["error"][
+ "description"
+ ] = "the connection check failed, CTC is not default"
return self.result_json
return self.result_json
-if __name__ == '__main__':
+if __name__ == "__main__":
mysql_log = setup("mysql")
cn = ConnectionCheck()
result_json = cn.connection_check(mysql_log)
print(json.dumps(result_json, indent=1))
-
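
The loop above parses the tab-separated output of show engines (the first line is the header row) and passes only when CTC is the DEFAULT engine. The parsing, sketched on made-up output:

output = (
    "Engine\tSupport\tComment\n"
    "InnoDB\tYES\tSupports transactions\n"
    "CTC\tDEFAULT\tCantian storage engine\n"
)
lines = [line.split("\t") for line in output.split("\n")[1:] if line]
ok = any(line[0] == "CTC" and line[1] == "DEFAULT" for line in lines)
print("check connection succ!" if ok else "CTC is not default")
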
diff --git a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_diskspace_check.py b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_diskspace_check.py
index d82c5799e6648d3b308815d40b208383af4d3d3a..7a1a6145710c8092fb506af72a6761bc6b9522f8 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_diskspace_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_diskspace_check.py
@@ -4,21 +4,16 @@ import json
import os
import subprocess
import sys
-sys.path.append('/mf_connector/inspection')
+
+sys.path.append("/mf_connector/inspection")
from log_tool import setup
from common_func import _exec_popen
-class DiskSpaceCheck():
+class DiskSpaceCheck:
def __init__(self):
- self.result_json = {
- 'data': {},
- 'error': {
- 'code': 0,
- 'description': ''
- }
- }
+ self.result_json = {"data": {}, "error": {"code": 0, "description": ""}}
@staticmethod
def get_cmd_res(cmd):
@@ -26,14 +21,14 @@ class DiskSpaceCheck():
return status, output
def pwd_check(self, logger):
- cmd_pwd = '/docker-entrypoint-initdb.d/encrypt -d $MYSQL_AGENT_PASSWORD'
+ cmd_pwd = "/docker-entrypoint-initdb.d/encrypt -d $MYSQL_AGENT_PASSWORD"
pwd_code, pwd = self.get_cmd_res(cmd_pwd)
if pwd_code == 0:
return pwd
else:
logger.error("can not get password")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = pwd
+ self.result_json["error"]["description"] = pwd
return self.result_json
def dir_check(self, logger):
@@ -41,14 +36,18 @@ class DiskSpaceCheck():
if isinstance(temp, dict):
return temp
password = temp.strip()
- cmd_dir = "mysql -uRDS_agent -p" + password + ''' -e"show global variables like 'datadir';"'''
+ cmd_dir = (
+ "mysql -uRDS_agent -p"
+ + password
+ + ''' -e"show global variables like 'datadir';"'''
+ )
datadir_code, res = self.get_cmd_res(cmd_dir)
if datadir_code == 0:
return res
else:
logger.error("can not get data directory")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = res
+ self.result_json["error"]["description"] = res
return self.result_json
def diskspace_check(self, logger):
@@ -64,22 +63,26 @@ class DiskSpaceCheck():
if int(use_space) < 90:
self.result_json["error"]["code"] = 0
self.result_json["error"]["description"] = ""
- self.result_json["data"]["RESULT"] = ' the used disk space are less than 90%, succ!'
+ self.result_json["data"][
+ "RESULT"
+ ] = " the used disk space are less than 90%, succ!"
logger.info("the remaining disk spaces check succ!")
return self.result_json
else:
logger.error("the remaining spaces are less than 10%")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = "the remaining spaces are less than 10% "
+ self.result_json["error"][
+ "description"
+ ] = "the remaining spaces are less than 10% "
return self.result_json
else:
logger.error("can not get the disk spaces info")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = value
+ self.result_json["error"]["description"] = value
return self.result_json
-if __name__ == '__main__':
+if __name__ == "__main__":
mysql_log = setup("mysql")
ds = DiskSpaceCheck()
result_json = ds.diskspace_check(mysql_log)
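
The decision above: the used percentage for the datadir filesystem (as reported by df) must stay below 90%. Standalone, with a made-up value:

use_space = "87"  # e.g. the Use% column from df, "%" already stripped
if int(use_space) < 90:
    print("the used disk space is less than 90%, succ!")
else:
    print("the remaining space is less than 10%")
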
diff --git a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_file_check.py b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_file_check.py
index 75bbc205b61a8d9d01ba29ffc0c9ae2473e50deb..b025a5e0686d0396afc7cdd129d157207851b5cd 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_file_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_file_check.py
@@ -4,26 +4,30 @@ import json
import os
import subprocess
import sys
-sys.path.append('/mf_connector/inspection')
+
+sys.path.append("/mf_connector/inspection")
from log_tool import setup
-class SharedFileCheck():
+class SharedFileCheck:
def __init__(self):
self.path = "/dev/shm/"
self.shm_list = [
- "cantian.0", "cantian.1", "cantian.2", "cantian.3", "cantian.4", "cantian.5", "cantian.6",
- "cantian.7", "cantian.8", "cantian.shm_unix_sock", "cantian_shm_config_0.txt",
- "cantian_shm_config_1.txt"
+ "cantian.0",
+ "cantian.1",
+ "cantian.2",
+ "cantian.3",
+ "cantian.4",
+ "cantian.5",
+ "cantian.6",
+ "cantian.7",
+ "cantian.8",
+ "cantian.shm_unix_sock",
+ "cantian_shm_config_0.txt",
+ "cantian_shm_config_1.txt",
]
- self.result_json = {
- 'data': {},
- 'error': {
- 'code': 0,
- 'description': ''
- }
- }
+ self.result_json = {"data": {}, "error": {"code": 0, "description": ""}}
def file_check(self, logger):
myfile = []
@@ -33,7 +37,7 @@ class SharedFileCheck():
else:
logger.error("there are some files missed")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = "not all shm files exist"
+ self.result_json["error"]["description"] = "not all shm files exist"
break
return myfile
@@ -44,13 +48,14 @@ class SharedFileCheck():
return self.result_json
for i in myfile:
# the valid uid is 6000 and the gid is 5000
- if os.stat(self.path + i)[4] == 6000 and \
- os.stat(self.path + i)[5] == 5000:
+ if os.stat(self.path + i)[4] == 6000 and os.stat(self.path + i)[5] == 5000:
uid.append(i)
else:
logger.error("not all uid of shm files are 5000")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = "not all uid of shm files are 5000 "
+ self.result_json["error"][
+ "description"
+ ] = "not all uid of shm files are 5000 "
break
return uid
@@ -61,19 +66,24 @@ class SharedFileCheck():
if uid != self.shm_list:
return self.result_json
for i in uid:
- if (os.access(self.path + i, os.R_OK) is True) and (os.access(self.path + i, os.W_OK) is True):
+ if (os.access(self.path + i, os.R_OK) is True) and (
+ os.access(self.path + i, os.W_OK) is True
+ ):
rights.append(i)
else:
logger.error("user does not have right to read or write")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = "not all files can be read and written"
+ self.result_json["error"][
+ "description"
+ ] = "not all files can be read and written"
break
if rights == self.shm_list:
- self.result_json["data"]["RESULT"] = 'check shm files succ!'
+ self.result_json["data"]["RESULT"] = "check shm files succ!"
return self.result_json
-if __name__ == '__main__':
+
+if __name__ == "__main__":
mysql_log = setup("mysql")
sf = SharedFileCheck()
result_json = sf.rights_check(mysql_log)
- print(json.dumps(result_json, indent=1))
\ No newline at end of file
+ print(json.dumps(result_json, indent=1))
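
The ownership rule above uses positional os.stat fields: index 4 is st_uid and index 5 is st_gid, which must be 6000 and 5000 for every shared-memory file. A small sketch against a stand-in path:

import os

path = "/tmp"  # stand-in; the real check walks the cantian.* files in /dev/shm
st = os.stat(path)
print("uid=%d gid=%d" % (st[4], st[5]))
print("ownership ok" if st[4] == 6000 and st[5] == 5000 else "unexpected uid/gid")
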
diff --git a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_transaction_isolation_check.py b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_transaction_isolation_check.py
index f345a0543fa95ab2f89cb9556eab19c9cbdd414c..293a904985077408094fd8da7dcc8d46a9bb8d87 100644
--- a/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_transaction_isolation_check.py
+++ b/pkg/deploy/action/inspection/inspection_scripts/mysql/mysql_transaction_isolation_check.py
@@ -3,53 +3,55 @@
import json
import os
import sys
-sys.path.append('/mf_connector/inspection')
+
+sys.path.append("/mf_connector/inspection")
from log_tool import setup
from inspection_scripts.mysql.mysql_diskspace_check import DiskSpaceCheck
-class TransactionIsolationCheck():
+class TransactionIsolationCheck:
def __init__(self):
- self.result_json = {
- 'data': {},
- 'error': {
- 'code': 0,
- 'description': ''
- }
- }
+ self.result_json = {"data": {}, "error": {"code": 0, "description": ""}}
def transaction_isolation_check(self, logger):
logger.info("transaction isolation level check start!")
password = DiskSpaceCheck().pwd_check(logger)
if isinstance(password, dict):
- self.result_json['error']['description'] = "Password is a dictionary."
+ self.result_json["error"]["description"] = "Password is a dictionary."
return self.result_json
- cmd = "mysql -uRDS_agent -p" + password + ''' -e"show variables like 'transaction_isolation';"'''
+ cmd = (
+ "mysql -uRDS_agent -p"
+ + password
+ + ''' -e"show variables like 'transaction_isolation';"'''
+ )
status, output = DiskSpaceCheck().get_cmd_res(cmd)
if status:
logger.error("can not get transaction isolation information")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = output
+ self.result_json["error"]["description"] = output
else:
res = output.split("\n")
res = res[1:]
lines = [line.split("\t") for line in res]
for line in lines:
if line[0] == "transaction_isolation" and line[1] == "READ-COMMITTED":
- self.result_json["data"]["RESULT"] = 'check transaction isolation succ!'
+ self.result_json["data"][
+ "RESULT"
+ ] = "check transaction isolation succ!"
logger.info("check transaction isolation succeed!")
break
else:
logger.error("the transaction isolation check failed")
self.result_json["error"]["code"] = -1
- self.result_json['error']['description'] = \
- "the transaction isolation check failed, isolation level is not READ-COMMITTED"
+ self.result_json["error"][
+ "description"
+ ] = "the transaction isolation check failed, isolation level is not READ-COMMITTED"
return self.result_json
-if __name__ == '__main__':
+if __name__ == "__main__":
mysql_log = setup("mysql")
cn = TransactionIsolationCheck()
result_json = cn.transaction_isolation_check(mysql_log)
- print(json.dumps(result_json, indent=1))
\ No newline at end of file
+ print(json.dumps(result_json, indent=1))
diff --git a/pkg/deploy/action/inspection/inspection_task.py b/pkg/deploy/action/inspection/inspection_task.py
index e84097a8139edcf956338011c8a7693a743d6099..124d22ea095f88c95b43ea0ccc1637e5f7893b83 100644
--- a/pkg/deploy/action/inspection/inspection_task.py
+++ b/pkg/deploy/action/inspection/inspection_task.py
@@ -14,23 +14,23 @@ sys.path.append(CUR_PATH)
import log_tool
from generate_html_results import GenHtmlRes
-LOG = log_tool.setup('om')
+LOG = log_tool.setup("om")
MAX_AUDIT_NUM = 10
DIR_NAME, _ = os.path.split(os.path.abspath(__file__))
-INSPECTION_PATH = str(Path('{}/inspections_log'.format(DIR_NAME)))
-FAIL = 'fail'
-SUCCESS = 'success'
-SUCCESS_ENUM = [0, '0']
+INSPECTION_PATH = str(Path("{}/inspections_log".format(DIR_NAME)))
+FAIL = "fail"
+SUCCESS = "success"
+SUCCESS_ENUM = [0, "0"]
CTSQL_IP = "127.0.0.1"
CTSQL_PORT = "1611"
-LOG_DIRECTORY = f'{CUR_PATH}/inspection_task_log'
+LOG_DIRECTORY = f"{CUR_PATH}/inspection_task_log"
class InspectionTask:
def __init__(self, _input_value, _use_smartkit=False):
- self.input_param = 'all'
+ self.input_param = "all"
self.inspection_map = self.read_inspection_config()
self.use_smartkit = _use_smartkit
@@ -46,12 +46,12 @@ class InspectionTask:
self.fail_list = []
self.deply_user = self.get_depoly_user()
self.user_map = {
- 'cantian': self.deply_user,
- 'cms': self.deply_user,
- 'dbstor': self.deply_user,
- 'mysql': self.deply_user,
- 'ctmgr': 'ctmgruser',
- 'ct_om': 'root'
+ "cantian": self.deply_user,
+ "cms": self.deply_user,
+ "dbstor": self.deply_user,
+ "mysql": self.deply_user,
+ "ctmgr": "ctmgruser",
+ "ct_om": "root",
}
@staticmethod
@@ -63,27 +63,29 @@ class InspectionTask:
return ""
@staticmethod
- def format_single_inspection_result(inspection_item, inspection_detail, execute_result, inspection_result):
+ def format_single_inspection_result(
+ inspection_item, inspection_detail, execute_result, inspection_result
+ ):
return_value = {
- 'inspection_item': inspection_item,
- 'description_zn': inspection_detail.get("description_zn"),
- 'description_en': inspection_detail.get("description_en"),
- 'component': inspection_detail.get("component"),
- 'inspection_result': execute_result,
- 'inspection_detail': inspection_result.get('data'),
- 'resource_en': inspection_detail.get("resource_en"),
- 'resource_zh': inspection_detail.get("resource_zh")
+ "inspection_item": inspection_item,
+ "description_zn": inspection_detail.get("description_zn"),
+ "description_en": inspection_detail.get("description_en"),
+ "component": inspection_detail.get("component"),
+ "inspection_result": execute_result,
+ "inspection_detail": inspection_result.get("data"),
+ "resource_en": inspection_detail.get("resource_en"),
+ "resource_zh": inspection_detail.get("resource_zh"),
}
if inspection_result and isinstance(inspection_result, dict):
- err_info = inspection_result.get('error', {})
- error_code = err_info.get('code')
+ err_info = inspection_result.get("error", {})
+ error_code = err_info.get("code")
if error_code is None:
return return_value
if error_code not in SUCCESS_ENUM:
- return_value['inspection_result'] = FAIL
- return_value['inspection_detail'] = {
- 'error': inspection_result.get('error')
+ return_value["inspection_result"] = FAIL
+ return_value["inspection_detail"] = {
+ "error": inspection_result.get("error")
}
return return_value
@@ -93,9 +95,11 @@ class InspectionTask:
if not inspection_detail:
raise ValueError("[error]: inspection item %s not exist" % inspection_item)
- if not os.path.exists(inspection_detail.get('inspection_file_path')):
- raise ValueError("[error]: inspection file: "
- "%s not exist" % str(inspection_detail.get('inspection_file_path')))
+ if not os.path.exists(inspection_detail.get("inspection_file_path")):
+ raise ValueError(
+ "[error]: inspection file: "
+ "%s not exist" % str(inspection_detail.get("inspection_file_path"))
+ )
@staticmethod
def res_format_check(res_output):
@@ -109,10 +113,12 @@ class InspectionTask:
if not isinstance(res_output, dict):
return False
- if 'data' not in res_output or 'error' not in res_output:
+ if "data" not in res_output or "error" not in res_output:
return False
- if isinstance(res_output.get('error'), dict) and 'code' in res_output.get('error'):
+ if isinstance(res_output.get("error"), dict) and "code" in res_output.get(
+ "error"
+ ):
return True
return False
@@ -125,7 +131,7 @@ class InspectionTask:
True: the input username is legal
False: the input username is illegal
"""
- reg_pattern = r'^\w+$'
+ reg_pattern = r"^\w+$"
reg_match_res = re.findall(reg_pattern, user_name)
if not reg_match_res or len(reg_match_res) >= 2:
return False
@@ -138,20 +144,27 @@ class InspectionTask:
standby_keystore = "/opt/cantian/common/config/standby_keystore_bak.ks"
sys.path.append("/opt/cantian/action/dbstor")
from kmc_adapter import CApiWrapper
- ctsql_ini_path = '/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini'
- kmc_decrypt = CApiWrapper(primary_keystore=primary_keystore, standby_keystore=standby_keystore)
+
+ ctsql_ini_path = "/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini"
+ kmc_decrypt = CApiWrapper(
+ primary_keystore=primary_keystore, standby_keystore=standby_keystore
+ )
kmc_decrypt.initialize()
ctsql_ini_data = file_reader(ctsql_ini_path)
- encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find('=') + 1:].strip()
+ encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find("=") + 1 :].strip()
try:
kmc_decrypt_pwd = kmc_decrypt.decrypt(encrypt_pwd)
except Exception as error:
- raise Exception('[result] decrypt ctsql passwd failed') from error
+ raise Exception("[result] decrypt ctsql passwd failed") from error
finally:
kmc_decrypt.finalize()
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
return kmc_decrypt_pwd
def read_inspection_config(self):
@@ -159,7 +172,7 @@ class InspectionTask:
reading inspection config file to obtain inspection component details
:return:
"""
- with open(self.inspection_json_file, encoding='utf-8') as file:
+ with open(self.inspection_json_file, encoding="utf-8") as file:
inspection_map = json.load(file)
return inspection_map
@@ -170,14 +183,16 @@ class InspectionTask:
:param _input_value: all or [component names]
:return:
"""
- if _input_value == 'all':
+ if _input_value == "all":
return list(self.inspection_map.keys())
- if not _input_value.startswith('[') or not _input_value.endswith(']'):
- LOG.error(f'input_value is: {_input_value}, format error')
- raise ValueError('[error]: Input value is not correct; should be "all" or "[component1, component2, ...]".')
+ if not _input_value.startswith("[") or not _input_value.endswith("]"):
+ LOG.error(f"input_value is: {_input_value}, format error")
+ raise ValueError(
+ '[error]: Input value is not correct; should be "all" or "[component1, component2, ...]".'
+ )
- return _input_value[1:-1].split(',')
+ return _input_value[1:-1].split(",")
def task_execute_single(self, inspection_detail, name_pwd, ip_port):
"""
@@ -197,8 +212,10 @@ class InspectionTask:
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
node_info = self.get_node_ip()
- audit_file = 'inspection_{}_{}'.format(node_info, str(cur_time.strftime("%Y%m%d%H%M%S")))
- audit_file_path = str(Path(self.audit_path + '/' + audit_file))
+ audit_file = "inspection_{}_{}".format(
+ node_info, str(cur_time.strftime("%Y%m%d%H%M%S"))
+ )
+ audit_file_path = str(Path(self.audit_path + "/" + audit_file))
if not os.path.exists(self.audit_path):
os.mkdir(self.audit_path)
@@ -213,8 +230,12 @@ class InspectionTask:
shutil.rmtree(temp_path)
audit_list.pop(0)
# 生成html格式巡检结果
- GenHtmlRes(self.inspection_result, audit_file_path, node_info).generate_html_zh()
- GenHtmlRes(self.inspection_result, audit_file_path, node_info).generate_html_en()
+ GenHtmlRes(
+ self.inspection_result, audit_file_path, node_info
+ ).generate_html_zh()
+ GenHtmlRes(
+ self.inspection_result, audit_file_path, node_info
+ ).generate_html_en()
return audit_file
@@ -223,11 +244,13 @@ class InspectionTask:
user_name = "sys"
system_pwd = self.decrypt_password()
else:
- user_name = input('Please input user:')
+ user_name = input("Please input user:")
system_pwd = getpass.getpass("Please input password:")
if not self.user_name_reg(user_name):
- raise ValueError(f"[error] the input username '{user_name}' is illegal, "
- f"please enter a correct username.")
+ raise ValueError(
+ f"[error] the input username '{user_name}' is illegal, "
+ f"please enter a correct username."
+ )
return system_pwd, user_name
def get_user_pwd(self):
@@ -238,7 +261,7 @@ class InspectionTask:
required is False: empty tuple ()
"""
for inspection_item in self.inspection_items:
- if self.inspection_map.get(inspection_item, {}).get('need_pwd'):
+ if self.inspection_map.get(inspection_item, {}).get("need_pwd"):
system_pwd, user_name = self.check_smartkit()
return user_name, system_pwd
@@ -252,7 +275,7 @@ class InspectionTask:
required is False: empty tuple ()
"""
for inspection_item in self.inspection_items:
- if self.inspection_map.get(inspection_item, {}).get('need_ip'):
+ if self.inspection_map.get(inspection_item, {}).get("need_ip"):
return CTSQL_IP, CTSQL_PORT
return ()
@@ -272,28 +295,39 @@ class InspectionTask:
raise ValueError(str(val_err)) from val_err
try:
- single_inspection_result = json.loads(self.task_execute_single(inspection_detail, name_pwd, ip_port))
+ single_inspection_result = json.loads(
+ self.task_execute_single(inspection_detail, name_pwd, ip_port)
+ )
except Exception as _err:
- LOG.error(f'execute item: {inspection_item} with {str(_err.__class__)} error: {str(_err)}')
- print(f'[error]: inspection component: {inspection_item} execute it\'s inspection script failed')
- formated_inspection_result = self.format_single_inspection_result(inspection_item,
- inspection_detail, FAIL, {})
+ LOG.error(
+ f"execute item: {inspection_item} with {str(_err.__class__)} error: {str(_err)}"
+ )
+ print(
+ f"[error]: inspection component: {inspection_item} execute it's inspection script failed"
+ )
+ formated_inspection_result = self.format_single_inspection_result(
+ inspection_item, inspection_detail, FAIL, {}
+ )
self.inspection_result.append(formated_inspection_result)
self.fail_list.append(inspection_item)
continue
if not self.res_format_check(single_inspection_result):
- print(f'[error]: inspection component: {inspection_item} obtain an unexpected result')
- formated_inspection_result = self.format_single_inspection_result(inspection_item,
- inspection_detail, FAIL, {})
+ print(
+ f"[error]: inspection component: {inspection_item} obtain an unexpected result"
+ )
+ formated_inspection_result = self.format_single_inspection_result(
+ inspection_item, inspection_detail, FAIL, {}
+ )
self.inspection_result.append(formated_inspection_result)
self.fail_list.append(inspection_item)
continue
- formated_inspection_result = self.format_single_inspection_result(inspection_item, inspection_detail,
- SUCCESS, single_inspection_result)
+ formated_inspection_result = self.format_single_inspection_result(
+ inspection_item, inspection_detail, SUCCESS, single_inspection_result
+ )
- if formated_inspection_result.get('inspection_result') == FAIL:
+ if formated_inspection_result.get("inspection_result") == FAIL:
self.inspection_result.append(formated_inspection_result)
self.fail_list.append(inspection_item)
continue
@@ -302,21 +336,27 @@ class InspectionTask:
self.success_list.append(inspection_item)
log_name = self.write_audit()
- log_full_path = str(Path(f'{self.audit_path}/{log_name}'))
+ log_full_path = str(Path(f"{self.audit_path}/{log_name}"))
if not self.fail_list:
- raise ValueError(f'All components inspection execute success; \ninspection result file is {log_full_path}')
+ raise ValueError(
+ f"All components inspection execute success; \ninspection result file is {log_full_path}"
+ )
if not self.success_list:
- raise ValueError(f'All components inspection execute failed; \ninspection result file is {log_full_path}')
+ raise ValueError(
+ f"All components inspection execute failed; \ninspection result file is {log_full_path}"
+ )
- raise ValueError(f'Component: [{", ".join(self.success_list)}] inspection execute success, '
- f'\ncomponent: [{", ".join(self.fail_list)} ]inspection execute failed; '
- f'\ninspection result file is {log_full_path}')
+ raise ValueError(
+ f'Component: [{", ".join(self.success_list)}] inspection execute success, '
+ f'\ncomponent: [{", ".join(self.fail_list)} ]inspection execute failed; '
+ f"\ninspection result file is {log_full_path}"
+ )
def file_reader(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.read()
@@ -328,30 +368,35 @@ def main(input_val):
# 运行期间修改日志路径为777,确保各模块可以在日志路径内创建日志文件
os.chmod(LOG_DIRECTORY, 0o777)
from declear_env import DeclearEnv
+
# 获取当前为参天还是mysql容器
current_env = DeclearEnv().get_env_type()
# 获取执行当前进程的用户名
current_executor = DeclearEnv().get_executor()
if current_env == "cantian":
from inspection_cantian import CantianInspection
+
cantian_inspection = CantianInspection(input_val, use_smartkit)
cantian_inspection.task_execute()
else:
from inspection_mysql import MysqlInspection
+
mysql_inspection = MysqlInspection(input_val, use_smartkit)
mysql_inspection.task_execute()
-if __name__ == '__main__':
+if __name__ == "__main__":
input_value = None
use_smartkit = False
try:
input_value = sys.argv[1]
except Exception as err:
_ = err
- exit('[error]: Input format is not correct, missing input value;'
- ' Input value could be "all" or "[component1,component2,...]".')
+ exit(
+ "[error]: Input format is not correct, missing input value;"
+ ' Input value could be "all" or "[component1,component2,...]".'
+ )
if len(sys.argv) == 3 and sys.argv[2] == "smartkit":
use_smartkit = True
sys.dont_write_bytecode = True
diff --git a/pkg/deploy/action/inspection/log_tool.py b/pkg/deploy/action/inspection/log_tool.py
index 701504b270d033fa4a997d4a1e27c2902cdc3209..6f4150f3d9442d3878b3975d76d632d91da776dc 100644
--- a/pkg/deploy/action/inspection/log_tool.py
+++ b/pkg/deploy/action/inspection/log_tool.py
@@ -13,9 +13,9 @@ CONSOLE_CONF = {
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
"logging_context_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s"
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
}
}
@@ -37,7 +37,7 @@ def _get_log_file_path(project):
os.chown(logger_file, owner_uid, owner_gid)
return logger_file
- return ''
+ return ""
def setup(project_name):
@@ -55,8 +55,10 @@ def setup(project_name):
log_path = _get_log_file_path(project_name)
if log_path:
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
log_root.addHandler(file_log)
log_root.addHandler(console)
@@ -64,7 +66,9 @@ def setup(project_name):
handler.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")))
+ datefmt=log_config.get("log_date_format"),
+ )
+ )
if log_config.get("debug"):
log_root.setLevel(logging.DEBUG)
diff --git a/pkg/deploy/action/logic/common_func.py b/pkg/deploy/action/logic/common_func.py
index 720a8c7c024cfa8174f400d4a823a105ca10b1a5..9692b80f15362400d7398ed501b9f94a8ebb5046 100644
--- a/pkg/deploy/action/logic/common_func.py
+++ b/pkg/deploy/action/logic/common_func.py
@@ -17,11 +17,11 @@ def close_child_process(proc):
os.killpg(proc.pid, signal.SIGKILL)
except ProcessLookupError as err:
_ = err
- return 'success'
+ return "success"
except Exception as err:
return str(err)
- return 'success'
+ return "success"
def retry(retry_times, log, task, wait_times):
@@ -40,7 +40,9 @@ def retry(retry_times, log, task, wait_times):
continue
else:
raise err
+
return wrapper
+
return decorate
@@ -51,8 +53,14 @@ def exec_popen(cmd, timeout=TIME_OUT):
return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
try:
@@ -80,12 +88,12 @@ def read_json_config(file_path):
def write_json_config(file_path, data):
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(file_path, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(file_path, flags, modes), "w") as fp:
json.dump(data, fp, indent=4)
def file_reader(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.read()
diff --git a/pkg/deploy/action/logic/storage_operate.py b/pkg/deploy/action/logic/storage_operate.py
index d64680bfc2b5653b9b209b08f12ee0c07d781f64..c48e2e4115791f148d7d17726861be6307e1cfe5 100644
--- a/pkg/deploy/action/logic/storage_operate.py
+++ b/pkg/deploy/action/logic/storage_operate.py
@@ -60,8 +60,11 @@ class StorageInf(object):
:param fs_name: 文件系统名称
:return:
"""
- mount_dir = f"/mnt/dbdata/remote/{fs_name}" if prefix is None \
+ mount_dir = (
+ f"/mnt/dbdata/remote/{fs_name}"
+ if prefix is None
else f"/mnt/dbdata/remote/{prefix}_{fs_name}"
+ )
mount_cmd = f"mount -t nfs {params} {logic_ip}:/{fs_name} " + mount_dir
mkdir_cmd = f"if [ ! -d {mount_dir} ];then mkdir -p {mount_dir};fi"
return_code, _, stderr = exec_popen(mkdir_cmd)
@@ -96,10 +99,10 @@ class StorageInf(object):
err_msg = err_msg % (status_code, error_code, error_des)
cls.handle_error_msg(err_msg)
rsp_code, rsp_result, rsp_data = result.get_rsp_data()
- error_code = rsp_result.get('code')
+ error_code = rsp_result.get("code")
if rsp_code != 0 or error_code != 0:
- error_des = rsp_result.get('description')
- error_sgt = rsp_result.get('suggestion')
+ error_des = rsp_result.get("description")
+ error_sgt = rsp_result.get("suggestion")
err_msg = err_msg % (error_code, error_des, error_sgt)
cls.handle_error_msg(err_msg)
return rsp_data
@@ -109,10 +112,10 @@ class StorageInf(object):
err_msg = err_msg + ", Detail:[%s]%s.Suggestion:%s"
result = ResponseParse(res)
rsp_code, rsp_result, rsp_data = result.get_omtask_rsp_data()
- if rsp_code != 0 or (rsp_result.get('code') and rsp_result.get('code') != 0):
- error_des = rsp_result.get('description')
- error_sgt = rsp_result.get('suggestion')
- err_msg = err_msg % (rsp_result.get('code'), error_des, error_sgt)
+ if rsp_code != 0 or (rsp_result.get("code") and rsp_result.get("code") != 0):
+ error_des = rsp_result.get("description")
+ error_sgt = rsp_result.get("suggestion")
+ err_msg = err_msg % (rsp_result.get("code"), error_des, error_sgt)
cls.handle_error_msg(err_msg)
return rsp_data
@@ -125,8 +128,10 @@ class StorageInf(object):
try:
self.rest_client.login()
except Exception as _err:
- err_msg = "Login DM[float ip:%s] failed, details: %s " \
- % (self.ip, str(_err))
+ err_msg = "Login DM[float ip:%s] failed, details: %s " % (
+ self.ip,
+ str(_err),
+ )
self.handle_error_msg(err_msg)
LOG.info("Login DM success.")
@@ -159,21 +164,29 @@ class StorageInf(object):
:return: None
"""
LOG.info("Query storage pool info start, pool_id:[%s]", pool_id)
- url = Constant.QUERY_POOL.format(deviceId=self.rest_client.device_id) + f"?filter=ID:{pool_id}"
+ url = (
+ Constant.QUERY_POOL.format(deviceId=self.rest_client.device_id)
+ + f"?filter=ID:{pool_id}"
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = f"Failed to query pool id:[{pool_id}]"
resp_data = self.result_parse(err_msg, res)
return resp_data
def query_vstore_count(self, vstore_id):
- url = Constant.QUERY_VSTORE.format(deviceId=self.rest_client.device_id) + f"?filter=ID:{vstore_id}"
+ url = (
+ Constant.QUERY_VSTORE.format(deviceId=self.rest_client.device_id)
+ + f"?filter=ID:{vstore_id}"
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = f"Failed to query vstore id:[{vstore_id}]"
rsp_data = self.result_parse(err_msg, res)
return rsp_data
def query_vstore_info(self, vstore_id):
- url = Constant.DELETE_VSTORE.format(deviceId=self.rest_client.device_id, id=vstore_id)
+ url = Constant.DELETE_VSTORE.format(
+ deviceId=self.rest_client.device_id, id=vstore_id
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = f"Failed to query vstore info:[{vstore_id}]"
rsp_data = self.result_parse(err_msg, res)
@@ -193,10 +206,11 @@ class StorageInf(object):
:param vstore_id: 租户id
:return: rollback_rate:回顾进度, rollback_status:回滚状态
"""
- query_url = Constant.QUERY_ROLLBACK_SNAPSHOT_PROCESS.format(deviceId=self.rest_client.device_id,
- fs_name=fs_name)
+ query_url = Constant.QUERY_ROLLBACK_SNAPSHOT_PROCESS.format(
+ deviceId=self.rest_client.device_id, fs_name=fs_name
+ )
url = query_url + f"&&vstoreId={vstore_id}"
- res = self.rest_client.normal_request(url, 'get')
+ res = self.rest_client.normal_request(url, "get")
err_msg = f"Failed to query fs[{fs_name}] rollback snapshot process."
rsp_data = self.result_parse(err_msg, res)
return rsp_data
@@ -244,10 +258,17 @@ class StorageInf(object):
:param vstore_id: 租户ID
:return:
"""
- query_url = Constant.NFS_SHARE_ADD_CLIENT.format(deviceId=self.rest_client.device_id)
- url = query_url + f"?filter=PARENTID:{nfs_share_id}&vstoreId={vstore_id}&range=[0-1]"
+ query_url = Constant.NFS_SHARE_ADD_CLIENT.format(
+ deviceId=self.rest_client.device_id
+ )
+ url = (
+ query_url
+ + f"?filter=PARENTID:{nfs_share_id}&vstoreId={vstore_id}&range=[0-1]"
+ )
res = self.rest_client.normal_request(url, "get")
- err_msg = f"Failed to query nfs share auth client, nfs_share_id:%s" % nfs_share_id
+ err_msg = (
+ f"Failed to query nfs share auth client, nfs_share_id:%s" % nfs_share_id
+ )
return self.result_parse(err_msg, res)
def query_logical_port_info(self, ip_addr, vstore_id=None):
@@ -257,9 +278,14 @@ class StorageInf(object):
:param vstore_id: 租户id
:return:
"""
- query_url = Constant.QUERY_LOGIC_PORT_INFO.format(deviceId=self.rest_client.device_id)
+ query_url = Constant.QUERY_LOGIC_PORT_INFO.format(
+ deviceId=self.rest_client.device_id
+ )
if vstore_id:
- url = query_url + f"?filter=IPV4ADDR:{ip_addr}&range=[0-100]&vstoreId={vstore_id}"
+ url = (
+ query_url
+ + f"?filter=IPV4ADDR:{ip_addr}&range=[0-100]&vstoreId={vstore_id}"
+ )
else:
url = query_url + f"?filter=IPV4ADDR:{ip_addr}&range=[0-100]"
res = self.rest_client.normal_request(url, "get")
@@ -272,7 +298,9 @@ class StorageInf(object):
:param vstore_id: 租户id, 默认是系统租户
:return:
"""
- query_url = Constant.QUERY_LOGIC_PORT_INFO.format(deviceId=self.rest_client.device_id)
+ query_url = Constant.QUERY_LOGIC_PORT_INFO.format(
+ deviceId=self.rest_client.device_id
+ )
url = query_url + f"?vstoreId={vstore_id}"
res = self.rest_client.normal_request(url, "get")
err_msg = f"Failed to query lf info"
@@ -305,12 +333,17 @@ class StorageInf(object):
:param vstore_id:
:return: 克隆文件系统ID
"""
- LOG.info("Begin to clone fs id[%s], new fs[%s], vstore id [%s]", parent_id, clone_fs_name, vstore_id)
+ LOG.info(
+ "Begin to clone fs id[%s], new fs[%s], vstore id [%s]",
+ parent_id,
+ clone_fs_name,
+ vstore_id,
+ )
url = Constant.CREATE_CLONE_FS.format(deviceId=self.rest_client.device_id)
data = {
"NAME": clone_fs_name,
"PARENTFILESYSTEMID": parent_id,
- "vstoreId": vstore_id
+ "vstoreId": vstore_id,
}
res = self.rest_client.normal_request(url, "post", data=data)
err_msg = f"Failed to clone fs[id:{parent_id}], clone fs name:{clone_fs_name} "
@@ -327,10 +360,10 @@ class StorageInf(object):
:return:
"""
data = {
- 'NAME': name,
- 'PARENTID': fs_id,
- 'vstoreId': vstore_id,
- 'PARENTTYPE': 40
+ "NAME": name,
+ "PARENTID": fs_id,
+ "vstoreId": vstore_id,
+ "PARENTTYPE": 40,
}
url = Constant.CREATE_FSSNAPSHOT.format(deviceId=self.rest_client.device_id)
res = self.rest_client.normal_request(url, "post", data=data)
@@ -348,11 +381,7 @@ class StorageInf(object):
"""
LOG.info("Begin to split clone fs[id:%s]", clone_fs_id)
url = Constant.SPLIT_CLONE_FS.format(deviceId=self.rest_client.device_id)
- data = {
- "ID": clone_fs_id,
- "action": action,
- "vstoreId": vstore_id
- }
+ data = {"ID": clone_fs_id, "action": action, "vstoreId": vstore_id}
res = self.rest_client.normal_request(url, "put", data=data)
err_msg = f"Failed to split clone fs[id:{clone_fs_id}]"
self.result_parse(err_msg, res)
@@ -365,10 +394,7 @@ class StorageInf(object):
:param vstore_id: 租户ID
:return:
"""
- data = {
- 'ID': snapshot_id,
- 'vstoreId': vstore_id
- }
+ data = {"ID": snapshot_id, "vstoreId": vstore_id}
url = Constant.ROLLBACK_SNAPSHOT.format(deviceId=self.rest_client.device_id)
res = self.rest_client.normal_request(url, "put", data=data)
err_msg = f"Failed to rollback snapshot, snapshot_id:[%s]" % snapshot_id
@@ -381,8 +407,9 @@ class StorageInf(object):
:param snapshot_id: 快照ID
:return:
"""
- url = (Constant.CREATE_FSSNAPSHOT + "/{id}").format(deviceId=self.rest_client.device_id,
- id=snapshot_id)
+ url = (Constant.CREATE_FSSNAPSHOT + "/{id}").format(
+ deviceId=self.rest_client.device_id, id=snapshot_id
+ )
res = self.rest_client.normal_request(url, "get")
err_msg = f"Failed to query snapshot info, snapshot_id:[%s]" % snapshot_id
rsp_data = self.result_parse(err_msg, res)
@@ -394,8 +421,9 @@ class StorageInf(object):
:param snapshot_id: 快照ID
:return:
"""
- url = (Constant.CREATE_FSSNAPSHOT + "/{id}").format(deviceId=self.rest_client.device_id,
- id=snapshot_id)
+ url = (Constant.CREATE_FSSNAPSHOT + "/{id}").format(
+ deviceId=self.rest_client.device_id, id=snapshot_id
+ )
res = self.rest_client.normal_request(url, "delete")
err_msg = f"Failed to delete snapshot, snapshot_id:[%s]" % snapshot_id
rsp_data = self.result_parse(err_msg, res)
@@ -431,10 +459,9 @@ class StorageInf(object):
"NAME": 客户端IP或主机名或网络组名。192.168.0.10或192.168.0.0/24或*,“*”表示全部客户端IP地址
:return: nfs client id
"""
- url = Constant.NFS_SHARE_ADD_CLIENT.\
- format(deviceId=self.rest_client.device_id)
+ url = Constant.NFS_SHARE_ADD_CLIENT.format(deviceId=self.rest_client.device_id)
res = self.rest_client.normal_request(url, "post", data=data)
- err_msg = f'Failed to create nfs share, data:{data}'
+ err_msg = f"Failed to create nfs share, data:{data}"
rsp_data = self.result_parse(err_msg, res)
LOG.info("Add nfs client success, data:%s", data)
return rsp_data.get("ID")
@@ -457,11 +484,7 @@ class StorageInf(object):
"""
LOG.info("Begin to open nfs 4.0 and 4.1 configer of vstore[%s]", vstore_id)
url = Constant.NFS_SERVICE.format(deviceId=self.rest_client.device_id)
- data = {
- "vstoreId": vstore_id,
- "SUPPORTV4": True,
- "SUPPORTV41": True
- }
+ data = {"vstoreId": vstore_id, "SUPPORTV4": True, "SUPPORTV41": True}
res = self.rest_client.normal_request(url, "put", data=data)
err_msg = f"Failed to open vstore{vstore_id} nfs service"
self.result_parse(err_msg, res)
@@ -494,8 +517,9 @@ class StorageInf(object):
:return: None
"""
LOG.info("Begin to del nfs share, id[%s]", nfs_share_id)
- del_share_url = Constant.NFS_SHARE_DELETE.\
- format(deviceId=self.rest_client.device_id, id=nfs_share_id)
+ del_share_url = Constant.NFS_SHARE_DELETE.format(
+ deviceId=self.rest_client.device_id, id=nfs_share_id
+ )
url = del_share_url + f"?vstoreId={vstore_id}"
res = self.rest_client.normal_request(url, "delete")
err_msg = f"Failed to delete {nfs_share_id} nfs share"
@@ -509,8 +533,7 @@ class StorageInf(object):
:return:
"""
LOG.info("Begin to del fs by id[%s]", fs_id)
- url = Constant.DELETE_FS.\
- format(deviceId=self.rest_client.device_id, id=fs_id)
+ url = Constant.DELETE_FS.format(deviceId=self.rest_client.device_id, id=fs_id)
res = self.rest_client.normal_request(url, "delete")
err_msg = f"Failed to delete {fs_id} fs"
self.result_parse(err_msg, res)
@@ -526,12 +549,14 @@ class StorageInf(object):
:return:
"""
LOG.info("Begin to delete cdp schedule by id[%s]", cdp_id)
- url = Constant.DELETE_FS_CDP_SCHEDULE.format(deviceId=self.rest_client.device_id)
+ url = Constant.DELETE_FS_CDP_SCHEDULE.format(
+ deviceId=self.rest_client.device_id
+ )
data = {
- "ID": fs_id,
- "TIMINGSNAPSHOTSCHEDULEID": cdp_id,
- "scheduleName": cdp_name,
- "vstoreId": vstore_id
+ "ID": fs_id,
+ "TIMINGSNAPSHOTSCHEDULEID": cdp_id,
+ "scheduleName": cdp_name,
+ "vstoreId": vstore_id,
}
res = self.rest_client.normal_request(url, "delete", data=data)
err_msg = f"Failed to delete {cdp_id} cdp schedule"
diff --git a/pkg/deploy/action/logicrep/logicrep_ctl.py b/pkg/deploy/action/logicrep/logicrep_ctl.py
index ad1c5b45c22d73623d07040d6d331f87aeb90848..eeed8a40183a981026eca35d413719a78a6bdc98 100644
--- a/pkg/deploy/action/logicrep/logicrep_ctl.py
+++ b/pkg/deploy/action/logicrep/logicrep_ctl.py
@@ -10,13 +10,24 @@ import glob
from get_config_info import get_value
from logging import handlers
-sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
- "inspection", "inspection_scripts", "ct_om"))
-
-sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dbstor"))
+sys.path.append(
+ os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "..",
+ "inspection",
+ "inspection_scripts",
+ "ct_om",
+ )
+)
+
+sys.path.append(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dbstor")
+)
from kmc_adapter import CApiWrapper
-sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "cantian"))
+sys.path.append(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "cantian")
+)
from Common import DefaultValue
CMD_CREATE_LREP = f"CREATE USER LREP IDENTIFIED BY '%s'; \
@@ -63,7 +74,7 @@ CMD_CHECK_OPEN = "SELECT LREP_MODE FROM SYS.DV_DATABASE;"
CMD_CHECK_ACTIVE = "SELECT * FROM DV_LRPL_DETAIL;"
DV_LRPL_DETAIL = "select * from DV_LRPL_DETAIL;"
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
-CTSQL_INI_PATH = r'/mnt/dbdata/local/cantian/tmp/data/cfg/*sql.ini'
+CTSQL_INI_PATH = r"/mnt/dbdata/local/cantian/tmp/data/cfg/*sql.ini"
PRIMARY_KEYSTORE = r"/opt/cantian/common/config/primary_keystore_bak.ks"
STANDBY_KEYSTORE = r"/opt/cantian/common/config/standby_keystore_bak.ks"
CANTIAN_CONFIG = os.path.join(CURRENT_PATH, "..", "..", "config", "deploy_param.json")
@@ -81,9 +92,7 @@ def setup():
for handler in list(log.handlers):
log.removeHandler(handler)
- file_log = handlers.RotatingFileHandler(
- LOG_FILE, maxBytes=6291456,
- backupCount=5)
+ file_log = handlers.RotatingFileHandler(LOG_FILE, maxBytes=6291456, backupCount=5)
log.addHandler(file_log)
log.addHandler(console)
@@ -91,8 +100,10 @@ def setup():
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s]"
- " [tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S"))
+ " [tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+ )
log.setLevel(logging.INFO)
return log
@@ -101,12 +112,12 @@ LOG = setup()
def file_reader(file_path):
- with open(file_path, 'r', encoding="utf-8") as file:
+ with open(file_path, "r", encoding="utf-8") as file:
return file.read()
def file_lines_reader(file_path):
- with open(file_path, 'r', encoding="utf-8") as file:
+ with open(file_path, "r", encoding="utf-8") as file:
return file.readlines()
@@ -138,7 +149,9 @@ class Logicrep:
self.mode = mode
@staticmethod
- def kmc_resovle_password(mode, plain_text, key1=PRIMARY_KEYSTORE, key2=STANDBY_KEYSTORE):
+ def kmc_resovle_password(
+ mode, plain_text, key1=PRIMARY_KEYSTORE, key2=STANDBY_KEYSTORE
+ ):
kmc_adapter = CApiWrapper(primary_keystore=key1, standby_keystore=key2)
kmc_adapter.initialize()
try:
@@ -147,11 +160,17 @@ class Logicrep:
if mode == "decrypted":
ret_pwd = kmc_adapter.decrypt(plain_text)
except Exception as error:
- raise Exception("Failed to %s password. output: %s" % (mode, error)) from error
-
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/software/tools/logicrep/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ raise Exception(
+ "Failed to %s password. output: %s" % (mode, error)
+ ) from error
+
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/software/tools/logicrep/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
kmc_adapter.finalize()
return ret_pwd
@@ -170,12 +189,16 @@ class Logicrep:
try:
import cantian_om_logicrep_check
except Exception as error:
- raise Exception(f"import inspection failed. Error info : {str(error)}") from error
+ raise Exception(
+ f"import inspection failed. Error info : {str(error)}"
+ ) from error
far = cantian_om_logicrep_check.LogicrepChecker()
result = far.get_format_output()
- if result['error']['code'] == -1:
- raise Exception(f"check failed.Error info : {result['error']['description']}")
+ if result["error"]["code"] == -1:
+ raise Exception(
+ f"check failed.Error info : {result['error']['description']}"
+ )
LOG.info("logicrep check success")
# 获取用户信息
@@ -189,7 +212,7 @@ class Logicrep:
self.deploy_mode = "nas" if info.get("deploy_mode") == "file" else "dbstore"
ctsql_file = glob.glob(CTSQL_INI_PATH)[0]
ctsql_ini_data = file_reader(ctsql_file)
- encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find('=') + 1:].strip()
+ encrypt_pwd = ctsql_ini_data[ctsql_ini_data.find("=") + 1 :].strip()
self.passwd = self.kmc_resovle_password("decrypted", encrypt_pwd)
else:
info_list = file_lines_reader(self.conf_file)
@@ -197,10 +220,12 @@ class Logicrep:
if "ds.username=" in line:
self.logicrep_user = line[12:].strip()
if "ds.passwd=" in line:
- self.passwd = self.kmc_resovle_password("decrypted",
- line[10:].strip(),
- key1=self.key1_file,
- key2=self.key2_file)
+ self.passwd = self.kmc_resovle_password(
+ "decrypted",
+ line[10:].strip(),
+ key1=self.key1_file,
+ key2=self.key2_file,
+ )
break
if not self.passwd:
raise Exception("get password failed")
@@ -226,15 +251,17 @@ class Logicrep:
if "PRIMARY" in stdout_data:
LOG.info("Current mode is primary")
return True
- LOG.info("Current mode is standby, not allowed to log in to zsql to perform operations.")
+ LOG.info(
+ "Current mode is standby, not allowed to log in to zsql to perform operations."
+ )
return False
def execute(self, sql, message):
for i in range(RETRY_TIMES):
- cmd = "source ~/.bashrc && echo -e '%s' | ctsql sys@127.0.0.1:%s -q -c \"%s\"" % (
- self.passwd,
- self.lsnr_port,
- sql)
+ cmd = (
+ "source ~/.bashrc && echo -e '%s' | ctsql sys@127.0.0.1:%s -q -c \"%s\""
+ % (self.passwd, self.lsnr_port, sql)
+ )
return_code, stdout_data, stderr_data = DefaultValue.exec_popen(cmd)
output = "%s%s" % (str(stdout_data), str(stderr_data))
@@ -243,17 +270,22 @@ class Logicrep:
# 数据库尚未启动完全
if re.match(".*CT-00827.*", result) or re.match(".*CT-00601.*", result):
time.sleep(30)
- LOG.info("Try to reconnect to the database, attempt:%s/%s", i + 1, RETRY_TIMES)
+ LOG.info(
+ "Try to reconnect to the database, attempt:%s/%s",
+ i + 1,
+ RETRY_TIMES,
+ )
continue
# 创建失败:改用户已经存在
if re.match(".*CT-00753.*", result):
- raise USEREXIST("%s already exist,please choose another name" % self.logicrep_user)
+ raise USEREXIST(
+ "%s already exist,please choose another name" % self.logicrep_user
+ )
if self.passwd in output:
output = "execute ctsql failed"
if return_code:
- raise Exception("Failed to %s by sql, output:%s"
- % (message, output))
+ raise Exception("Failed to %s by sql, output:%s" % (message, output))
# return code is 0, but output has error info, CT-xxx, ZS-xxx
if re.match(".*ZS-00001.*", result):
@@ -266,13 +298,24 @@ class Logicrep:
raise Exception("Execute sql timeout.")
def create_db_user(self):
- self.execute_sql(CMD_CREATE_LREP.replace("LREP", self.logicrep_user)
- % self.passwd, f"create {self.logicrep_user}")
- self.execute_sql(CMD_GRANT.replace("LREP", self.logicrep_user), f"create {self.logicrep_user}")
- self.execute_sql(CMD_CREATE_PROFILE.replace("LREP", self.logicrep_user), f"create {self.logicrep_user}")
+ self.execute_sql(
+ CMD_CREATE_LREP.replace("LREP", self.logicrep_user) % self.passwd,
+ f"create {self.logicrep_user}",
+ )
+ self.execute_sql(
+ CMD_GRANT.replace("LREP", self.logicrep_user),
+ f"create {self.logicrep_user}",
+ )
+ self.execute_sql(
+ CMD_CREATE_PROFILE.replace("LREP", self.logicrep_user),
+ f"create {self.logicrep_user}",
+ )
def set_resource_limit_true(self):
- self.execute_sql(CMD_RESOURCE_LIMIT.replace("LREP", self.logicrep_user), "set resource limit >>>")
+ self.execute_sql(
+ CMD_RESOURCE_LIMIT.replace("LREP", self.logicrep_user),
+ "set resource limit >>>",
+ )
def update_init_properties(self):
if self.node_id == "1":
@@ -282,24 +325,27 @@ class Logicrep:
info_list = file_lines_reader(self.init_conf_file)
for i, line in enumerate(info_list):
if "binlog.path=" in line:
- info_list[i] = f"binlog.path=/mnt/dbdata/remote/archive_{self.storage_archive_fs}/\n"
+ info_list[i] = (
+ f"binlog.path=/mnt/dbdata/remote/archive_{self.storage_archive_fs}/\n"
+ )
if "archive.path=" in line:
- info_list[i] = f"archive.path=/mnt/dbdata/remote/archive_{self.storage_archive_fs}/\n"
+ info_list[i] = (
+ f"archive.path=/mnt/dbdata/remote/archive_{self.storage_archive_fs}/\n"
+ )
if "deploy.mode" in line:
info_list[i] = f"deploy.mode={self.deploy_mode}\n"
- with os.fdopen(os.open(self.init_conf_file, flags, modes), 'w') as fs:
+ with os.fdopen(os.open(self.init_conf_file, flags, modes), "w") as fs:
fs.writelines(info_list)
# 秘钥等信息写入
def write_key(self):
LOG.info("begin to write key")
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
split_env.append("/opt/software/tools/logicrep/lib")
- os.environ['LD_LIBRARY_PATH'] = ":".join(split_env)
- new_encrypt_pwd = self.kmc_resovle_password("encrypted",
- self.passwd,
- key1=self.key1_file,
- key2=self.key2_file)
+ os.environ["LD_LIBRARY_PATH"] = ":".join(split_env)
+ new_encrypt_pwd = self.kmc_resovle_password(
+ "encrypted", self.passwd, key1=self.key1_file, key2=self.key2_file
+ )
url = ""
for ip in self.cmsip:
url += f"@{ip}:{self.lsnr_port}"
@@ -314,7 +360,7 @@ class Logicrep:
break
flags = os.O_WRONLY | os.O_CREAT
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.conf_file, flags, modes), 'w') as fs:
+ with os.fdopen(os.open(self.conf_file, flags, modes), "w") as fs:
fs.writelines(info_list)
self.update_init_properties()
@@ -385,16 +431,29 @@ class Logicrep:
if self.status_check():
self.execute_sql(CMD_CLOSE, "stop logicrep")
else:
- LOG.error("turn logicrep switch off failed, because cantiand does not exist.")
+ LOG.error(
+ "turn logicrep switch off failed, because cantiand does not exist."
+ )
self.stop()
# 参数解析
def main():
ctl_parse = argparse.ArgumentParser()
- ctl_parse.add_argument("--act", type=str,
- choices=["install", "init_container", "start", "startup", "stop", "shutdown", "pre_upgrade",
- "set_resource_limit"])
+ ctl_parse.add_argument(
+ "--act",
+ type=str,
+ choices=[
+ "install",
+ "init_container",
+ "start",
+ "startup",
+ "stop",
+ "shutdown",
+ "pre_upgrade",
+ "set_resource_limit",
+ ],
+ )
ctl_parse.add_argument("--mode", required=False, dest="mode")
arg = ctl_parse.parse_args()
act = arg.act
@@ -408,7 +467,7 @@ def main():
"stop": logicrep.stop,
"shutdown": logicrep.shutdown,
"pre_upgrade": logicrep.pre_upgrade,
- "set_resource_limit": logicrep.set_resource_limit
+ "set_resource_limit": logicrep.set_resource_limit,
}
func_dict.get(act)()
diff --git a/pkg/deploy/action/modify_env.py b/pkg/deploy/action/modify_env.py
index 19e89853c1982cd160de3c19fee13420b26a581e..f73d72e4bf6ea95e8d25710dde0cab360a900752 100644
--- a/pkg/deploy/action/modify_env.py
+++ b/pkg/deploy/action/modify_env.py
@@ -4,27 +4,29 @@ import os
import pathlib
import re
import stat
-
-
+
+
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CUR_PATH, "../"))
-
+
ENV_FILE = str(pathlib.Path(os.path.join(CUR_PATH, "env.sh")))
-CANTIAN_CONFIG = str(pathlib.Path(os.path.join(CUR_PATH, "cantian/install_config.json")))
-
-
+CANTIAN_CONFIG = str(
+ pathlib.Path(os.path.join(CUR_PATH, "cantian/install_config.json"))
+)
+
+
def read_file(file_path):
- with open(file_path, 'r', encoding='utf8') as file_path:
+ with open(file_path, "r", encoding="utf8") as file_path:
return file_path.read()
-
-
+
+
def write_file(write_data, file_path):
modes = stat.S_IRWXU | stat.S_IROTH | stat.S_IRGRP
flag = os.O_RDWR | os.O_CREAT | os.O_TRUNC
- with os.fdopen(os.open(file_path, flag, modes), 'w') as file_path:
+ with os.fdopen(os.open(file_path, flag, modes), "w") as file_path:
file_path.write(write_data)
-
-
+
+
def modify_env():
data = read_file(ENV_FILE)
new_data = ""
@@ -38,16 +40,16 @@ def modify_env():
new_line = re.sub(pattern_logicrep, "", new_line)
new_data += new_line + "\n"
write_file(new_data, ENV_FILE)
-
-
+
+
def modify_cantian_config():
data = json.loads(read_file(CANTIAN_CONFIG))
if "USE_DBSTOR" in data.keys():
del data["USE_DBSTOR"]
data = json.dumps(data, indent=4)
write_file(data, CANTIAN_CONFIG)
-
-
+
+
if __name__ == "__main__":
modify_env()
- modify_cantian_config()
\ No newline at end of file
+ modify_cantian_config()
diff --git a/pkg/deploy/action/mysql/mysqlctl.py b/pkg/deploy/action/mysql/mysqlctl.py
index f9a34a173139847a31ab48326b222f2991a3912b..c670e0550ec7a32774c1ec2ae697b718c0dd8e6d 100644
--- a/pkg/deploy/action/mysql/mysqlctl.py
+++ b/pkg/deploy/action/mysql/mysqlctl.py
@@ -23,7 +23,9 @@ class MysqlCtl(object):
if os.path.exists(INFO_SRC):
with open(INFO_SRC, "r") as f:
content = f.read()
- version = re.findall(r"Cantian patch source ([0-9]+.[0-9]+.[0-9]+)", content)
+ version = re.findall(
+ r"Cantian patch source ([0-9]+.[0-9]+.[0-9]+)", content
+ )
if len(version) > 0:
return version[0]
return version
@@ -37,9 +39,11 @@ class MysqlCtl(object):
single_process = install_config.get("M_RUNING_MODE")
cantian_in_container = get_value("cantian_in_container")
mysql_metadata_in_cantian = get_value("mysql_metadata_in_cantian")
- if (cantian_in_container != "0"
- or not mysql_metadata_in_cantian
- or single_process == "cantiand_in_cluster"):
+ if (
+ cantian_in_container != "0"
+ or not mysql_metadata_in_cantian
+ or single_process == "cantiand_in_cluster"
+ ):
return
patch_version = self.get_patch_version()
if patch_version != TARGET_VERSION:
diff --git a/pkg/deploy/action/obtains_lsid.py b/pkg/deploy/action/obtains_lsid.py
index c3a258244a228eb3618caeb9c879b75f40ecf55e..367e2236784ac3bd987f62bd4abbd38dca6b2007 100644
--- a/pkg/deploy/action/obtains_lsid.py
+++ b/pkg/deploy/action/obtains_lsid.py
@@ -16,10 +16,7 @@ if os.path.exists(INSTALL_FILE):
_tmp = f.read()
info = json.loads(_tmp)
else:
- info = {
- "cluster_id": "1",
- "random_seed": "1"
- }
+ info = {"cluster_id": "1", "random_seed": "1"}
class LSIDGenerate(object):
@@ -39,22 +36,31 @@ class LSIDGenerate(object):
@staticmethod
def generate_random_seed():
cluster_name = info.get("cluster_name", "")
- hash_object = int(hashlib.sha256(cluster_name.encode('utf-8')).hexdigest(), 16)
+ hash_object = int(hashlib.sha256(cluster_name.encode("utf-8")).hexdigest(), 16)
random.seed(hash_object)
return random.randint(0, 255)
def generate_lsid(self):
# 返回lsid十六进制
- return int(str(bin(self.n_type))[2:].rjust(2, "0")
- + str(bin(3))[2:].rjust(2, "0")
- + str(bin(self.cluster_id))[2:].rjust(8, "0")
- + str(bin(self.random_seed))[2:].rjust(8, "0")
- + str(bin(self.process_id))[2:].rjust(4, "0")
- + str(bin(self.node_id))[2:].rjust(8, "0"), 2)
+ return int(
+ str(bin(self.n_type))[2:].rjust(2, "0")
+ + str(bin(3))[2:].rjust(2, "0")
+ + str(bin(self.cluster_id))[2:].rjust(8, "0")
+ + str(bin(self.random_seed))[2:].rjust(8, "0")
+ + str(bin(self.process_id))[2:].rjust(4, "0")
+ + str(bin(self.node_id))[2:].rjust(8, "0"),
+ 2,
+ )
def execute(self):
self.random_seed = self.generate_random_seed()
- process_uuid = self.generate_uuid(self.n_type, self.cluster_id, self.random_seed, self.process_id, self.node_id)
+ process_uuid = self.generate_uuid(
+ self.n_type,
+ self.cluster_id,
+ self.random_seed,
+ self.process_id,
+ self.node_id,
+ )
ls_id = self.generate_lsid()
return ls_id, process_uuid
diff --git a/pkg/deploy/action/om_log.py b/pkg/deploy/action/om_log.py
index db54c3e426605cf019eb82de3408bb5769f25142..087259846cd3da913144087c8041b7092ed3e6a0 100644
--- a/pkg/deploy/action/om_log.py
+++ b/pkg/deploy/action/om_log.py
@@ -23,14 +23,30 @@ def _get_log_file_path(project):
os.makedirs(logger_dir)
return os.path.join(logger_dir, "{}.log".format(project))
- return ''
+ return ""
SENSITIVE_STR = [
- 'Password', 'passWord', 'PASSWORD', 'password', 'Pswd',
- 'PSWD', 'pwd', 'signature', 'HmacSHA256', 'newPasswd',
- 'private', 'certfile', 'secret', 'token', 'Token', 'pswd',
- 'passwd', 'mysql -u', 'session', 'cookie'
+ "Password",
+ "passWord",
+ "PASSWORD",
+ "password",
+ "Pswd",
+ "PSWD",
+ "pwd",
+ "signature",
+ "HmacSHA256",
+ "newPasswd",
+ "private",
+ "certfile",
+ "secret",
+ "token",
+ "Token",
+ "pswd",
+ "passwd",
+ "mysql -u",
+ "session",
+ "cookie",
]
@@ -52,7 +68,7 @@ def setup(project_name, console_info=True):
set_info = logging.INFO if console_info else logging.ERROR
console = logging.StreamHandler()
console.setLevel(set_info)
- console_formatter = logging.Formatter('[%(levelname)s ] %(message)s')
+ console_formatter = logging.Formatter("[%(levelname)s ] %(message)s")
console.setFormatter(console_formatter)
log_root = logging.getLogger(project_name)
@@ -61,12 +77,16 @@ def setup(project_name, console_info=True):
log_path = _get_log_file_path(project_name)
file_log = handlers.RotatingFileHandler(
- log_path, maxBytes=log_config.get("log_file_max_size"),
- backupCount=log_config.get("log_file_backup_count"))
+ log_path,
+ maxBytes=log_config.get("log_file_max_size"),
+ backupCount=log_config.get("log_file_backup_count"),
+ )
file_log.setFormatter(
logging.Formatter(
fmt=log_config.get("logging_context_format_string"),
- datefmt=log_config.get("log_date_format")))
+ datefmt=log_config.get("log_date_format"),
+ )
+ )
log_root.addHandler(file_log)
log_root.addHandler(console)
log_root.addFilter(DefaultLogFilter())
diff --git a/pkg/deploy/action/om_log_config.py b/pkg/deploy/action/om_log_config.py
index 0537fdc502e292668bf3b053e556220551220ab2..fe8323885b4c10c8f4d7718d5ad5d7aeeb110fd5 100644
--- a/pkg/deploy/action/om_log_config.py
+++ b/pkg/deploy/action/om_log_config.py
@@ -7,9 +7,9 @@ CONSOLE_CONF = {
"log_file_backup_count": 5,
"log_date_format": "%Y-%m-%d %H:%M:%S",
"logging_default_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
"logging_context_format_string": "%(asctime)s %(levelname)s [pid:%(process)d] [%(threadName)s] "
- "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s"
+ "[tid:%(thread)d] [%(filename)s:%(lineno)d %(funcName)s] %(message)s",
}
}
diff --git a/pkg/deploy/action/pre_install.py b/pkg/deploy/action/pre_install.py
index fbf70c922ef5a121ba9f7ac679a6f06251dcdd68..4207c399b183c26ceba7383c4abb6ebab924403b 100644
--- a/pkg/deploy/action/pre_install.py
+++ b/pkg/deploy/action/pre_install.py
@@ -19,44 +19,54 @@ NEEDED_MEM_SIZE = 16 * 1024 # M
dir_name, _ = os.path.split(os.path.abspath(__file__))
PKG_DIR = os.path.abspath(os.path.join(dir_name, ".."))
-CANTIAN_MEM_SPEC_FILE = os.path.join(dir_name, "config", "container_conf", "init_conf", "mem_spec")
+CANTIAN_MEM_SPEC_FILE = os.path.join(
+ dir_name, "config", "container_conf", "init_conf", "mem_spec"
+)
CANTIAND_INI_FILE = "/mnt/dbdata/local/cantian/tmp/data/cfg/cantiand.ini"
SINGLE_DOUBLE_PROCESS_MAP = {
"0": "cantiand_in_cluster",
- "1": "cantiand_with_mysql_in_cluster"
+ "1": "cantiand_with_mysql_in_cluster",
}
-ip_check_element = {
- 'cantian_vlan_ip',
- 'storage_vlan_ip',
- 'cms_ip'
-}
+ip_check_element = {"cantian_vlan_ip", "storage_vlan_ip", "cms_ip"}
ping_check_element = {
- 'cantian_vlan_ip',
- 'storage_vlan_ip',
- 'cms_ip',
- 'share_logic_ip',
- 'archive_logic_ip',
- 'metadata_logic_ip',
- 'storage_logic_ip'
+ "cantian_vlan_ip",
+ "storage_vlan_ip",
+ "cms_ip",
+ "share_logic_ip",
+ "archive_logic_ip",
+ "metadata_logic_ip",
+ "storage_logic_ip",
}
kernel_element = {
- 'TEMP_BUFFER_SIZE',
- 'DATA_BUFFER_SIZE',
- 'SHARED_POOL_SIZE',
- 'LOG_BUFFER_SIZE',
- 'SESSIONS',
- 'VARIANT_MEMORY_AREA_SIZE',
- '_INDEX_BUFFER_SIZE'
+ "TEMP_BUFFER_SIZE",
+ "DATA_BUFFER_SIZE",
+ "SHARED_POOL_SIZE",
+ "LOG_BUFFER_SIZE",
+ "SESSIONS",
+ "VARIANT_MEMORY_AREA_SIZE",
+ "_INDEX_BUFFER_SIZE",
}
use_dbstor = ["dbstor", "combined"]
-UnitConversionInfo = collections.namedtuple('UnitConversionInfo', ['tmp_gb', 'tmp_mb', 'tmp_kb', 'key', 'value',
- 'sga_buff_size', 'temp_buffer_size',
- 'data_buffer_size', 'shared_pool_size',
- 'log_buffer_size'])
+UnitConversionInfo = collections.namedtuple(
+ "UnitConversionInfo",
+ [
+ "tmp_gb",
+ "tmp_mb",
+ "tmp_kb",
+ "key",
+ "value",
+ "sga_buff_size",
+ "temp_buffer_size",
+ "data_buffer_size",
+ "shared_pool_size",
+ "log_buffer_size",
+ ],
+)
+
class ConfigChecker:
"""
@@ -66,7 +76,7 @@ class ConfigChecker:
@staticmethod
def node_id(value):
- node_id_enum = {'0', '1'}
+ node_id_enum = {"0", "1"}
if value not in node_id_enum:
return False
@@ -74,7 +84,7 @@ class ConfigChecker:
@staticmethod
def install_type(value):
- install_type_enum = {'override', 'reserve'}
+ install_type_enum = {"override", "reserve"}
if value not in install_type_enum:
return False
@@ -82,7 +92,7 @@ class ConfigChecker:
@staticmethod
def link_type(value):
- link_type_enum = {'1', '0', '2'} # 1为rdma 0为tcp 2为rdma 1823
+ link_type_enum = {"1", "0", "2"} # 1为rdma 0为tcp 2为rdma 1823
if value not in link_type_enum:
return False
@@ -90,7 +100,7 @@ class ConfigChecker:
@staticmethod
def db_type(value):
- db_type_enum = {'0', '1', '2'}
+ db_type_enum = {"0", "1", "2"}
if value not in db_type_enum:
return False
@@ -98,7 +108,7 @@ class ConfigChecker:
@staticmethod
def mysql_in_container(value):
- mysql_in_container_enum = {'0', '1'}
+ mysql_in_container_enum = {"0", "1"}
if value not in mysql_in_container_enum:
return False
@@ -119,13 +129,13 @@ class ConfigChecker:
return False
return True
-
+
@staticmethod
def cantian_in_container(value):
- cantian_in_container_enum = {'0', '1', '2'}
+ cantian_in_container_enum = {"0", "1", "2"}
if value not in cantian_in_container_enum:
return False
-
+
return True
@staticmethod
@@ -133,11 +143,11 @@ class ConfigChecker:
try:
value = int(value)
except Exception as error:
- LOG.error('cluster id type must be int : %s', str(error))
+ LOG.error("cluster id type must be int : %s", str(error))
return False
if value < 0 or value > 255:
- LOG.error('cluster id cannot be less than 0 or more than 255')
+ LOG.error("cluster id cannot be less than 0 or more than 255")
return False
return True
@@ -145,7 +155,7 @@ class ConfigChecker:
@staticmethod
def cluster_name(value):
if len(value) > 64 or not value:
- LOG.error('cluster name cannot be more than 64 or less than 1 in length')
+ LOG.error("cluster name cannot be more than 64 or less than 1 in length")
return False
return True
@@ -154,7 +164,7 @@ class ConfigChecker:
if value not in ["UC", "TCP", "UC_RDMA"]:
return False
return True
-
+
@staticmethod
def mes_ssl_switch(value):
if not isinstance(value, bool):
@@ -174,10 +184,10 @@ class ConfigChecker:
if int(value) <= 0:
return False
except Exception as error:
- LOG.error('redo_num type must be int : %s', str(error))
+ LOG.error("redo_num type must be int : %s", str(error))
return False
if int(value) < 3 or int(value) > 256:
- LOG.error('redo_num cannot be less than 3 or more than 256')
+ LOG.error("redo_num cannot be less than 3 or more than 256")
return False
return True
@@ -190,7 +200,7 @@ class ConfigChecker:
if int(int_value) <= 0:
return False
except Exception as error:
- LOG.error('redo_size type must be int : %s', str(error))
+ LOG.error("redo_size type must be int : %s", str(error))
return False
return True
@@ -220,7 +230,7 @@ class ConfigChecker:
value = int(value)
except Exception as error:
- LOG.error('dbstor_fs_vstore id type must be int : %s', str(error))
+ LOG.error("dbstor_fs_vstore id type must be int : %s", str(error))
return False
return True
@@ -250,8 +260,10 @@ class CheckBase(metaclass=abc.ABCMeta):
class CheckMem(CheckBase):
def __init__(self):
- super().__init__('memory available size smaller than {}M'.format(NEEDED_MEM_SIZE),
- 'current memory size {}M'.format(self.get_mem_available()))
+ super().__init__(
+ "memory available size smaller than {}M".format(NEEDED_MEM_SIZE),
+ "current memory size {}M".format(self.get_mem_available()),
+ )
@staticmethod
def get_mem_available():
@@ -260,15 +272,15 @@ class CheckMem(CheckBase):
return:单位M
"""
res = 0
- with open('/proc/meminfo') as file_path:
+ with open("/proc/meminfo") as file_path:
for line in file_path.readlines():
if "MemFree:" in line:
- mem_free = line.split(':')[1].strip()
+ mem_free = line.split(":")[1].strip()
mem_free = mem_free.split(" ")[0]
res += int(mem_free) // 1024
if "MemAvailable" in line:
- mem_avail = line.split(':')[1].strip()
+ mem_avail = line.split(":")[1].strip()
mem_avail = mem_avail.split(" ")[0]
res += int(mem_avail) // 1024
@@ -280,8 +292,10 @@ class CheckMem(CheckBase):
class CheckDisk(CheckBase):
def __init__(self):
- super().__init__('disk capacity available size smaller than {}M'.format(NEEDED_SIZE),
- 'current disk capacity {}M'.format(self.get_disk_available()))
+ super().__init__(
+ "disk capacity available size smaller than {}M".format(NEEDED_SIZE),
+ "current disk capacity {}M".format(self.get_disk_available()),
+ )
@staticmethod
def find_dir_path():
@@ -319,26 +333,64 @@ class CheckInstallPath(CheckBase):
class CheckInstallConfig(CheckBase):
def __init__(self, config_path=None):
- super().__init__("check config param", 'please check params in json file {}'.format(config_path))
+ super().__init__(
+ "check config param",
+ "please check params in json file {}".format(config_path),
+ )
self.config_path = config_path
self.value_checker = ConfigChecker
self.config_key = {
- 'deploy_user', 'node_id', 'cms_ip', 'storage_dbstor_fs', 'storage_share_fs', 'storage_archive_fs',
- 'storage_metadata_fs', 'share_logic_ip', 'archive_logic_ip', 'metadata_logic_ip', 'db_type',
- 'MAX_ARCH_FILES_SIZE', 'mysql_in_container', 'mysql_metadata_in_cantian', 'storage_logic_ip', 'deploy_mode',
- 'mes_ssl_switch', 'cantian_in_container', 'deploy_policy', 'link_type', 'ca_path', 'crt_path', 'key_path'
+ "deploy_user",
+ "node_id",
+ "cms_ip",
+ "storage_dbstor_fs",
+ "storage_share_fs",
+ "storage_archive_fs",
+ "storage_metadata_fs",
+ "share_logic_ip",
+ "archive_logic_ip",
+ "metadata_logic_ip",
+ "db_type",
+ "MAX_ARCH_FILES_SIZE",
+ "mysql_in_container",
+ "mysql_metadata_in_cantian",
+ "storage_logic_ip",
+ "deploy_mode",
+ "mes_ssl_switch",
+ "cantian_in_container",
+ "deploy_policy",
+ "link_type",
+ "ca_path",
+ "crt_path",
+ "key_path",
}
self.dss_config_key = {
- 'deploy_user', 'node_id', 'cms_ip', 'db_type', 'cantian_in_container',
- 'MAX_ARCH_FILES_SIZE', 'mysql_in_container', 'mysql_metadata_in_cantian',
- 'deploy_mode', 'mes_ssl_switch', "redo_num", "redo_size"}
- self.dbstor_config_key = {
- 'cluster_name', 'cantian_vlan_ip', 'storage_vlan_ip', 'link_type', 'storage_dbstor_page_fs',
- 'kerberos_key', 'cluster_id', 'mes_type', "vstore_id", "dbstor_fs_vstore_id"
+ "deploy_user",
+ "node_id",
+ "cms_ip",
+ "db_type",
+ "cantian_in_container",
+ "MAX_ARCH_FILES_SIZE",
+ "mysql_in_container",
+ "mysql_metadata_in_cantian",
+ "deploy_mode",
+ "mes_ssl_switch",
+ "redo_num",
+ "redo_size",
}
- self.file_config_key = {
- "redo_num", "redo_size"
+ self.dbstor_config_key = {
+ "cluster_name",
+ "cantian_vlan_ip",
+ "storage_vlan_ip",
+ "link_type",
+ "storage_dbstor_page_fs",
+ "kerberos_key",
+ "cluster_id",
+ "mes_type",
+ "vstore_id",
+ "dbstor_fs_vstore_id",
}
+ self.file_config_key = {"redo_num", "redo_size"}
self.mes_type_key = {"ca_path", "crt_path", "key_path"}
self.config_params = {}
self.cluster_name = None
@@ -356,7 +408,7 @@ class CheckInstallConfig(CheckBase):
socket.inet_aton(_ip)
except socket.error:
return False
- return _ip.count('.') == 3
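+            # inet_aton also accepts short forms such as "127.1", so additionally require a full dotted quad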
+ return _ip.count(".") == 3
except socket.error:
return False
return True
@@ -378,16 +430,30 @@ class CheckInstallConfig(CheckBase):
process_list = []
for index, cmd in enumerate(cmd_list):
if index == 0:
- _p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
+ _p = subprocess.Popen(
+ shlex.split(cmd),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
else:
- _p = subprocess.Popen(shlex.split(cmd), stdin=process_list[index - 1].stdout,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
+ _p = subprocess.Popen(
+ shlex.split(cmd),
+ stdin=process_list[index - 1].stdout,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
process_list.append(_p)
try:
stdout, stderr = process_list[-1].communicate(timeout=30)
except Exception as err:
return -1, str(err), -1
- return stdout.decode().strip("\n"), stderr.decode().strip("\n"), process_list[-1].returncode
+ return (
+ stdout.decode().strip("\n"),
+ stderr.decode().strip("\n"),
+ process_list[-1].returncode,
+ )
@staticmethod
def check_cantian_mem_spec():
@@ -395,8 +461,11 @@ class CheckInstallConfig(CheckBase):
with open(CANTIAN_MEM_SPEC_FILE, encoding="utf-8") as f:
mem_spec = json.load(f)
if mem_spec not in ["0", "1", "2", "3"]:
- LOG.error("Check mem spec failed, current value[%s], " \
- "value range [\"0\", \"1\", \"2\", \"3\"]", mem_spec)
+ LOG.error(
+ "Check mem spec failed, current value[%s], "
+ 'value range ["0", "1", "2", "3"]',
+ mem_spec,
+ )
return False
return True
@@ -421,19 +490,23 @@ class CheckInstallConfig(CheckBase):
return False
scale = 0
for i in range(len(cantian_vlan_ips)):
- scale += len(cantian_vlan_ips[i].split(";")) * len(storage_vlan_ips[i].split(";"))
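+            # count every combination of cantian and storage vlan ips at the same index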
+ scale += len(cantian_vlan_ips[i].split(";")) * len(
+ storage_vlan_ips[i].split(";")
+ )
if scale > 32:
- LOG.error("cantian_vlan_ip and storage_vlan_ip scale should be less than 32.")
+ LOG.error(
+                "cantian_vlan_ip and storage_vlan_ip scale should not exceed 32."
+ )
return False
return True
def read_install_config(self):
try:
- with open(self.config_path, 'r', encoding='utf8') as file_path:
+ with open(self.config_path, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
return json_data
except Exception as error:
- LOG.error('load %s error, error: %s', self.config_path, str(error))
+ LOG.error("load %s error, error: %s", self.config_path, str(error))
return {}
@@ -443,15 +516,30 @@ class CheckInstallConfig(CheckBase):
        # If a key listed in config_key is missing from install_config.json, report an error.
for element in not_in_either:
            # Some parameters are ignored in the NAS-free (dbstor) deploy mode
- dbstor_ignore_params = {"storage_metadata_fs", "share_logic_ip", "archive_logic_ip", "metadata_logic_ip",
- "vstore_id", "kerberos_key", "ca_path", "crt_path", "key_path"}
+ dbstor_ignore_params = {
+ "storage_metadata_fs",
+ "share_logic_ip",
+ "archive_logic_ip",
+ "metadata_logic_ip",
+ "vstore_id",
+ "kerberos_key",
+ "ca_path",
+ "crt_path",
+ "key_path",
+ }
combined_ignore_params = {"share_logic_ip", "vstore_id"}
- if element in dbstor_ignore_params and install_config['deploy_mode'] == "dbstor":
+ if (
+ element in dbstor_ignore_params
+ and install_config["deploy_mode"] == "dbstor"
+ ):
continue
- if element in combined_ignore_params and install_config['deploy_mode'] == "combined":
+ if (
+ element in combined_ignore_params
+ and install_config["deploy_mode"] == "combined"
+ ):
continue
if element not in install_config_keys:
- LOG.error('config_params.json need param %s', element)
+ LOG.error("config_params.json need param %s", element)
return False
return True
@@ -491,30 +579,50 @@ class CheckInstallConfig(CheckBase):
def write_result_to_json(self):
modes = stat.S_IRWXU | stat.S_IROTH | stat.S_IRGRP
flag = os.O_RDWR | os.O_CREAT | os.O_TRUNC
- with os.fdopen(os.open(str(Path('{}/deploy_param.json'.format(dir_name))), flag, modes), 'w') as file_path:
+ with os.fdopen(
+ os.open(str(Path("{}/deploy_param.json".format(dir_name))), flag, modes),
+ "w",
+ ) as file_path:
config_params = json.dumps(self.config_params, indent=4)
file_path.write(config_params)
def update_config_params(self):
        # In domain-name deployments, when share_logic_ip, archive_logic_ip and metadata_logic_ip are empty, update these fields to cluster_name
- if self.config_params.get("share_logic_ip") == "" and \
- self.config_params.get("archive_logic_ip") == "" and \
- self.config_params.get("metadata_logic_ip") == "":
- self.config_params["share_logic_ip"] = self.config_params.get("cluster_name")
- self.config_params["archive_logic_ip"] = self.config_params.get("cluster_name")
- self.config_params["metadata_logic_ip"] = self.config_params.get("cluster_name")
+ if (
+ self.config_params.get("share_logic_ip") == ""
+ and self.config_params.get("archive_logic_ip") == ""
+ and self.config_params.get("metadata_logic_ip") == ""
+ ):
+ self.config_params["share_logic_ip"] = self.config_params.get(
+ "cluster_name"
+ )
+ self.config_params["archive_logic_ip"] = self.config_params.get(
+ "cluster_name"
+ )
+ self.config_params["metadata_logic_ip"] = self.config_params.get(
+ "cluster_name"
+ )
modes = stat.S_IRWXU | stat.S_IROTH | stat.S_IRGRP
flag = os.O_RDWR | os.O_CREAT | os.O_TRUNC
config_params = json.dumps(self.config_params, indent=4)
- with os.fdopen(os.open(self.config_path, flag, modes), 'w') as file_path:
+ with os.fdopen(os.open(self.config_path, flag, modes), "w") as file_path:
file_path.write(config_params)
def do_unit_conversion(self, get_unit_conversion_info):
- tmp_gb, tmp_mb, tmb_kb, key, value,\
- sga_buff_size, temp_buffer_size, data_buffer_size,\
- shared_pool_size, log_buffer_size = get_unit_conversion_info
- if value[0: -1].isdigit() and value[-1:] in ["G", "M", "K"]:
+ (
+ tmp_gb,
+ tmp_mb,
+ tmb_kb,
+ key,
+ value,
+ sga_buff_size,
+ temp_buffer_size,
+ data_buffer_size,
+ shared_pool_size,
+ log_buffer_size,
+ ) = get_unit_conversion_info
+ if value[0:-1].isdigit() and value[-1:] in ["G", "M", "K"]:
unit_map = {
"G": tmp_gb,
"M": tmp_mb,
@@ -522,7 +630,7 @@ class CheckInstallConfig(CheckBase):
}
size_unit = unit_map.get(value[-1:])
sga_buff_size += int(value[0:-1]) * size_unit
-
+
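+        # an explicitly configured buffer replaces its default: subtract the default that was added during initialization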
if key == "TEMP_BUFFER_SIZE":
sga_buff_size -= temp_buffer_size
if key == "DATA_BUFFER_SIZE":
@@ -534,7 +642,7 @@ class CheckInstallConfig(CheckBase):
if key == "SESSIONS":
buff_size_pre_session = 5.5 * tmp_gb / 1024
sga_buff_size += int(value) * buff_size_pre_session
-
+
return sga_buff_size
def check_sga_buff_size(self):
@@ -548,37 +656,53 @@ class CheckInstallConfig(CheckBase):
shared_pool_size = 128 * tmp_mb
data_buffer_size = 128 * tmp_mb
temp_buffer_size = 32 * tmp_mb
- sga_buff_size = (log_buffer_size + shared_pool_size + data_buffer_size + temp_buffer_size)
+ sga_buff_size = (
+ log_buffer_size + shared_pool_size + data_buffer_size + temp_buffer_size
+ )
# parse the value of kernel parameters
modes = stat.S_IWUSR | stat.S_IRUSR
flags = os.O_RDONLY
- with os.fdopen(os.open(CANTIAND_INI_FILE, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(CANTIAND_INI_FILE, flags, modes), "r") as fp:
for line in fp:
if line == "\n":
continue
(key, value) = line.split(" = ")
if key in kernel_element:
                    # Unit conversion
- get_unit_conversion_info = UnitConversionInfo(tmp_gb, tmp_mb, tmp_kb, key, value.strip(),
- sga_buff_size, temp_buffer_size, data_buffer_size,
- shared_pool_size, log_buffer_size)
+ get_unit_conversion_info = UnitConversionInfo(
+ tmp_gb,
+ tmp_mb,
+ tmp_kb,
+ key,
+ value.strip(),
+ sga_buff_size,
+ temp_buffer_size,
+ data_buffer_size,
+ shared_pool_size,
+ log_buffer_size,
+ )
sga_buff_size = self.do_unit_conversion(get_unit_conversion_info)
-
+
# check sga buff size
cmd = "cat /proc/meminfo |grep -wE 'MemFree:|Buffers:|Cached:|SwapCached' |awk '{sum += $2};END {print sum}'"
ret_code, cur_avi_memory, stderr = exec_popen(cmd)
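+        # cur_avi_memory is in kB: awk sums the second column of /proc/meminfo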
if ret_code:
- LOG.error("cannot get shmmax parameters, command: %s, err: %s" % (cmd, stderr))
+ LOG.error(
+ "cannot get shmmax parameters, command: %s, err: %s" % (cmd, stderr)
+ )
return False
if sga_buff_size < 114 * tmp_mb:
LOG.error("sga buffer size should not less than 114MB, please check it!")
return False
-
+
# memory for Mysql, share memory, Dbstor, and CMS
sga_buff_size += 12.2 * tmp_gb
if int(sga_buff_size) > int(cur_avi_memory) * tmp_kb:
- LOG.error("sga buffer size(%.2f GB) should less than availble memory(%.2f GB), please check it!" % (int(sga_buff_size) / tmp_gb, int(cur_avi_memory) / tmp_mb))
+ LOG.error(
+                "sga buffer size (%.2f GB) should be less than available memory (%.2f GB), please check it!"
+ % (int(sga_buff_size) / tmp_gb, int(cur_avi_memory) / tmp_mb)
+ )
return False
cmd = r"cat /proc/1/environ | tr '\0' '\n' | grep MY_MEMORY_SIZE | cut -d= -f2"
@@ -587,18 +711,26 @@ class CheckInstallConfig(CheckBase):
LOG.error("cannot get memory limit, command: %s, err: %s" % (cmd, stderr))
return False
if container_memory_limit and (int(container_memory_limit) / tmp_gb) < 28:
- LOG.error("container memory limit(%.2f GB) cannot be less than 28GB, please check it!" % (int(container_memory_limit) / tmp_gb))
+ LOG.error(
+ "container memory limit(%.2f GB) cannot be less than 28GB, please check it!"
+ % (int(container_memory_limit) / tmp_gb)
+ )
return False
if container_memory_limit and int(sga_buff_size) > int(container_memory_limit):
- LOG.error("sga buffer size(%.2f GB) should less than container memory limit(%.2f GB), please check it!" % (int(sga_buff_size) / tmp_gb, int(container_memory_limit) / tmp_gb))
+ LOG.error(
+                "sga buffer size (%.2f GB) should be less than container memory limit (%.2f GB), please check it!"
+ % (int(sga_buff_size) / tmp_gb, int(container_memory_limit) / tmp_gb)
+ )
return False
-
+
LOG.info("End check sga buffer size")
return True
def get_result(self, *args, **kwargs):
if not self.config_path:
- LOG.error('path of config file is not entered, example: sh install.sh xxx/xxx/xxx')
+ LOG.error(
+ "path of config file is not entered, example: sh install.sh xxx/xxx/xxx"
+ )
return False
install_config_params = self.read_install_config()
@@ -613,30 +745,32 @@ class CheckInstallConfig(CheckBase):
self.cluster_name = install_config_params.get("cluster_name")
- if install_config_params['deploy_mode'] in use_dbstor:
+ if install_config_params["deploy_mode"] in use_dbstor:
self.config_key.remove("storage_logic_ip")
self.config_key.update(self.dbstor_config_key)
ping_check_element.remove("storage_logic_ip")
- if install_config_params['deploy_mode'] == "dbstor":
- if not install_config_params['mysql_metadata_in_cantian']:
- LOG.error('Failed to check. deploy_mode is dbstor, mysql_metadata_in_cantian must be true')
+ if install_config_params["deploy_mode"] == "dbstor":
+ if not install_config_params["mysql_metadata_in_cantian"]:
+ LOG.error(
+ "Failed to check. deploy_mode is dbstor, mysql_metadata_in_cantian must be true"
+ )
return False
ping_check_element.remove("share_logic_ip")
- install_config_params['share_logic_ip'] = "127.0.0.1"
+ install_config_params["share_logic_ip"] = "127.0.0.1"
            # Placeholders to avoid errors in the NAS-free mode; to be removed in a later version
- install_config_params['archive_logic_ip'] = "127.0.0.1"
- install_config_params['metadata_logic_ip'] = "127.0.0.1"
+ install_config_params["archive_logic_ip"] = "127.0.0.1"
+ install_config_params["metadata_logic_ip"] = "127.0.0.1"
else:
- self.config_params['cluster_id'] = "0"
- self.config_params['mes_type'] = "TCP"
+ self.config_params["cluster_id"] = "0"
+ self.config_params["mes_type"] = "TCP"
self.config_key.update(self.file_config_key)
        # Skip the archive connectivity check when archiving is disabled
if install_config_params.get("storage_archive_fs") == "":
ping_check_element.remove("archive_logic_ip")
- if install_config_params.get("cantian_in_container", "0") != '0':
- ip_check_element.remove('cms_ip')
+ if install_config_params.get("cantian_in_container", "0") != "0":
+ ip_check_element.remove("cms_ip")
ping_check_element.remove("cms_ip")
ip_check_element.remove("cantian_vlan_ip")
ping_check_element.remove("cantian_vlan_ip")
@@ -650,43 +784,56 @@ class CheckInstallConfig(CheckBase):
ping_check_element.remove("metadata_logic_ip")
ping_check_element.remove("storage_logic_ip")
- if (install_config_params.get("archive_logic_ip", "") == ""
- and install_config_params.get('share_logic_ip', "") == ""
- and install_config_params.get('metadata_logic_ip', "") == ""
- and install_config_params['deploy_mode'] in use_dbstor):
- install_config_params['archive_logic_ip'] = self.cluster_name
- install_config_params['share_logic_ip'] = self.cluster_name
- install_config_params['metadata_logic_ip'] = self.cluster_name
-
- max_arch_files_size = install_config_params.get('MAX_ARCH_FILES_SIZE', "")
+ if (
+ install_config_params.get("archive_logic_ip", "") == ""
+ and install_config_params.get("share_logic_ip", "") == ""
+ and install_config_params.get("metadata_logic_ip", "") == ""
+ and install_config_params["deploy_mode"] in use_dbstor
+ ):
+ install_config_params["archive_logic_ip"] = self.cluster_name
+ install_config_params["share_logic_ip"] = self.cluster_name
+ install_config_params["metadata_logic_ip"] = self.cluster_name
+
+ max_arch_files_size = install_config_params.get("MAX_ARCH_FILES_SIZE", "")
if not max_arch_files_size:
- install_config_params['MAX_ARCH_FILES_SIZE'] = '300G'
+ install_config_params["MAX_ARCH_FILES_SIZE"] = "300G"
if not self.check_install_config_params(install_config_params):
return False
- if (install_config_params['deploy_mode'] in use_dbstor and
- not self.check_storage_cantian_vlan_ip_scale(install_config_params)):
+ if install_config_params[
+ "deploy_mode"
+ ] in use_dbstor and not self.check_storage_cantian_vlan_ip_scale(
+ install_config_params
+ ):
return False
for key, value in install_config_params.items():
- if not install_config_params.get("mes_ssl_switch") and key in self.mes_type_key:
+ if (
+ not install_config_params.get("mes_ssl_switch")
+ and key in self.mes_type_key
+ ):
continue
if key in self.config_key:
checked_result = self.check_install_config_param(key, value)
if not checked_result:
- LOG.error('check %s with value: %s failed', str(key), str(value))
+ LOG.error("check %s with value: %s failed", str(key), str(value))
return False
self.config_params[key] = value
try:
self.update_config_params()
except Exception as error:
- LOG.error('write config param to config_param.json failed, error: %s', str(error))
+ LOG.error(
+ "write config param to config_param.json failed, error: %s", str(error)
+ )
return False
- if install_config_params.get("'cantian_in_container'", "0") == '0':
+        if install_config_params.get("cantian_in_container", "0") == "0":
try:
self.write_result_to_json()
except Exception as error:
- LOG.error('write config param to deploy_param.json failed, error: %s', str(error))
+ LOG.error(
+ "write config param to deploy_param.json failed, error: %s",
+ str(error),
+ )
return False
return True
@@ -702,51 +849,53 @@ class CheckInstallConfig(CheckBase):
return True
def install_config_params_init(self, install_config_params):
- if 'link_type' not in install_config_params.keys():
- install_config_params['link_type'] = '1'
- if 'storage_archive_fs' not in install_config_params.keys():
- install_config_params['storage_archive_fs'] = ''
- if 'archive_logic_ip' not in install_config_params.keys():
- install_config_params['archive_logic_ip'] = ''
- if 'mes_type' not in install_config_params.keys():
- install_config_params['mes_type'] = 'UC'
- if 'mes_ssl_switch' not in install_config_params.keys():
- install_config_params['mes_ssl_switch'] = False
- if 'deploy_mode' not in install_config_params.keys():
- install_config_params['deploy_mode'] = "combined"
- if 'dbstor_fs_vstore_id' not in install_config_params.keys():
- install_config_params['dbstor_fs_vstore_id'] = "0"
- if (install_config_params.get("mes_ssl_switch") and
- install_config_params.get("cantian_in_container", "-1") == "0"):
+ if "link_type" not in install_config_params.keys():
+ install_config_params["link_type"] = "1"
+ if "storage_archive_fs" not in install_config_params.keys():
+ install_config_params["storage_archive_fs"] = ""
+ if "archive_logic_ip" not in install_config_params.keys():
+ install_config_params["archive_logic_ip"] = ""
+ if "mes_type" not in install_config_params.keys():
+ install_config_params["mes_type"] = "UC"
+ if "mes_ssl_switch" not in install_config_params.keys():
+ install_config_params["mes_ssl_switch"] = False
+ if "deploy_mode" not in install_config_params.keys():
+ install_config_params["deploy_mode"] = "combined"
+ if "dbstor_fs_vstore_id" not in install_config_params.keys():
+ install_config_params["dbstor_fs_vstore_id"] = "0"
+ if (
+ install_config_params.get("mes_ssl_switch")
+ and install_config_params.get("cantian_in_container", "-1") == "0"
+ ):
self.config_key.update(self.mes_type_key)
- if 'db_type' not in install_config_params.keys():
- install_config_params['db_type'] = '0'
- if 'mysql_metadata_in_cantian' not in install_config_params.keys():
- install_config_params['mysql_metadata_in_cantian'] = True
- if 'cantian_in_container' not in install_config_params.keys():
- install_config_params['cantian_in_container'] = "0"
+ if "db_type" not in install_config_params.keys():
+ install_config_params["db_type"] = "0"
+ if "mysql_metadata_in_cantian" not in install_config_params.keys():
+ install_config_params["mysql_metadata_in_cantian"] = True
+ if "cantian_in_container" not in install_config_params.keys():
+ install_config_params["cantian_in_container"] = "0"
def parse_policy_config_file(self):
policy_path = os.path.join(dir_name, "deploy_policy_config.json")
try:
- with open(policy_path, 'r', encoding='utf8') as file_path:
+ with open(policy_path, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
return json_data
except Exception as error:
- LOG.error('load %s error, error: %s', policy_path, str(error))
+ LOG.error("load %s error, error: %s", policy_path, str(error))
return False
-
+
def parse_cantian_config_file(self):
cantian_config_path = os.path.join(dir_name, "cantian")
cantian_config_path = os.path.join(cantian_config_path, "install_config.json")
try:
- with open(cantian_config_path, 'r', encoding='utf8') as file_path:
+ with open(cantian_config_path, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
return json_data
except Exception as error:
- LOG.error('load %s error, error: %s', cantian_config_path, str(error))
+ LOG.error("load %s error, error: %s", cantian_config_path, str(error))
return False
-
+
def init_config_by_deploy_policy(self, install_config_params):
deploy_policy_json = self.parse_policy_config_file()
if deploy_policy_json is False:
@@ -765,16 +914,18 @@ class CheckInstallConfig(CheckBase):
            # If no deploy policy is configured, initialize it to the default
install_config_params["deploy_policy"] = "default"
return True
- LOG.info("deploy policy is %s" % deploy_policy_key)
+ LOG.info("deploy policy is %s" % deploy_policy_key)
        # If the requested deploy policy is not defined, fail and end the installation
deploy_policy_value = deploy_policy_json.get(deploy_policy_key, {})
if deploy_policy_value == {}:
LOG.error("can not find the deploy policy(%s)" % deploy_policy_key)
return False
-
+
        # If the deploy policy is valid, write its parameters into the config file
is_single_process = deploy_policy_value.get("single_process", "0")
- if SINGLE_DOUBLE_PROCESS_MAP.get(is_single_process, "") != cantian_config_json.get("M_RUNING_MODE"):
+ if SINGLE_DOUBLE_PROCESS_MAP.get(
+ is_single_process, ""
+ ) != cantian_config_json.get("M_RUNING_MODE"):
LOG.error("The package type does not match the configuration parameters.")
return False
tmp_config = deploy_policy_value.get("config", {})
@@ -802,7 +953,11 @@ class PreInstall:
check_config = CheckInstallConfig()
res = check_config.sga_buffer_check()
if not res:
- LOG.error('failed: %s, suggestion: %s', check_config.check_name, check_config.suggestion)
+ LOG.error(
+ "failed: %s, suggestion: %s",
+ check_config.check_name,
+ check_config.suggestion,
+ )
return 1
return 0
@@ -827,19 +982,21 @@ class PreInstall:
check_result = False
if not check_result:
- LOG.error('failed: %s, suggestion: %s', item().check_name, item().suggestion)
+ LOG.error(
+ "failed: %s, suggestion: %s", item().check_name, item().suggestion
+ )
return 1
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
config_file = None
install_type = sys.argv[1]
- if install_type == 'sga_buffer_check':
+ if install_type == "sga_buffer_check":
exit(PreInstall.run_sga_buffer_check())
- elif install_type == 'override':
+ elif install_type == "override":
config_file = sys.argv[2]
pre_install = PreInstall(install_type, config_file)
- exit(pre_install.check_main())
\ No newline at end of file
+ exit(pre_install.check_main())
diff --git a/pkg/deploy/action/pre_upgrade.py b/pkg/deploy/action/pre_upgrade.py
index d1b2b717dee9d62b0394129336ba2db5387d2b59..f507ba066bbe24bd3d3680d4db6e6e6aea95ce60 100644
--- a/pkg/deploy/action/pre_upgrade.py
+++ b/pkg/deploy/action/pre_upgrade.py
@@ -7,26 +7,26 @@ from pre_install import CheckInstallConfig
from om_log import LOGGER as LOG
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
-NEW_CONFIG_PATH = str(Path(f'{CUR_PATH}/config_params.json'))
-SOURCE_CONFIG_PATH = str(Path('/opt/cantian/action/config_params.json'))
-NEW_FILE_CONFIG_PATH = str(Path(f'{CUR_PATH}/config_params_file.json'))
-SOURCE_FILE_CONFIG_PATH = str(Path('/opt/cantian/action/config_params_file.json'))
+NEW_CONFIG_PATH = str(Path(f"{CUR_PATH}/config_params.json"))
+SOURCE_CONFIG_PATH = str(Path("/opt/cantian/action/config_params.json"))
+NEW_FILE_CONFIG_PATH = str(Path(f"{CUR_PATH}/config_params_file.json"))
+SOURCE_FILE_CONFIG_PATH = str(Path("/opt/cantian/action/config_params_file.json"))
DEPLOY_CONFIG = str(Path("/opt/cantian/config/deploy_param.json"))
NEW_DEPLOY_CONFIG = str(Path(f"{CUR_PATH}/deploy_param.json"))
def read_install_config(config_path):
try:
- with open(config_path, 'r', encoding='utf8') as file_path:
+ with open(config_path, "r", encoding="utf8") as file_path:
json_data = json.load(file_path)
return json_data
except Exception as error:
- LOG.error('load %s error, error: %s', config_path, str(error))
+ LOG.error("load %s error, error: %s", config_path, str(error))
return {}
-if __name__ == '__main__':
+if __name__ == "__main__":
    # If no file is specified, check that the config keys in the upgrade package match those in the source config file
deploy_config = read_install_config(DEPLOY_CONFIG)
deploy_mode = deploy_config.get("deploy_mode", "combined")
@@ -35,10 +35,16 @@ if __name__ == '__main__':
NEW_CONFIG_PATH = NEW_FILE_CONFIG_PATH
if os.path.exists(SOURCE_FILE_CONFIG_PATH):
SOURCE_CONFIG_PATH = SOURCE_FILE_CONFIG_PATH
- new_config_keys = read_install_config(NEW_CONFIG_PATH).keys() - {"install_type", "cantian_in_container",
- "auto_create_fs"}
- source_config_keys = read_install_config(SOURCE_CONFIG_PATH).keys() - {"install_type", "cantian_in_container",
- "auto_create_fs"}
+ new_config_keys = read_install_config(NEW_CONFIG_PATH).keys() - {
+ "install_type",
+ "cantian_in_container",
+ "auto_create_fs",
+ }
+ source_config_keys = read_install_config(SOURCE_CONFIG_PATH).keys() - {
+ "install_type",
+ "cantian_in_container",
+ "auto_create_fs",
+ }
keys_diff = new_config_keys ^ source_config_keys
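+    # symmetric difference: keys present in exactly one of the two configs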
if keys_diff:
LOG.error(f"config keys are different with difference: {keys_diff}")
@@ -55,7 +61,9 @@ if __name__ == '__main__':
mysql_metadata_in_cantian = deploy_config.get("mysql_metadata_in_cantian")
new_mysql_metadata_in_cantian = new_config.get("mysql_metadata_in_cantian")
if new_mysql_metadata_in_cantian != mysql_metadata_in_cantian:
- LOG.error("mysql_metadata_in_cantian is different from new config file, please check.")
+ LOG.error(
+ "mysql_metadata_in_cantian is different from new config file, please check."
+ )
sys.exit(1)
if res:
sys.exit(0)
diff --git a/pkg/deploy/action/rest_client.py b/pkg/deploy/action/rest_client.py
index ca4e51c6cb2f56f0f0c3a0834d521bc7eb7ff9f4..bc9578fee02201edba82c34e17086e6561f2f840 100644
--- a/pkg/deploy/action/rest_client.py
+++ b/pkg/deploy/action/rest_client.py
@@ -10,13 +10,17 @@ NORMAL_STATE, ABNORMAL_STATE = 0, 1
def get_cur_timestamp():
utc_now = datetime.utcnow()
- return utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None).strftime('%Y%m%d%H%M%S')
+ return (
+ utc_now.replace(tzinfo=timezone.utc)
+ .astimezone(tz=None)
+ .strftime("%Y%m%d%H%M%S")
+ )
class RestElemConstant:
- PORT = '8088'
- HTTPS = 'https://'
- LOGIN = '/deviceManager/rest/xxx/login'
+ PORT = "8088"
+ HTTPS = "https://"
+ LOGIN = "/deviceManager/rest/xxx/login"
class ExecutionError(Exception):
@@ -35,30 +39,33 @@ class RestClient:
self.session = None
self.res_login = None
- self.handler = {'create': self.create_snapshots,
- 'rollback': self.rollback_snapshots}
+ self.handler = {
+ "create": self.create_snapshots,
+ "rollback": self.rollback_snapshots,
+ }
@staticmethod
def gen_timestamp():
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
- return str(cur_time.strftime('%Y%m%d%H%M%S'))
+ return str(cur_time.strftime("%Y%m%d%H%M%S"))
@staticmethod
def exception_handler(err_msg=None, cur_mode=None):
- err_info = '[current_mode] {}, [err_info] {}'.format(cur_mode, err_msg)
+ err_info = "[current_mode] {}, [err_info] {}".format(cur_mode, err_msg)
LOG.error(err_info)
raise ExecutionError(err_info)
@staticmethod
def response_parse(res_data):
status_code = res_data.status_code
- err_code, err_details = -1, 'failed'
+ err_code, err_details = -1, "failed"
if status_code == 200:
exec_res = res_data.json()
- err_code, err_details = \
- exec_res.get('error').get('code'), exec_res.get('error').get('description')
+ err_code, err_details = exec_res.get("error").get("code"), exec_res.get(
+ "error"
+ ).get("description")
return status_code, int(err_code), err_details
@@ -69,42 +76,59 @@ class RestClient:
return data
def update_cookies(self, res):
- res_body, set_cookie = res.json().get('data'), res.headers.get('Set-Cookie')
+ res_body, set_cookie = res.json().get("data"), res.headers.get("Set-Cookie")
- self.token, self.device_id = res_body.get('iBaseToken'), res_body.get('deviceid')
+ self.token, self.device_id = res_body.get("iBaseToken"), res_body.get(
+ "deviceid"
+ )
- match_res = re.findall(r'session=ismsession=\w+;', set_cookie)
+ match_res = re.findall(r"session=ismsession=\w+;", set_cookie)
if match_res:
self.ism_session = match_res[0][:-1]
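+            # keep the session cookie, dropping the trailing ';' captured by the regex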
- def make_header(self, content_type='application/json'):
- header = {'Content-type': content_type}
+ def make_header(self, content_type="application/json"):
+ header = {"Content-type": content_type}
if self.token:
- header['iBaseToken'] = self.token
+ header["iBaseToken"] = self.token
if self.ism_session:
- header['Cookie'] = self.ism_session
+ header["Cookie"] = self.ism_session
return header
def login(self, fs_name, keep_session=False):
- url = '{}{}:{}{}'.format(RestElemConstant.HTTPS, self.ip_addr, RestElemConstant.PORT, RestElemConstant.LOGIN)
- user_info = {'username': self.user_name,
- 'password': self.passwd,
- 'scope': 0,
- 'loginMode': 3,
- 'timeConversion': 0,
- 'isEncrypt': 'false'}
-
- login_header = {'Content-type': 'application/json',
- 'Cookie': '__LANGUAGE_KEY__=zh-CN; __IBASE_LANGUAGE_KEY__=zh-CN'}
+ url = "{}{}:{}{}".format(
+ RestElemConstant.HTTPS,
+ self.ip_addr,
+ RestElemConstant.PORT,
+ RestElemConstant.LOGIN,
+ )
+ user_info = {
+ "username": self.user_name,
+ "password": self.passwd,
+ "scope": 0,
+ "loginMode": 3,
+ "timeConversion": 0,
+ "isEncrypt": "false",
+ }
+
+ login_header = {
+ "Content-type": "application/json",
+ "Cookie": "__LANGUAGE_KEY__=zh-CN; __IBASE_LANGUAGE_KEY__=zh-CN",
+ }
requests.packages.urllib3.disable_warnings()
with requests.session() as session:
- res = session.post(url, data=json.dumps(user_info), headers=login_header, verify=False)
+ res = session.post(
+ url, data=json.dumps(user_info), headers=login_header, verify=False
+ )
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = ('Login {} failed before taking the snapshot of {}, status_code: {}, err_code: {}, '
- 'err_details: {}'.format(fs_name, self.ip_addr, status_code, err_code, err_details))
+ err_msg = (
+                "Login failed before taking the snapshot of {} via {}, status_code: {}, err_code: {}, "
+ "err_details: {}".format(
+ fs_name, self.ip_addr, status_code, err_code, err_details
+ )
+ )
return err_code, err_msg
self.update_cookies(res)
@@ -115,7 +139,7 @@ class RestClient:
else:
res.close()
- return NORMAL_STATE, 'success'
+ return NORMAL_STATE, "success"
def normal_request(self, url, method, data=None, **kwargs):
requests.packages.urllib3.disable_warnings()
@@ -123,39 +147,51 @@ class RestClient:
if keep_session:
req = self.session
- self.token = self.res_login.get('data').get('ibasetoken')
+ self.token = self.res_login.get("data").get("ibasetoken")
else:
req = requests.session()
headers = self.make_header()
with req as session:
- if method == 'put':
- res = session.put(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'post':
- res = session.post(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'get':
- res = session.get(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'delete':
- res = session.delete(url, data=data, headers=headers, verify=False, timeout=timeout)
+ if method == "put":
+ res = session.put(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "post":
+ res = session.post(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "get":
+ res = session.get(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "delete":
+ res = session.delete(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
res.close()
return res
def get_file_system_id(self, fs_name):
- url = '{}{}:{}/deviceManager/rest/{}/filesystem?filter=NAME::{}'.format(RestElemConstant.HTTPS,
- self.ip_addr,
- RestElemConstant.PORT,
- str(self.device_id),
- fs_name)
- res = self.normal_request(url, 'get')
+ url = "{}{}:{}/deviceManager/rest/{}/filesystem?filter=NAME::{}".format(
+ RestElemConstant.HTTPS,
+ self.ip_addr,
+ RestElemConstant.PORT,
+ str(self.device_id),
+ fs_name,
+ )
+ res = self.normal_request(url, "get")
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = 'Get file system id of {} failed, status_code: {}, err_code: {}, ' \
- 'err_details: {}'.format(fs_name, status_code, err_code, err_details)
+ err_msg = (
+ "Get file system id of {} failed, status_code: {}, err_code: {}, "
+ "err_details: {}".format(fs_name, status_code, err_code, err_details)
+ )
return ABNORMAL_STATE, err_msg
- file_system_id = res.json().get('data')[0].get('ID')
+ file_system_id = res.json().get("data")[0].get("ID")
return NORMAL_STATE, file_system_id
def create_snapshots(self, fs_name):
@@ -164,25 +200,26 @@ class RestClient:
work_state, res_details = self.get_file_system_id(fs_name)
if work_state:
- return self.exception_handler(err_msg=res_details, cur_mode='create')
-
- url = '{}{}:{}/deviceManager/rest/{}/fssnapshot'.format(RestElemConstant.HTTPS,
- self.ip_addr,
- RestElemConstant.PORT,
- self.device_id)
- reg_version = '{}_{}'.format(fs_name, self.upgrade_version).replace('.', '_')
- snapshot_name = '{}_{}'.format(reg_version, self.gen_timestamp())
- data = {'NAME': snapshot_name,
- 'PARENTID': int(res_details),
- 'PARENTTYPE': 40}
- res = self.normal_request(url, 'post', data=json.dumps(data))
+ return self.exception_handler(err_msg=res_details, cur_mode="create")
+
+ url = "{}{}:{}/deviceManager/rest/{}/fssnapshot".format(
+ RestElemConstant.HTTPS, self.ip_addr, RestElemConstant.PORT, self.device_id
+ )
+ reg_version = "{}_{}".format(fs_name, self.upgrade_version).replace(".", "_")
+ snapshot_name = "{}_{}".format(reg_version, self.gen_timestamp())
+ data = {"NAME": snapshot_name, "PARENTID": int(res_details), "PARENTTYPE": 40}
+ res = self.normal_request(url, "post", data=json.dumps(data))
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = 'Take snapshot of {} failed, status_code: {}, ' \
- 'err_code: {}, err_details: {}'.format(fs_name, status_code, err_code, err_details)
- return self.exception_handler(err_msg=err_msg, cur_mode='create')
-
- snapshot_id = res.json().get('data').get('ID')
+ err_msg = (
+ "Take snapshot of {} failed, status_code: {}, "
+ "err_code: {}, err_details: {}".format(
+ fs_name, status_code, err_code, err_details
+ )
+ )
+ return self.exception_handler(err_msg=err_msg, cur_mode="create")
+
+ snapshot_id = res.json().get("data").get("ID")
self.processed_fs[fs_name] = snapshot_id
return NORMAL_STATE
@@ -191,17 +228,20 @@ class RestClient:
if not self.processed_fs.get(fs_name):
return NORMAL_STATE
- url = '{}{}:{}/deviceManager/rest/{}/fssnapshot/rollback_fssnapshot'.format(RestElemConstant.HTTPS,
- self.ip_addr,
- RestElemConstant.PORT,
- self.device_id)
- data = {'ID': self.processed_fs.get(fs_name)}
- res = self.normal_request(url, 'put', data=json.dumps(data))
+ url = "{}{}:{}/deviceManager/rest/{}/fssnapshot/rollback_fssnapshot".format(
+ RestElemConstant.HTTPS, self.ip_addr, RestElemConstant.PORT, self.device_id
+ )
+ data = {"ID": self.processed_fs.get(fs_name)}
+ res = self.normal_request(url, "put", data=json.dumps(data))
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = 'Rollback snapshot of {} failed, status_code: {}, ' \
- 'err_code: {}, err_details: {}'.format(fs_name, status_code, err_code, err_details)
- return self.exception_handler(err_msg=err_msg, cur_mode='rollback')
+ err_msg = (
+ "Rollback snapshot of {} failed, status_code: {}, "
+ "err_code: {}, err_details: {}".format(
+ fs_name, status_code, err_code, err_details
+ )
+ )
+ return self.exception_handler(err_msg=err_msg, cur_mode="rollback")
return NORMAL_STATE
@@ -220,23 +260,34 @@ class RestClient:
"""
if not self.processed_fs.get(fs_name):
return NORMAL_STATE, NORMAL_STATE
- url = '{}{}:{}/deviceManager/rest/{}/FSSNAPSHOT/query_fs_snapshot_rollback?PARENTNAME={}'\
- .format(RestElemConstant.HTTPS,
- self.ip_addr,
- RestElemConstant.PORT,
- self.device_id,
- fs_name)
-
- res = self.normal_request(url, 'get')
+ url = "{}{}:{}/deviceManager/rest/{}/FSSNAPSHOT/query_fs_snapshot_rollback?PARENTNAME={}".format(
+ RestElemConstant.HTTPS,
+ self.ip_addr,
+ RestElemConstant.PORT,
+ self.device_id,
+ fs_name,
+ )
+
+ res = self.normal_request(url, "get")
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = 'Query rollback snapshot process of {} failed, status_code: {}, ' \
- 'err_code: {}, err_details: {}'.format(fs_name, status_code, err_code, err_details)
- return self.exception_handler(err_msg=err_msg, cur_mode='rollback')
+ err_msg = (
+ "Query rollback snapshot process of {} failed, status_code: {}, "
+ "err_code: {}, err_details: {}".format(
+ fs_name, status_code, err_code, err_details
+ )
+ )
+ return self.exception_handler(err_msg=err_msg, cur_mode="rollback")
data = self.get_data(res)
- rollback_rate, rollback_status = data.get("rollbackRate"), data.get("rollbackStatus")
- LOG.info("Rollback snapshot process of %s details:rollback_rate[%s] rollback_status[%s]",
- fs_name, rollback_rate, rollback_status)
+ rollback_rate, rollback_status = data.get("rollbackRate"), data.get(
+ "rollbackStatus"
+ )
+ LOG.info(
+            "Rollback snapshot process of %s details: rollback_rate[%s] rollback_status[%s]",
+ fs_name,
+ rollback_rate,
+ rollback_status,
+ )
return rollback_rate, rollback_status
def execute(self, fs_name, mode):
@@ -246,6 +297,8 @@ class RestClient:
if not self.token:
work_state, res_details = self.login(fs_name)
if work_state:
- return self.exception_handler(err_msg=res_details, cur_mode='get login token')
+ return self.exception_handler(
+ err_msg=res_details, cur_mode="get login token"
+ )
return self.handler.get(mode)(fs_name)
diff --git a/pkg/deploy/action/storage_operate/create_file_system.py b/pkg/deploy/action/storage_operate/create_file_system.py
index aadfca6afff68e4a92e87507716d5dff23c27f4f..0d117f059d085dd558d2c82ce4182a035b8eb397 100644
--- a/pkg/deploy/action/storage_operate/create_file_system.py
+++ b/pkg/deploy/action/storage_operate/create_file_system.py
@@ -23,27 +23,32 @@ DEPLOY_PARAM_PATH = str(pathlib.Path(CUR_PATH, "../../config/deploy_param.json")
DEPLOY_PARAM = json.loads(read_helper(DEPLOY_PARAM_PATH))
DEPLOY_MODE = DEPLOY_PARAM.get("deploy_mode")
FS_TYPE_LIST = [
- "storage_dbstor_fs", "storage_dbstor_page_fs",
- "storage_share_fs", "storage_archive_fs",
- "storage_metadata_fs"
+ "storage_dbstor_fs",
+ "storage_dbstor_page_fs",
+ "storage_share_fs",
+ "storage_archive_fs",
+ "storage_metadata_fs",
]
if DEPLOY_MODE == "dbstor":
FS_TYPE_LIST = [
- "storage_dbstor_fs", "storage_dbstor_page_fs",
- "storage_share_fs", "storage_archive_fs"
+ "storage_dbstor_fs",
+ "storage_dbstor_page_fs",
+ "storage_share_fs",
+ "storage_archive_fs",
]
-SHARE_FS_TYPE_LIST = [
- "storage_share_fs", "storage_archive_fs",
- "storage_metadata_fs"
-]
+SHARE_FS_TYPE_LIST = ["storage_share_fs", "storage_archive_fs", "storage_metadata_fs"]
if DEPLOY_MODE == "file":
FS_TYPE_LIST = [
- "storage_dbstor_fs","storage_share_fs",
- "storage_archive_fs", "storage_metadata_fs"
+ "storage_dbstor_fs",
+ "storage_share_fs",
+ "storage_archive_fs",
+ "storage_metadata_fs",
]
SHARE_FS_TYPE_LIST = [
- "storage_dbstor_fs", "storage_share_fs",
- "storage_archive_fs", "storage_metadata_fs"
+ "storage_dbstor_fs",
+ "storage_share_fs",
+ "storage_archive_fs",
+ "storage_metadata_fs",
]
ID_NAS_DBSTOR = 1038
ID_NAS_DEFAULT = 11
@@ -56,7 +61,7 @@ def is_valid_string(string):
:param string:
:return:
"""
- pattern = r'^[\w\u4e00-\u9fa5.-]{1,255}$'
+ pattern = r"^[\w\u4e00-\u9fa5.-]{1,255}$"
match = re.match(pattern, string)
return match is not None
@@ -106,11 +111,7 @@ class CreateFS(object):
"""
capacity_digit = re.findall(r"\d+", capacity)[0]
capacity_unit = re.findall(r"[A-Z]+", capacity)[0]
- convert_dict = {
- "GB": 1,
- "TB": 1000,
- "PB": 1000 ** 2
- }
+ convert_dict = {"GB": 1, "TB": 1000, "PB": 1000**2}
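+        # convert to 512-byte sectors: GB * 1024 * 1024 KB * 2 sectors per KB, the unit the device API appears to expect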
return convert_dict.get(capacity_unit) * int(capacity_digit) * 1024 * 1024 * 2
@staticmethod
@@ -125,10 +126,10 @@ class CreateFS(object):
def _result_parse(cls, err_msg, res):
result = ResponseParse(res)
rsp_code, rsp_result, rsp_data = result.get_rsp_data()
- error_code = rsp_result.get('code')
+ error_code = rsp_result.get("code")
if rsp_code != 0 or error_code != 0:
- error_des = rsp_result.get('description')
- error_sgt = rsp_result.get('suggestion')
+ error_des = rsp_result.get("description")
+ error_sgt = rsp_result.get("suggestion")
err_msg = err_msg % (error_code, error_des, error_sgt)
cls.handle_error_msg(err_msg)
return rsp_data
@@ -143,11 +144,13 @@ class CreateFS(object):
4. check vstore exists
:return:
"""
+
def _check_func():
pool_id = self.fs_info.get("PARENTID")
self._check_vstore_exists()
self._check_storage_pool_exists(pool_id)
self._check_fs_exists()
+
LOG.info("Check file system params start.")
self._check_param_type()
if self.deploy_info.get("deploy_mode") == "dbstor":
@@ -170,6 +173,7 @@ class CreateFS(object):
5. add nfs client
:return:
"""
+
def _create_func():
fs_info = {}
deploy_mode = self.deploy_info.get("deploy_mode")
@@ -180,8 +184,13 @@ class CreateFS(object):
_fs_name = _fs_info.get("NAME")
LOG.info("Begin to create fs [%s] name: %s", fs_type, _fs_name)
vstore_id = self.fs_info.get(fs_type).get("vstoreId")
- LOG.info("Begin to create fs [%s] name: %s, vstore id:[%s] in [%s] deploy mode",
- fs_type, _fs_name, vstore_id, deploy_mode)
+ LOG.info(
+ "Begin to create fs [%s] name: %s, vstore id:[%s] in [%s] deploy mode",
+ fs_type,
+ _fs_name,
+ vstore_id,
+ deploy_mode,
+ )
_fs_info = self.storage_opt.query_filesystem_info(_fs_name, vstore_id)
if _fs_info:
err_msg = "The file system[%s] already exists." % _fs_name
@@ -195,9 +204,14 @@ class CreateFS(object):
fs_info[fs_type] = {
"fs_id": fs_id,
"nfs_share_id": nfs_share_id,
- "nfs_share_client_id": nfs_share_client_id
+ "nfs_share_client_id": nfs_share_client_id,
}
- LOG.info("Create fs [%s] success, detail:name[%s], info:%s", fs_type, _fs_name, fs_info)
+ LOG.info(
+ "Create fs [%s] success, detail:name[%s], info:%s",
+ fs_type,
+ _fs_name,
+ fs_info,
+ )
LOG.info("Create fs start.")
self.storage_opt.login()
@@ -217,6 +231,7 @@ class CreateFS(object):
4. delete fs info
:return:
"""
+
def _delete_func():
for fs_type in FS_TYPE_LIST:
_fs_info = self.fs_info.get(fs_type)
@@ -233,8 +248,13 @@ class CreateFS(object):
nfs_share_id = nfs_share_info[0].get("ID")
self.storage_opt.delete_nfs_share(nfs_share_id, vstore_id)
else:
- LOG.info("The nfs share of fs [%s] name %s is not exist", fs_type, _fs_name)
+ LOG.info(
+                        "The nfs share of fs [%s] name %s does not exist",
+ fs_type,
+ _fs_name,
+ )
self.storage_opt.delete_file_system(fs_id)
+
LOG.info("Delete fs info start.")
self.storage_opt.login()
try:
@@ -246,12 +266,16 @@ class CreateFS(object):
def _check_vstore_id(self):
deploy_info = json.loads(read_helper(DEPLOY_PARAM_PATH))
deploy_info_dbstor_fs_vstore_id = deploy_info.get("dbstor_fs_vstore_id")
- fs_info_dbstor_fs_vstore_id = self.fs_info.get("storage_dbstor_fs").get("vstoreId")
+ fs_info_dbstor_fs_vstore_id = self.fs_info.get("storage_dbstor_fs").get(
+ "vstoreId"
+ )
if int(deploy_info_dbstor_fs_vstore_id) != int(fs_info_dbstor_fs_vstore_id):
- err_msg = "dbstor_fs_vstore_id of config_params.json is " \
- "different from file_system_info.json,details:" \
- " dbstor_fs_vstore_id:(%s, %s)" % (fs_info_dbstor_fs_vstore_id,
- deploy_info_dbstor_fs_vstore_id)
+ err_msg = (
+ "dbstor_fs_vstore_id of config_params.json is "
+                "different from file_system_info.json, details:"
+ " dbstor_fs_vstore_id:(%s, %s)"
+ % (fs_info_dbstor_fs_vstore_id, deploy_info_dbstor_fs_vstore_id)
+ )
LOG.error(err_msg)
raise Exception(err_msg)
@@ -269,10 +293,7 @@ class CreateFS(object):
name_err = []
digit_err = []
range_err = []
- range_check = {
- "SNAPSHOTRESERVEPER": (0, 51),
- "CAPACITYTHRESHOLD": (50, 99)
- }
+ range_check = {"SNAPSHOTRESERVEPER": (0, 51), "CAPACITYTHRESHOLD": (50, 99)}
digit_check = ["SNAPSHOTRESERVEPER", "CAPACITYTHRESHOLD"]
pool_id = self.fs_info.get("PARENTID")
if not isinstance(pool_id, int):
@@ -291,7 +312,9 @@ class CreateFS(object):
for key, value in fs_info.items():
if key in digit_check and not isinstance(value, int):
digit_err.append({fs_type: key})
- if key in list(range_check.keys()) and value not in range(*range_check.get(key)):
+ if key in list(range_check.keys()) and value not in range(
+ *range_check.get(key)
+ ):
range_err.append({fs_type: key})
if key == "CAPACITY" and not self.check_capacity(value):
digit_err.append({fs_type: key})
@@ -363,8 +386,8 @@ class CreateFS(object):
def _get_nfs_share_info(self, fs_type, fs_id):
data = {
"SHAREPATH": f"/{ self.fs_info.get(fs_type).get('NAME')}/",
- "vstoreId": self.fs_info.get(fs_type).get("vstoreId"),
- "FSID": fs_id
+ "vstoreId": self.fs_info.get(fs_type).get("vstoreId"),
+ "FSID": fs_id,
}
data.update(self.fs_info.get(fs_type))
return data
@@ -376,7 +399,7 @@ class CreateFS(object):
"ROOTSQUASH": 1,
"PARENTID": parent_id,
"vstoreId": self.fs_info.get(fs_type).get("vstoreId"),
- "NAME": self.fs_info.get("client_ip")
+ "NAME": self.fs_info.get("client_ip"),
}
return data
@@ -413,10 +436,16 @@ class CreateFS(object):
data = self._get_share_client_info(nfs_share_id, fs_type)
return self.storage_opt.add_nfs_client(data)
+
def main():
create_parser = argparse.ArgumentParser()
- create_parser.add_argument('--action', choices=["create", "delete", "pre_check"], dest="action", required=True)
- create_parser.add_argument('--ip', dest="ip_addr", required=True)
+ create_parser.add_argument(
+ "--action",
+ choices=["create", "delete", "pre_check"],
+ dest="action",
+ required=True,
+ )
+ create_parser.add_argument("--ip", dest="ip_addr", required=True)
args = create_parser.parse_args()
action = args.action
ip_addr = args.ip_addr
@@ -431,5 +460,9 @@ if __name__ == "__main__":
try:
main()
except Exception as err:
- LOG.error("Execute create fs failed, details:%s, traceback:%s", str(err), traceback.format_exc())
+ LOG.error(
+ "Execute create fs failed, details:%s, traceback:%s",
+ str(err),
+ traceback.format_exc(),
+ )
exit(1)
diff --git a/pkg/deploy/action/storage_operate/do_snapshot.py b/pkg/deploy/action/storage_operate/do_snapshot.py
index 5560d1329110fe64943d90a46c27b7e40f7c11a2..2abdf03ade0830281cea6b19ff0bd399184098cd 100644
--- a/pkg/deploy/action/storage_operate/do_snapshot.py
+++ b/pkg/deploy/action/storage_operate/do_snapshot.py
@@ -15,8 +15,8 @@ from storage_operate.dr_deploy_operate.dr_deploy_common import DRDeployCommon
from utils.client.rest_client import get_cur_timestamp, read_helper, write_helper
from om_log import REST_LOG as LOG
-DEPLOY_PARAM_PATH = '/opt/cantian/config/deploy_param.json'
-DR_DEPLOY_FLAG = os.path.join(CUR_PATH, '../../config/.dr_deploy_flag')
+DEPLOY_PARAM_PATH = "/opt/cantian/config/deploy_param.json"
+DR_DEPLOY_FLAG = os.path.join(CUR_PATH, "../../config/.dr_deploy_flag")
NORMAL_STATE, ABNORMAL_STATE = 0, 1
@@ -27,14 +27,14 @@ class SnapShotRestClient(object):
self.upgrade_version = get_cur_timestamp()
self.processed_fs = processed_fs
self.handler = {
- 'create': self.create_snapshots,
- 'rollback': self.rollback_snapshots,
- "delete": self.delete_snapshots
+ "create": self.create_snapshots,
+ "rollback": self.rollback_snapshots,
+ "delete": self.delete_snapshots,
}
@staticmethod
def exception_handler(err_msg=None, cur_mode=None):
- err_info = '[current_mode] {}, [err_info] {}'.format(cur_mode, err_msg)
+ err_info = "[current_mode] {}, [err_info] {}".format(cur_mode, err_msg)
LOG.error(err_info)
raise Exception(err_info)
@@ -47,7 +47,9 @@ class SnapShotRestClient(object):
self.storage_operate.login()
config_params = json.loads(read_helper(DEPLOY_PARAM_PATH))
storage_dbstor_page_fs = config_params.get("storage_dbstor_page_fs")
- page_fs_info = self.storage_operate.query_filesystem_info(storage_dbstor_page_fs)
+ page_fs_info = self.storage_operate.query_filesystem_info(
+ storage_dbstor_page_fs
+ )
page_fs_id = page_fs_info.get("ID")
dr_deploy_opt = DRDeployCommon(self.storage_operate)
page_pair_info = dr_deploy_opt.query_remote_replication_pair_info(page_fs_id)
@@ -57,7 +59,9 @@ class SnapShotRestClient(object):
if not os.path.exists(DR_DEPLOY_FLAG):
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
- with os.fdopen(os.open(DR_DEPLOY_FLAG, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(
+ os.open(DR_DEPLOY_FLAG, flags, modes), "w", encoding="utf-8"
+ ) as file:
file.write("")
return secondary == "false"
LOG.info("Current node is not dr or is primary")
@@ -66,15 +70,19 @@ class SnapShotRestClient(object):
def create_snapshots(self, fs_name, vstore_id=0):
if self.processed_fs.get(fs_name):
return NORMAL_STATE
- fs_info = self.storage_operate.query_filesystem_info(fs_name, vstore_id=vstore_id)
+ fs_info = self.storage_operate.query_filesystem_info(
+ fs_name, vstore_id=vstore_id
+ )
if not fs_info:
err_msg = "file system [%s] is not exist, please check." % fs_name
- self.exception_handler(err_msg=err_msg, cur_mode='create')
+ self.exception_handler(err_msg=err_msg, cur_mode="create")
fs_id = fs_info.get("ID")
- reg_version = '{}_{}'.format(fs_name, self.upgrade_version).replace('.', '_')
- snapshot_name = '{}_{}'.format(reg_version, str(get_cur_timestamp()))
- snapshot_info = self.storage_operate.create_file_system_snapshot(snapshot_name, fs_id, vstore_id=vstore_id)
- snapshot_id = snapshot_info.get('ID')
+ reg_version = "{}_{}".format(fs_name, self.upgrade_version).replace(".", "_")
+ snapshot_name = "{}_{}".format(reg_version, str(get_cur_timestamp()))
+ snapshot_info = self.storage_operate.create_file_system_snapshot(
+ snapshot_name, fs_id, vstore_id=vstore_id
+ )
+ snapshot_id = snapshot_info.get("ID")
self.processed_fs[fs_name] = snapshot_id
return NORMAL_STATE
@@ -84,9 +92,14 @@ class SnapShotRestClient(object):
return NORMAL_STATE
snapshot_id = self.processed_fs.get(fs_name)
try:
- snapshot_info = self.storage_operate.query_file_system_snapshot_info(snapshot_id)
+ snapshot_info = self.storage_operate.query_file_system_snapshot_info(
+ snapshot_id
+ )
except Exception as e:
- if str(e).find("1077937875") != -1 or str(e).find("snapshot does not exist") != -1:
+ if (
+ str(e).find("1077937875") != -1
+ or str(e).find("snapshot does not exist") != -1
+ ):
err_msg = "The snapshot is already not exist, details:%s" % str(e)
LOG.info(err_msg)
return NORMAL_STATE
@@ -105,9 +118,14 @@ class SnapShotRestClient(object):
return NORMAL_STATE
snapshot_id = self.processed_fs.get(fs_name)
try:
- snapshot_info = self.storage_operate.query_file_system_snapshot_info(snapshot_id)
+ snapshot_info = self.storage_operate.query_file_system_snapshot_info(
+ snapshot_id
+ )
except Exception as e:
- if str(e).find("1077937875") != -1 or str(e).find("snapshot does not exist") != -1:
+ if (
+ str(e).find("1077937875") != -1
+ or str(e).find("snapshot does not exist") != -1
+ ):
err_msg = "The snapshot is already not exist, details:%s" % str(e)
LOG.info(err_msg)
return NORMAL_STATE
@@ -130,8 +148,8 @@ class SnapShotRestClient(object):
def get_fs_processed_info(info_path, fs_names):
- json_file_path = os.path.join(info_path, 'processed_snapshots.json')
- init_fs_info = {name: '' for name in fs_names}
+ json_file_path = os.path.join(info_path, "processed_snapshots.json")
+ init_fs_info = {name: "" for name in fs_names}
if not os.path.exists(info_path):
os.makedirs(info_path)
return init_fs_info
@@ -160,18 +178,28 @@ def main(mode, ip_address, main_path):
dbstor_fs_vstore_id = config_params.get("dbstor_fs_vstore_id", 0)
fs_names_type = []
for fs_type, fs_name in config_params.items():
- if fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_type == "storage_share_fs" and fs_name:
+ if (
+ fs_type.endswith("_fs")
+ and fs_type.startswith("storage")
+ and fs_type == "storage_share_fs"
+ and fs_name
+ ):
fs_names_type.append((fs_name, fs_type, vstore_id))
- elif fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_type == "storage_dbstor_fs" and fs_name:
+ elif (
+ fs_type.endswith("_fs")
+ and fs_type.startswith("storage")
+ and fs_type == "storage_dbstor_fs"
+ and fs_name
+ ):
fs_names_type.append((fs_name, fs_type, dbstor_fs_vstore_id))
- elif fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_name:
+ elif fs_type.endswith("_fs") and fs_type.startswith("storage") and fs_name:
fs_names_type.append((fs_name, fs_type, 0))
fs_names = [
fs_val
for fs_name, fs_val in config_params.items()
- if fs_name.endswith('_fs') and fs_name.startswith("storage") and fs_val
+ if fs_name.endswith("_fs") and fs_name.startswith("storage") and fs_val
]
- process_fs_path = '{}/cantian_upgrade_snapshots'.format(main_path)
+ process_fs_path = "{}/cantian_upgrade_snapshots".format(main_path)
fs_processed_data = get_fs_processed_info(process_fs_path, fs_names)
login_data = (ip_address, user_name, passwd)
@@ -184,19 +212,23 @@ def main(mode, ip_address, main_path):
try:
_ = rest_client_obj.execute(fs_name, mode, _vstore_id)
except Exception as error:
- LOG.error('error happened when try to {} snapshot of {}, err_info: {}, '
- 'err_traceback: {}'.format(mode, fs_name, str(error), traceback.format_exc(limit=-1)))
+ LOG.error(
+ "error happened when try to {} snapshot of {}, err_info: {}, "
+ "err_traceback: {}".format(
+ mode, fs_name, str(error), traceback.format_exc(limit=-1)
+ )
+ )
return ABNORMAL_STATE
- if mode == 'rollback':
+ if mode == "rollback":
query_rollback_process(fs_names_type, rest_client_obj.storage_operate)
- recoder_path = os.path.join(process_fs_path, 'processed_snapshots.json')
- if mode == 'create':
+ recoder_path = os.path.join(process_fs_path, "processed_snapshots.json")
+ if mode == "create":
new_processed_info = rest_client_obj.processed_fs
write_helper(recoder_path, new_processed_info)
- elif mode == 'delete':
- write_helper(recoder_path, '')
+ elif mode == "delete":
+ write_helper(recoder_path, "")
return NORMAL_STATE
@@ -213,11 +245,20 @@ def query_rollback_process(fs_names_type, rest_client_obj):
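+    # poll every 30 seconds until every filesystem reports rollback complete (status 0)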
while query_list:
for item in query_list:
fs_name, fs_type, _vstore_id = item
- data = rest_client_obj.query_rollback_snapshots_process(fs_name, vstore_id=_vstore_id)
- rollback_rate, rollback_status = data.get("rollbackRate"), data.get("rollbackStatus")
+ data = rest_client_obj.query_rollback_snapshots_process(
+ fs_name, vstore_id=_vstore_id
+ )
+ rollback_rate, rollback_status = data.get("rollbackRate"), data.get(
+ "rollbackStatus"
+ )
if int(rollback_status) == 0:
success_list.append(item)
- LOG.info("Filesystem[%s] rollback status[%s], process[%s]", fs_name, rollback_status, rollback_rate)
+ LOG.info(
+ "Filesystem[%s] rollback status[%s], process[%s]",
+ fs_name,
+ rollback_status,
+ rollback_rate,
+ )
query_list = list(set(query_list) - set(success_list))
time.sleep(30)
@@ -228,8 +269,12 @@ if __name__ == "__main__":
try:
RET_VAL = main(snapshot_mode, ip, main_backup_file_path)
except Exception as err:
- LOG.error('{} snapshots failed, err_details: {}, '
- 'err_traceback: {}'.format(snapshot_mode, str(err), traceback.format_exc(limit=-1)))
+ LOG.error(
+ "{} snapshots failed, err_details: {}, "
+ "err_traceback: {}".format(
+ snapshot_mode, str(err), traceback.format_exc(limit=-1)
+ )
+ )
exit(ABNORMAL_STATE)
exit(RET_VAL)
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py
index 8f98ee285100a177a75f5e0db91829f4478aa3f4..6c2a5559c77530d2bb5540bf2101da61bce914db 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py
@@ -12,8 +12,18 @@ import traceback
from storage_operate.dr_deploy_operate.dr_deploy_common import DRDeployCommon
from storage_operate.dr_deploy_operate.dr_deploy_common import KmcResolve
-from utils.config.rest_constant import HealthStatus, MetroDomainRunningStatus, SecresAccess, VstorePairRunningStatus, \
- FilesystemPairRunningStatus, ReplicationRunningStatus, CANTIAN_DOMAIN_PREFIX, Constant, SPEED, VstorePairConfigStatus
+from utils.config.rest_constant import (
+ HealthStatus,
+ MetroDomainRunningStatus,
+ SecresAccess,
+ VstorePairRunningStatus,
+ FilesystemPairRunningStatus,
+ ReplicationRunningStatus,
+ CANTIAN_DOMAIN_PREFIX,
+ Constant,
+ SPEED,
+ VstorePairConfigStatus,
+)
from logic.storage_operate import StorageInf
from logic.common_func import read_json_config
from logic.common_func import write_json_config
@@ -33,16 +43,22 @@ DR_DEPLOY_CONFIG = os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.j
DEPLOY_PARAM_FILE = "/opt/cantian/config/deploy_param.json"
DEFAULT_PARAM_FILE = os.path.join(CURRENT_PATH, "../../config_params.json")
EXEC_SQL = os.path.join(CURRENT_PATH, "../../cantian_common/exec_sql.py")
-LOCAL_PROCESS_RECORD_FILE = os.path.join(CURRENT_PATH, "../../../config/dr_process_record.json")
-FULL_CHECK_POINT_CMD = 'echo -e "alter system checkpoint global;" | '\
- 'su -s /bin/bash - %s -c \'source ~/.bashrc && '\
- 'export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && '\
- 'python3 -B %s\'' % (RUN_USER, EXEC_SQL)
-CANTIAN_DISASTER_RECOVERY_STATUS_CHECK = 'echo -e "select * from DV_LRPL_DETAIL;" | '\
- 'su -s /bin/bash - %s -c \'source ~/.bashrc && '\
- 'export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && '\
- 'python3 -B %s\'' % (RUN_USER, EXEC_SQL)
-ZSQL_INI_PATH = '/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini'
+LOCAL_PROCESS_RECORD_FILE = os.path.join(
+ CURRENT_PATH, "../../../config/dr_process_record.json"
+)
+FULL_CHECK_POINT_CMD = (
+ 'echo -e "alter system checkpoint global;" | '
+ "su -s /bin/bash - %s -c 'source ~/.bashrc && "
+ "export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && "
+ "python3 -B %s'" % (RUN_USER, EXEC_SQL)
+)
+CANTIAN_DISASTER_RECOVERY_STATUS_CHECK = (
+ 'echo -e "select * from DV_LRPL_DETAIL;" | '
+ "su -s /bin/bash - %s -c 'source ~/.bashrc && "
+ "export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && "
+ "python3 -B %s'" % (RUN_USER, EXEC_SQL)
+)
+ZSQL_INI_PATH = "/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini"
LOCK_INSTANCE_STEP1 = "set @ctc_ddl_enabled=true"
LOCK_INSTANCE_STEP2 = "lock instance for backup"
UNLOCK_INSTANCE = "unlock instance"
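
FULL_CHECK_POINT_CMD and CANTIAN_DISASTER_RECOVERY_STATUS_CHECK share one shape: pipe a single SQL statement into exec_sql.py, executed as RUN_USER under su with LD_LIBRARY_PATH exported. The call sites hand these strings to exec_popen and unpack a (return_code, output, stderr) triple; the sketch below shows how such a helper could be built on subprocess, inferred from the call sites rather than taken from the project's actual implementation:

```python
import subprocess


def exec_popen(cmd, timeout=60):
    """Run a shell command and return (return_code, stdout, stderr).

    Sketch inferred from the call sites in this patch; the project's real
    exec_popen may differ in details such as timeout handling.
    """
    proc = subprocess.Popen(
        cmd,
        shell=True,  # the command strings above rely on pipes and quoting
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    try:
        out, err = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        out, err = proc.communicate()
        return 1, out.strip(), "command timed out"
    return proc.returncode, out.strip(), err.strip()
```
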
@@ -51,7 +67,7 @@ FLUSH_TABLE = "flush table with read lock;unlock tables;"
INSTALL_TIMEOUT = 900
START_TIMEOUT = 3600
FS_CREAT_TIMEOUT = 300
-TOTAL_CHECK_DURATION = 180 # 创建双活pair检查时间
+TOTAL_CHECK_DURATION = 180  # check window, in seconds, for creating the HyperMetro pair
ACTIVE_RECORD_DICT = {
@@ -63,7 +79,7 @@ ACTIVE_RECORD_DICT = {
"create_metro_fs_pair": "default",
"create_rep_page_fs_pair": "default",
"sync_metro_fs_pair": "default",
- "sync_rep_page_fs_pair": "default"
+ "sync_rep_page_fs_pair": "default",
}
STANDBY_RECORD_DICT = {
"create_metro_domain": "default",
@@ -73,7 +89,7 @@ STANDBY_RECORD_DICT = {
"standby_install": "default",
"sync_metro_fs_pair": "default",
"sync_rep_page_fs_pair": "default",
- "standby_start": "default"
+ "standby_start": "default",
}
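
These two dicts seed the per-step progress file (dr_process_record.json) that record_deploy_process updates below: each step moves from "default" through "start"/"running", or a percentage for the sync steps, to "success" or "failed", and the error block holds the last failure. An illustrative mid-run shape of that file, with invented example values:

```python
# Illustrative contents of dr_process_record.json part-way through an
# active-site run; the step values are examples, not captured output.
record = {
    "data": {
        "create_metro_fs_pair": "success",
        "create_rep_page_fs_pair": "success",
        "sync_metro_fs_pair": "35%",        # sync steps record a percentage
        "sync_rep_page_fs_pair": "0%",
        "dr_deploy": "default",             # "default" == not started yet
    },
    "error": {"code": 0, "description": ""},
}
```
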
@@ -112,11 +128,15 @@ class DRDeploy(object):
         DR alarms require restarting cantian_exporter
:return:
"""
- cmd = "ps -ef | grep \"python3 /opt/cantian/ct_om/service/cantian_exporter/exporter/execute.py\"" \
- " | grep -v grep | awk '{print $2}' | xargs kill -9"
+ cmd = (
+ 'ps -ef | grep "python3 /opt/cantian/ct_om/service/cantian_exporter/exporter/execute.py"'
+ " | grep -v grep | awk '{print $2}' | xargs kill -9"
+ )
exec_popen(cmd)
- def record_deploy_process(self, exec_step: str, exec_status: str, code=0, description="") -> None:
+ def record_deploy_process(
+ self, exec_step: str, exec_status: str, code=0, description=""
+ ) -> None:
"""
         :param exec_step: step being executed
         :param exec_status: execution status
@@ -133,7 +153,7 @@ class DRDeploy(object):
data.update({exec_step: exec_status})
error["code"] = code
error["description"] = description
- with os.fdopen(os.open(self.record_progress_file, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(self.record_progress_file, flags, modes), "w") as fp:
json.dump(result, fp, indent=4)
def record_deploy_process_init(self):
@@ -165,38 +185,35 @@ class DRDeploy(object):
:return:
"""
self.metadata_in_cantian = self.dr_deploy_info.get("mysql_metadata_in_cantian")
- dr_record_dict = ACTIVE_RECORD_DICT if self.site == "active" else STANDBY_RECORD_DICT
+ dr_record_dict = (
+ ACTIVE_RECORD_DICT if self.site == "active" else STANDBY_RECORD_DICT
+ )
if not self.metadata_in_cantian:
- dr_record_dict.update({
- "create_rep_meta_fs_pair": "default",
- "sync_rep_meta_fs_pair": "default"
- })
+ dr_record_dict.update(
+ {
+ "create_rep_meta_fs_pair": "default",
+ "sync_rep_meta_fs_pair": "default",
+ }
+ )
if self.site == "standby":
- dr_record_dict.update({
- "cantian_disaster_recovery_status": "default"
- })
+ dr_record_dict.update({"cantian_disaster_recovery_status": "default"})
else:
if not self.metadata_in_cantian:
- dr_record_dict.update({
- "cancel_rep_meta_fs_secondary_write_lock": "default"
- })
- dr_record_dict.update({
- "cancel_rep_page_fs_secondary_write_lock": "default",
- "do_unlock_instance_for_backup": "default"
- })
- dr_record_dict.update({"dr_deploy": "default"})
- result = {
- "data": dr_record_dict,
- "error":
+ dr_record_dict.update(
+ {"cancel_rep_meta_fs_secondary_write_lock": "default"}
+ )
+ dr_record_dict.update(
{
- "code": 0,
- "description": ""
+ "cancel_rep_page_fs_secondary_write_lock": "default",
+ "do_unlock_instance_for_backup": "default",
}
- }
+ )
+ dr_record_dict.update({"dr_deploy": "default"})
+ result = {"data": dr_record_dict, "error": {"code": 0, "description": ""}}
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.record_progress_file, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(self.record_progress_file, flags, modes), "w") as fp:
json.dump(result, fp, indent=4)
def record_disaster_recovery_info(self, key, value):
@@ -219,29 +236,39 @@ class DRDeploy(object):
lock_instance_flag = False
wait_lock_instance_time = 0
mysql_shell = None
- while not lock_instance_flag and wait_lock_instance_time < LOCK_INSTANCE_TIMEOUT:
+ while (
+ not lock_instance_flag and wait_lock_instance_time < LOCK_INSTANCE_TIMEOUT
+ ):
try:
                 # create a mysql shell session and start it
- mysql_shell = MysqlShell(self.mysql_cmd, user=self.mysql_user, password=self.mysql_pwd)
+ mysql_shell = MysqlShell(
+ self.mysql_cmd, user=self.mysql_user, password=self.mysql_pwd
+ )
mysql_shell.start_session()
self.record_deploy_process("do_lock_instance_for_backup", "start")
mysql_shell.execute_command(LOCK_INSTANCE_STEP1, timeout=3)
- lock_instance_output = mysql_shell.execute_command(LOCK_INSTANCE_STEP2, timeout=3)
+ lock_instance_output = mysql_shell.execute_command(
+ LOCK_INSTANCE_STEP2, timeout=3
+ )
if "Query OK" in lock_instance_output:
lock_instance_flag = True
LOG.info("Success to do lock instance for backup.")
self.backup_lock_shell = mysql_shell
except Exception as err:
mysql_shell.close_session()
- LOG.info("Failed to do lock instance for backup, error msg:%s" % (str(err)))
+ LOG.info(
+ "Failed to do lock instance for backup, error msg:%s" % (str(err))
+ )
wait_lock_instance_time += 10
time.sleep(10)
if not lock_instance_flag:
err_msg = "Failed to do lock instance for backup, timeout."
LOG.error(err_msg)
- self.record_deploy_process("do_lock_instance_for_backup", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "do_lock_instance_for_backup", "failed", code=-1, description=err_msg
+ )
raise Exception("err_msg")
self.record_deploy_process("do_lock_instance_for_backup", "success")
@@ -279,7 +306,9 @@ class DRDeploy(object):
else:
err_msg = "Failed to do unlock instance for backup."
LOG.error(err_msg)
- self.record_deploy_process("do_unlock_instance_for_backup", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "do_unlock_instance_for_backup", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
self.backup_lock_shell = None
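
do_lock_instance_for_backup keeps the MysqlShell session in self.backup_lock_shell on purpose: MySQL's `lock instance for backup` only holds while the session that took it stays open, so the same session must survive until do_unlock_instance_for_backup runs. A sketch of that lifecycle as a context manager, assuming only the MysqlShell interface visible above:

```python
from contextlib import contextmanager


@contextmanager
def instance_backup_lock(shell, lock_sql, unlock_sql):
    """Hold `lock instance for backup` for the duration of a with-block.

    Sketch only; assumes the MysqlShell-style interface used above
    (start_session, execute_command, close_session).
    """
    shell.start_session()
    try:
        shell.execute_command("set @ctc_ddl_enabled=true", timeout=3)
        if "Query OK" not in shell.execute_command(lock_sql, timeout=3):
            raise Exception("lock instance for backup failed")
        yield shell  # the lock lives exactly as long as this session
        shell.execute_command(unlock_sql, timeout=3)
    finally:
        shell.close_session()  # closing the session also releases the lock
```
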
@@ -290,10 +319,12 @@ class DRDeploy(object):
:return:
"""
LOG.info("Start to do flush table with read lock.")
- cmd = "%s -u'%s' -p'%s' -e \"%s;\"" % (self.mysql_cmd,
- self.mysql_user,
- self.mysql_pwd,
- FLUSH_TABLE)
+ cmd = "%s -u'%s' -p'%s' -e \"%s;\"" % (
+ self.mysql_cmd,
+ self.mysql_user,
+ self.mysql_pwd,
+ FLUSH_TABLE,
+ )
cmd += ";echo last_cmd=$?"
self.record_deploy_process("do_flush_table_with_read_lock", "start")
attempts = 15
@@ -311,15 +342,19 @@ class DRDeploy(object):
time.sleep(20)
attempts -= 1
continue
-
+
else:
break
- err_msg = "Failed to do unlock table with read lock, " \
- "output:%s, stderr:%s" % (output, stderr)
+ err_msg = (
+ "Failed to do unlock table with read lock, "
+ "output:%s, stderr:%s" % (output, stderr)
+ )
-        err_msg.replace(self.mysql_pwd, "***")
+        err_msg = err_msg.replace(self.mysql_pwd, "***")
LOG.error(err_msg)
- self.record_deploy_process("do_flush_table_with_read_lock", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "do_flush_table_with_read_lock", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
def do_full_check_point(self):
@@ -331,9 +366,14 @@ class DRDeploy(object):
self.record_deploy_process("do_full_check_point", "start")
return_code, output, stderr = exec_popen(FULL_CHECK_POINT_CMD, timeout=100)
if return_code:
- err_msg = "Do full checkpoint failed, output: %s, stderr:%s" % (output, stderr)
+ err_msg = "Do full checkpoint failed, output: %s, stderr:%s" % (
+ output,
+ stderr,
+ )
LOG.error(err_msg)
- self.record_deploy_process("do_full_check_point", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "do_full_check_point", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
self.record_deploy_process("do_full_check_point", "success")
LOG.info("Success to do full checkpoint.")
@@ -371,17 +411,24 @@ class DRDeploy(object):
domain_name = self.dr_deploy_info.get("domain_name")
if hyper_domain_id is None:
domain_info = self.dr_deploy_opt.create_filesystem_hyper_metro_domain(
- remote_dev_name, remote_dev_esn, remote_device_id, domain_name)
+ remote_dev_name, remote_dev_esn, remote_device_id, domain_name
+ )
else:
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(hyper_domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ hyper_domain_id
+ )
if not domain_info:
domain_info = self.dr_deploy_opt.create_filesystem_hyper_metro_domain(
- remote_dev_name, remote_dev_esn, remote_device_id, domain_name)
+ remote_dev_name, remote_dev_esn, remote_device_id, domain_name
+ )
else:
exist_domain_name = domain_info.get("NAME")
if exist_domain_name != domain_name:
- err_msg = "Hyper metro domain [name:%s] is unmatched with config parmas [name:%s], " \
- "please check, details: %s" % (exist_domain_name, domain_name, domain_info)
+ err_msg = (
+ "Hyper metro domain [name:%s] is unmatched with config parmas [name:%s], "
+ "please check, details: %s"
+ % (exist_domain_name, domain_name, domain_info)
+ )
LOG.error(err_msg)
raise Exception(err_msg)
@@ -415,20 +462,29 @@ class DRDeploy(object):
if vstore_pair_id is None:
vstore_pair_info = self.dr_deploy_opt.create_hyper_metro_vstore_pair(
- domain_id, local_vstore_id, remote_vstore_id)
+ domain_id, local_vstore_id, remote_vstore_id
+ )
else:
- vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(vstore_pair_id)
+ vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(
+ vstore_pair_id
+ )
if not vstore_pair_info:
vstore_pair_info = self.dr_deploy_opt.create_hyper_metro_vstore_pair(
- domain_id, local_vstore_id, remote_vstore_id)
+ domain_id, local_vstore_id, remote_vstore_id
+ )
else:
exist_remote_vstoreid = vstore_pair_info.get("REMOTEVSTOREID")
exist_local_vstoreid = vstore_pair_info.get("LOCALVSTOREID")
exist_domain_id = vstore_pair_info.get("DOMAINID")
- if exist_local_vstoreid != local_vstore_id or remote_vstore_id != exist_remote_vstoreid or \
- exist_domain_id != domain_id:
- err_msg = "The vstore pair [id:%s] is unmatched with config params, please check, details: %s" % \
- (vstore_pair_id, vstore_pair_info)
+ if (
+ exist_local_vstoreid != local_vstore_id
+ or remote_vstore_id != exist_remote_vstoreid
+ or exist_domain_id != domain_id
+ ):
+ err_msg = (
+ "The vstore pair [id:%s] is unmatched with config params, please check, details: %s"
+ % (vstore_pair_id, vstore_pair_info)
+ )
LOG.error(err_msg)
raise Exception(err_msg)
if not vstore_pair_info:
@@ -439,24 +495,33 @@ class DRDeploy(object):
tmp_time = 0
while tmp_time < TOTAL_CHECK_DURATION:
- vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(vstore_pair_id)
+ vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(
+ vstore_pair_id
+ )
health_status = vstore_pair_info.get("HEALTHSTATUS")
running_status = vstore_pair_info.get("RUNNINGSTATUS")
config_status = vstore_pair_info.get("CONFIGSTATUS")
- if (running_status == VstorePairRunningStatus.Normal and health_status == HealthStatus.Normal
- and config_status == VstorePairConfigStatus.Normal):
+ if (
+ running_status == VstorePairRunningStatus.Normal
+ and health_status == HealthStatus.Normal
+ and config_status == VstorePairConfigStatus.Normal
+ ):
self.record_deploy_process("create_metro_vstore_pair", "success")
return vstore_pair_info
time.sleep(10)
tmp_time += 10
- err_msg = "Hyper metro vstore pair status is not normal, " \
- "health_status[%s], running_status[%s], details: %s" % \
- (get_status(health_status, HealthStatus),
- get_status(running_status, VstorePairRunningStatus),
- vstore_pair_info)
+ err_msg = (
+ "Hyper metro vstore pair status is not normal, "
+ "health_status[%s], running_status[%s], details: %s"
+ % (
+ get_status(health_status, HealthStatus),
+ get_status(running_status, VstorePairRunningStatus),
+ vstore_pair_info,
+ )
+ )
LOG.error(err_msg)
raise Exception(err_msg)
@@ -473,38 +538,61 @@ class DRDeploy(object):
dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id")
filesystem_pair_id = self.dr_deploy_info.get("ulog_fs_pair_id")
storage_dbstor_fs = self.dr_deploy_info.get("storage_dbstor_fs")
- dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(storage_dbstor_fs,
- dbstor_fs_vstore_id)
+ dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
+ storage_dbstor_fs, dbstor_fs_vstore_id
+ )
dbstor_fs_id = dbstor_fs_info.get("ID")
if filesystem_pair_id is None:
- filesystem_pair_infos = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ filesystem_pair_infos = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ )
if filesystem_pair_infos is None:
- filesystem_pair_info = self.dr_deploy_opt.create_hyper_metro_filesystem_pair(
- filesystem_id=dbstor_fs_id, pool_id=remote_pool_id, vstore_pair_id=vstore_pair_id)
+ filesystem_pair_info = (
+ self.dr_deploy_opt.create_hyper_metro_filesystem_pair(
+ filesystem_id=dbstor_fs_id,
+ pool_id=remote_pool_id,
+ vstore_pair_id=vstore_pair_id,
+ )
+ )
task_id = filesystem_pair_info.get("taskId")
self.record_deploy_process("create_metro_fs_pair", "running")
self.dr_deploy_opt.query_omtask_process(task_id, timeout=120)
else:
- filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
- pair_id=filesystem_pair_id)
+ filesystem_pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
+ pair_id=filesystem_pair_id
+ )
+ )
if filesystem_pair_info is None:
- filesystem_pair_info = self.dr_deploy_opt.create_hyper_metro_filesystem_pair(
- filesystem_id=dbstor_fs_id, pool_id=remote_pool_id, vstore_pair_id=vstore_pair_id)
+ filesystem_pair_info = (
+ self.dr_deploy_opt.create_hyper_metro_filesystem_pair(
+ filesystem_id=dbstor_fs_id,
+ pool_id=remote_pool_id,
+ vstore_pair_id=vstore_pair_id,
+ )
+ )
task_id = filesystem_pair_info.get("taskId")
self.record_deploy_process("create_metro_fs_pair", "running")
self.dr_deploy_opt.query_omtask_process(task_id, timeout=120)
else:
exist_domain_id = filesystem_pair_info.get("DOMAINID")
if exist_domain_id != hyper_domain_id:
- err_msg = "The HyperMetro domain [id:%s] of filesystem pair is unmatched with config " \
- "params [id:%s], please check, details: %s" % \
- (exist_domain_id, hyper_domain_id, filesystem_pair_info)
+ err_msg = (
+ "The HyperMetro domain [id:%s] of filesystem pair is unmatched with config "
+ "params [id:%s], please check, details: %s"
+ % (exist_domain_id, hyper_domain_id, filesystem_pair_info)
+ )
LOG.error(err_msg)
raise Exception(err_msg)
- filesystem_pair_infos = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ filesystem_pair_infos = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ )
if len(filesystem_pair_infos) != 1:
- err_msg = "The metro filesystem pair create failed, Details: %s" % filesystem_pair_infos
+ err_msg = (
+ "The metro filesystem pair create failed, Details: %s"
+ % filesystem_pair_infos
+ )
raise Exception(err_msg)
filesystem_pair_id = filesystem_pair_infos[0].get("ID")
@@ -513,25 +601,35 @@ class DRDeploy(object):
running_status = None
config_status = None
while tmp_time < TOTAL_CHECK_DURATION:
- filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
- pair_id=filesystem_pair_id)
+ filesystem_pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
+ pair_id=filesystem_pair_id
+ )
+ )
health_status = filesystem_pair_info.get("HEALTHSTATUS")
running_status = filesystem_pair_info.get("RUNNINGSTATUS")
config_status = filesystem_pair_info.get("CONFIGSTATUS")
- if health_status == HealthStatus.Normal and config_status == VstorePairConfigStatus.Normal:
+ if (
+ health_status == HealthStatus.Normal
+ and config_status == VstorePairConfigStatus.Normal
+ ):
self.record_deploy_process("create_metro_fs_pair", "success")
return filesystem_pair_info
time.sleep(10)
tmp_time += 10
- err_msg = "Hyper metro vstore pair status is not normal, " \
- "health_status[%s], running_status[%s], config_status[%s], details: %s" % \
- (get_status(health_status, HealthStatus),
- get_status(running_status, FilesystemPairRunningStatus),
- get_status(config_status, VstorePairConfigStatus),
- filesystem_pair_info)
+ err_msg = (
+ "Hyper metro vstore pair status is not normal, "
+ "health_status[%s], running_status[%s], config_status[%s], details: %s"
+ % (
+ get_status(health_status, HealthStatus),
+ get_status(running_status, FilesystemPairRunningStatus),
+ get_status(config_status, VstorePairConfigStatus),
+ filesystem_pair_info,
+ )
+ )
LOG.error(err_msg)
raise Exception(err_msg)
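
The log and error messages in these hunks render raw status codes through get_status(value, ConstantsClass). The helper itself is imported from elsewhere in the project; a plausible reconstruction, offered purely as an assumption, is a reverse lookup on the constants class:

```python
def get_status(value, status_cls):
    """Map a raw status code back to its symbolic name, e.g.
    get_status(health, HealthStatus) -> "Normal".

    Hypothetical reconstruction of the imported helper, for illustration.
    """
    for name, member in vars(status_cls).items():
        if not name.startswith("_") and member == value:
            return name
    return value  # fall back to the raw code when unknown
```
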
@@ -541,32 +639,52 @@ class DRDeploy(object):
         :param pair_id: ulog filesystem pair ID
:return:
"""
- filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(pair_id)
+ filesystem_pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
+ pair_id
+ )
+ )
running_status = filesystem_pair_info.get("RUNNINGSTATUS")
sync_progress = filesystem_pair_info.get("SYNCPROGRESS")
health_status = filesystem_pair_info.get("HEALTHSTATUS")
- if running_status == FilesystemPairRunningStatus.Normal \
- and health_status == HealthStatus.Normal \
- and sync_progress == "100":
+ if (
+ running_status == FilesystemPairRunningStatus.Normal
+ and health_status == HealthStatus.Normal
+ and sync_progress == "100"
+ ):
LOG.info("Sync hyper metro filesystem pair success")
self.record_deploy_process("sync_metro_fs_pair", "success")
return True
- if running_status == FilesystemPairRunningStatus.Invalid or health_status != HealthStatus.Normal:
- err_msg = "Failed to create hyper metro filesystem pair, " \
- "health status[%s], running status[%s] details:%s" % \
- (get_status(health_status, HealthStatus),
- get_status(running_status, FilesystemPairRunningStatus),
- filesystem_pair_info)
- self.record_deploy_process("sync_metro_fs_pair", "failed", code=-1, description=err_msg)
+ if (
+ running_status == FilesystemPairRunningStatus.Invalid
+ or health_status != HealthStatus.Normal
+ ):
+ err_msg = (
+ "Failed to create hyper metro filesystem pair, "
+ "health status[%s], running status[%s] details:%s"
+ % (
+ get_status(health_status, HealthStatus),
+ get_status(running_status, FilesystemPairRunningStatus),
+ filesystem_pair_info,
+ )
+ )
+ self.record_deploy_process(
+ "sync_metro_fs_pair", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
time.sleep(2)
- LOG.info("Create hyper metro filesystem pair process[%s%%], "
- "running_status[%s], health_status[%s]",
- sync_progress,
- get_status(running_status, FilesystemPairRunningStatus),
- get_status(health_status, HealthStatus))
+ LOG.info(
+ "Create hyper metro filesystem pair process[%s%%], "
+ "running_status[%s], health_status[%s]",
+ sync_progress,
+ get_status(running_status, FilesystemPairRunningStatus),
+ get_status(health_status, HealthStatus),
+ )
if running_status == FilesystemPairRunningStatus.Paused:
- self.record_deploy_process("sync_metro_fs_pair", get_status(running_status, FilesystemPairRunningStatus))
+ self.record_deploy_process(
+ "sync_metro_fs_pair",
+ get_status(running_status, FilesystemPairRunningStatus),
+ )
else:
self.record_deploy_process("sync_metro_fs_pair", sync_progress + "%")
return False
@@ -581,25 +699,42 @@ class DRDeploy(object):
remote_pool_id = self.dr_deploy_info.get("remote_pool_id")
name_suffix = self.dr_deploy_info.get("name_suffix", "")
remote_name_rule = 2 if name_suffix else 1
- remote_replication_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(
- filesystem_id=page_fs_id)
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info(
+ filesystem_id=page_fs_id
+ )
+ )
if remote_replication_pair_info is None:
- rep_filesystem_pair_task_info = self.dr_deploy_opt.create_remote_replication_filesystem_pair(
- remote_device_id=remote_device_id,
- remote_pool_id=remote_pool_id,
- local_fs_id=page_fs_id,
- remote_name_rule=remote_name_rule,
- name_suffix=name_suffix,
- speed=self.sync_speed
+ rep_filesystem_pair_task_info = (
+ self.dr_deploy_opt.create_remote_replication_filesystem_pair(
+ remote_device_id=remote_device_id,
+ remote_pool_id=remote_pool_id,
+ local_fs_id=page_fs_id,
+ remote_name_rule=remote_name_rule,
+ name_suffix=name_suffix,
+ speed=self.sync_speed,
+ )
)
rep_filesystem_pair_task_id = rep_filesystem_pair_task_info.get("taskId")
- self.dr_deploy_opt.query_omtask_process(rep_filesystem_pair_task_id, timeout=120)
- remote_replication_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(
- filesystem_id=page_fs_id)
+ self.dr_deploy_opt.query_omtask_process(
+ rep_filesystem_pair_task_id, timeout=120
+ )
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info(
+ filesystem_id=page_fs_id
+ )
+ )
return remote_replication_pair_info
- @retry(retry_times=3, wait_times=20, log=LOG, task="do_sync_remote_replication_filesystem_pair")
- def do_sync_remote_replication_filesystem_pair(self, pair_id: str, is_page: bool) -> bool:
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="do_sync_remote_replication_filesystem_pair",
+ )
+ def do_sync_remote_replication_filesystem_pair(
+ self, pair_id: str, is_page: bool
+ ) -> bool:
"""
         Sync the remote replication pair
         :param is_page: True for the page filesystem, False for the meta filesystem
@@ -607,57 +742,88 @@ class DRDeploy(object):
:return:
"""
exec_step = "sync_rep_meta_fs_pair" if not is_page else "sync_rep_page_fs_pair"
- remote_replication_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id=pair_id)
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ pair_id=pair_id
+ )
+ )
replication_pair_id = remote_replication_pair_info.get("ID")
replication_progress = remote_replication_pair_info.get("REPLICATIONPROGRESS")
start_time = remote_replication_pair_info.get("STARTTIME")
end_time = remote_replication_pair_info.get("ENDTIME")
- replication_pair_health_status = remote_replication_pair_info.get("HEALTHSTATUS")
- replication_pair_running_status = remote_replication_pair_info.get("RUNNINGSTATUS")
+ replication_pair_health_status = remote_replication_pair_info.get(
+ "HEALTHSTATUS"
+ )
+ replication_pair_running_status = remote_replication_pair_info.get(
+ "RUNNINGSTATUS"
+ )
         # if secondary access is already read/write and the pair is split, return directly
secres_access = remote_replication_pair_info.get("SECRESACCESS")
if not is_page:
self.meta_fs_pair_id = replication_pair_id
else:
self.page_fs_pair_id = replication_pair_id
- if secres_access == SecresAccess.ReadAndWrite and \
- replication_pair_running_status == ReplicationRunningStatus.Split:
+ if (
+ secres_access == SecresAccess.ReadAndWrite
+ and replication_pair_running_status == ReplicationRunningStatus.Split
+ ):
LOG.info("Create remote replication pair success.")
self.record_deploy_process(exec_step, "success")
return True
self.replication_status_check_and_sync(exec_step, remote_replication_pair_info)
time.sleep(20)
- LOG.info("Sync remote replication filesystem pair[%s], health status:[%s], "
- "running status[%s], progress[%s%%], start time[%s]",
- replication_pair_id,
- get_status(replication_pair_health_status, HealthStatus),
- get_status(replication_pair_running_status, ReplicationRunningStatus),
- replication_progress,
- datetime.datetime.fromtimestamp(int(start_time)))
- if replication_progress == "100" and \
- replication_pair_running_status == ReplicationRunningStatus.Normal and \
- replication_pair_health_status == HealthStatus.Normal:
- LOG.info("Success to sync remote replication filesystem pair[%s], end time[%s]",
- replication_pair_id,
- datetime.datetime.fromtimestamp(int(end_time)))
+ LOG.info(
+ "Sync remote replication filesystem pair[%s], health status:[%s], "
+ "running status[%s], progress[%s%%], start time[%s]",
+ replication_pair_id,
+ get_status(replication_pair_health_status, HealthStatus),
+ get_status(replication_pair_running_status, ReplicationRunningStatus),
+ replication_progress,
+ datetime.datetime.fromtimestamp(int(start_time)),
+ )
+ if (
+ replication_progress == "100"
+ and replication_pair_running_status == ReplicationRunningStatus.Normal
+ and replication_pair_health_status == HealthStatus.Normal
+ ):
+ LOG.info(
+ "Success to sync remote replication filesystem pair[%s], end time[%s]",
+ replication_pair_id,
+ datetime.datetime.fromtimestamp(int(end_time)),
+ )
if int(start_time) - int(end_time) > Constant.FULL_SYNC_MAX_TIME:
- LOG.info("Do sync remote replication filesystem[%s] pair of full copy." % replication_pair_id)
- self.dr_deploy_opt.sync_remote_replication_filesystem_pair(pair_id=replication_pair_id, vstore_id=0,
- is_full_copy=False)
+ LOG.info(
+ "Do sync remote replication filesystem[%s] pair of full copy."
+ % replication_pair_id
+ )
+ self.dr_deploy_opt.sync_remote_replication_filesystem_pair(
+ pair_id=replication_pair_id, vstore_id=0, is_full_copy=False
+ )
return False
self.record_deploy_process(exec_step, "success")
return True
- if replication_pair_health_status != ReplicationRunningStatus.Normal or \
- replication_pair_running_status not in \
- [ReplicationRunningStatus.Normal, ReplicationRunningStatus.Synchronizing]:
- err_msg = "Failed to sync remote replication filesystem[%s] pair." % replication_pair_id
- self.record_deploy_process(exec_step, "failed", code=-1, description=err_msg)
+ if (
+            replication_pair_health_status != HealthStatus.Normal
+ or replication_pair_running_status
+ not in [
+ ReplicationRunningStatus.Normal,
+ ReplicationRunningStatus.Synchronizing,
+ ]
+ ):
+ err_msg = (
+ "Failed to sync remote replication filesystem[%s] pair."
+ % replication_pair_id
+ )
+ self.record_deploy_process(
+ exec_step, "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
self.record_deploy_process(exec_step, replication_progress + "%")
return False
- def replication_status_check_and_sync(self, exec_step, remote_replication_pair_info):
+ def replication_status_check_and_sync(
+ self, exec_step, remote_replication_pair_info
+ ):
"""
         Check the replication pair status and trigger synchronization
:param exec_step:
@@ -666,37 +832,67 @@ class DRDeploy(object):
"""
replication_pair_id = remote_replication_pair_info.get("ID")
start_time = remote_replication_pair_info.get("STARTTIME")
- replication_pair_health_status = remote_replication_pair_info.get("HEALTHSTATUS")
- replication_pair_running_status = remote_replication_pair_info.get("RUNNINGSTATUS")
+ replication_pair_health_status = remote_replication_pair_info.get(
+ "HEALTHSTATUS"
+ )
+ replication_pair_running_status = remote_replication_pair_info.get(
+ "RUNNINGSTATUS"
+ )
         # if pair health is abnormal and running status is none of normal, synchronizing, or to-be-recovered, exit abnormally
- if replication_pair_health_status != HealthStatus.Normal and \
- replication_pair_running_status not in [ReplicationRunningStatus.Normal,
- ReplicationRunningStatus.Synchronizing,
- ReplicationRunningStatus.TobeRecovered]:
- err_msg = "Current replication pair health is not normal, " \
- "current status: %s, running status:%s, filesystem: %s" % \
- (get_status(replication_pair_health_status, HealthStatus),
- get_status(replication_pair_running_status, ReplicationRunningStatus),
- replication_pair_id)
- self.record_deploy_process(exec_step, "failed", code=-1, description=err_msg)
+ if (
+ replication_pair_health_status != HealthStatus.Normal
+ and replication_pair_running_status
+ not in [
+ ReplicationRunningStatus.Normal,
+ ReplicationRunningStatus.Synchronizing,
+ ReplicationRunningStatus.TobeRecovered,
+ ]
+ ):
+ err_msg = (
+ "Current replication pair health is not normal, "
+ "current status: %s, running status:%s, filesystem: %s"
+ % (
+ get_status(replication_pair_health_status, HealthStatus),
+ get_status(
+ replication_pair_running_status, ReplicationRunningStatus
+ ),
+ replication_pair_id,
+ )
+ )
+ self.record_deploy_process(
+ exec_step, "failed", code=-1, description=err_msg
+ )
LOG.error(err_msg)
raise Exception(err_msg)
         # split with no sync start time means the pair was just created and never synced: run a full sync
- if replication_pair_running_status == ReplicationRunningStatus.Split and start_time is None:
- LOG.info("Do sync remote replication filesystem[%s] pair of full copy." % replication_pair_id)
- self.dr_deploy_opt.sync_remote_replication_filesystem_pair(pair_id=replication_pair_id,
- vstore_id="0",
- is_full_copy=True)
+ if (
+ replication_pair_running_status == ReplicationRunningStatus.Split
+ and start_time is None
+ ):
+ LOG.info(
+ "Do sync remote replication filesystem[%s] pair of full copy."
+ % replication_pair_id
+ )
+ self.dr_deploy_opt.sync_remote_replication_filesystem_pair(
+ pair_id=replication_pair_id, vstore_id="0", is_full_copy=True
+ )
         # split or to-be-recovered with a sync start time means a sync already started: run an incremental sync
- if replication_pair_running_status in \
- [ReplicationRunningStatus.Split, ReplicationRunningStatus.TobeRecovered] \
- and start_time is not None:
- LOG.info("Do sync remote replication filesystem[%s] pair of incremental." % replication_pair_id)
- self.dr_deploy_opt.sync_remote_replication_filesystem_pair(pair_id=replication_pair_id,
- vstore_id="0",
- is_full_copy=False)
+ if (
+ replication_pair_running_status
+ in [ReplicationRunningStatus.Split, ReplicationRunningStatus.TobeRecovered]
+ and start_time is not None
+ ):
+ LOG.info(
+ "Do sync remote replication filesystem[%s] pair of incremental."
+ % replication_pair_id
+ )
+ self.dr_deploy_opt.sync_remote_replication_filesystem_pair(
+ pair_id=replication_pair_id, vstore_id="0", is_full_copy=False
+ )
- def do_remote_replication_filesystem_pair_cancel_secondary_write_lock(self, pair_id: str, is_page: bool) -> None:
+ def do_remote_replication_filesystem_pair_cancel_secondary_write_lock(
+ self, pair_id: str, is_page: bool
+ ) -> None:
"""
         Cancel the secondary write lock after the remote replication pair is split
         1. Query the pair status
@@ -705,18 +901,31 @@ class DRDeploy(object):
         :param is_page: True for the page filesystem, otherwise the meta filesystem
         :param pair_id: remote replication pair ID
"""
- exec_step = "cancel_rep_meta_fs_secondary_write_lock" if not is_page \
+ exec_step = (
+ "cancel_rep_meta_fs_secondary_write_lock"
+ if not is_page
else "cancel_rep_page_fs_secondary_write_lock"
+ )
self.record_deploy_process(exec_step, "start")
- rep_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(pair_id)
+ rep_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(pair_id)
+ )
self.record_deploy_process(exec_step, "running")
secres_access = rep_pair_info.get("SECRESACCESS")
running_status = rep_pair_info.get("RUNNINGSTATUS")
- if secres_access == SecresAccess.ReadAndWrite and running_status == ReplicationRunningStatus.Split:
- LOG.info("Current replicantion pair status already is[%s].", get_status(secres_access, SecresAccess))
+ if (
+ secres_access == SecresAccess.ReadAndWrite
+ and running_status == ReplicationRunningStatus.Split
+ ):
+ LOG.info(
+ "Current replicantion pair status already is[%s].",
+ get_status(secres_access, SecresAccess),
+ )
self.record_deploy_process(exec_step, "success")
return
- self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(pair_id)
+ self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(
+ pair_id
+ )
self.record_deploy_process(exec_step, "success")
def deploy_remote_replication_pair(self, fs_name: str, is_page: bool) -> str:
@@ -733,15 +942,24 @@ class DRDeploy(object):
         :param fs_name: filesystem name
:return:
"""
- exec_step = "create_rep_meta_fs_pair" if not is_page else "create_rep_page_fs_pair"
+ exec_step = (
+ "create_rep_meta_fs_pair" if not is_page else "create_rep_page_fs_pair"
+ )
self.record_deploy_process(exec_step, "start")
LOG.info("Start to create [%s]remote replication pair success.", fs_name)
- LOG.info("Create remote replication pair step 1: query filesystem[%s] info.", fs_name)
+ LOG.info(
+ "Create remote replication pair step 1: query filesystem[%s] info.", fs_name
+ )
fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(fs_name)
fs_id = fs_info.get("ID")
- LOG.info("Create remote replication pair step 2: create filesystem[%s] pair.", fs_name)
+ LOG.info(
+ "Create remote replication pair step 2: create filesystem[%s] pair.",
+ fs_name,
+ )
self.record_deploy_process(exec_step, "running")
- remote_replication_pair_info = self.do_create_remote_replication_filesystem_pair(fs_id)
+ remote_replication_pair_info = (
+ self.do_create_remote_replication_filesystem_pair(fs_id)
+ )
replication_pair_id = remote_replication_pair_info[0].get("ID")
key = "page_fs_pair_id" if is_page else "meta_fs_pair_id"
self.record_disaster_recovery_info(key, replication_pair_id)
@@ -762,24 +980,35 @@ class DRDeploy(object):
try:
domain_info = self.do_create_filesystem_hyper_metro_domain()
except Exception as err:
- self.record_deploy_process("create_metro_domain", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "create_metro_domain", "failed", code=-1, description=str(err)
+ )
raise err
self.record_disaster_recovery_info("hyper_domain_id", domain_info.get("ID"))
try:
vstore_pair_info = self.do_create_hyper_metro_vstore_pair(domain_info)
except Exception as err:
- self.record_deploy_process("create_metro_vstore_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "create_metro_vstore_pair", "failed", code=-1, description=str(err)
+ )
raise err
self.record_disaster_recovery_info("vstore_pair_id", vstore_pair_info.get("ID"))
try:
- filesystem_pair_info = self.do_create_hyper_metro_filesystem_pair(vstore_pair_info)
+ filesystem_pair_info = self.do_create_hyper_metro_filesystem_pair(
+ vstore_pair_info
+ )
except Exception as err:
- self.record_deploy_process("create_metro_fs_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "create_metro_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
self.ulog_fs_pair_id = filesystem_pair_info.get("ID")
- self.dr_deploy_opt.modify_hyper_metro_filesystem_pair_sync_speed(vstore_pair_id=self.ulog_fs_pair_id,
- speed=self.sync_speed)
- self.record_disaster_recovery_info("ulog_fs_pair_id", filesystem_pair_info.get("ID"))
+ self.dr_deploy_opt.modify_hyper_metro_filesystem_pair_sync_speed(
+ vstore_pair_id=self.ulog_fs_pair_id, speed=self.sync_speed
+ )
+ self.record_disaster_recovery_info(
+ "ulog_fs_pair_id", filesystem_pair_info.get("ID")
+ )
def query_cantian_disaster_recovery_status(self):
"""
@@ -789,35 +1018,60 @@ class DRDeploy(object):
"""
self.record_deploy_process("cantian_disaster_recovery_status", "start")
node_id = self.deploy_params.get("node_id")
- cms_cmd = "su -s /bin/bash - %s -c 'source ~/.bashrc " \
- "&& cms stat | awk \"{print \$1, \$9}\"'" % self.run_user
+ cms_cmd = (
+ "su -s /bin/bash - %s -c 'source ~/.bashrc "
+ '&& cms stat | awk "{print \$1, \$9}"\'' % self.run_user
+ )
return_code, output, stderr = exec_popen(cms_cmd)
LOG.info("Check cms reformer node.")
if return_code:
- err_msg = "Execute cms command[%s] query reform node failed, output:%s, " \
- "stderr:%s" % (cms_cmd, output, stderr)
- self.record_deploy_process("cantian_disaster_recovery_status", "failed",
- code=-1, description=err_msg)
+ err_msg = (
+ "Execute cms command[%s] query reform node failed, output:%s, "
+ "stderr:%s" % (cms_cmd, output, stderr)
+ )
+ self.record_deploy_process(
+ "cantian_disaster_recovery_status",
+ "failed",
+ code=-1,
+ description=err_msg,
+ )
raise Exception(err_msg)
cms_stat = output.split("\n")
LOG.info("Cms stat is:\n %s", cms_stat)
LOG.info("Check cantian replay status.")
self.record_deploy_process("cantian_disaster_recovery_status", "running")
for node_stat in cms_stat:
- if "REFORMER" in node_stat and node_id == node_stat.split(" ")[0].strip(" "):
- return_code, output, stderr = exec_popen(CANTIAN_DISASTER_RECOVERY_STATUS_CHECK, timeout=20)
+ if "REFORMER" in node_stat and node_id == node_stat.split(" ")[0].strip(
+ " "
+ ):
+ return_code, output, stderr = exec_popen(
+ CANTIAN_DISASTER_RECOVERY_STATUS_CHECK, timeout=20
+ )
if return_code:
- err_msg = "Execute check cantian disaster recovery command failed, " \
- "oupout:%s, stderr:%s" % (output, stderr)
- self.record_deploy_process("cantian_disaster_recovery_status", "failed",
- code=-1, description=err_msg)
+ err_msg = (
+ "Execute check cantian disaster recovery command failed, "
+ "oupout:%s, stderr:%s" % (output, stderr)
+ )
+ self.record_deploy_process(
+ "cantian_disaster_recovery_status",
+ "failed",
+ code=-1,
+ description=err_msg,
+ )
LOG.info("Check cantian replay failed.")
raise Exception(err_msg)
if "START_REPLAY" not in output:
- err_msg = "Cantian lrpl status is abnormal, details: %s" % output.split("SQL>")[1:]
- self.record_deploy_process("cantian_disaster_recovery_status", "failed", code=-1,
- description=err_msg)
+ err_msg = (
+ "Cantian lrpl status is abnormal, details: %s"
+ % output.split("SQL>")[1:]
+ )
+ self.record_deploy_process(
+ "cantian_disaster_recovery_status",
+ "failed",
+ code=-1,
+ description=err_msg,
+ )
LOG.info("Check cantian replay failed.")
raise Exception(err_msg)
@@ -841,9 +1095,13 @@ class DRDeploy(object):
self.update_install_status(node_id, "start", "failed")
err_pattern = re.compile(".*ERROR.*")
_err = err_pattern.findall(output + stderr)
- err_msg = "Failed to execute start, details:\n%s for details see " \
- "/opt/cantian/log/deploy/deploy.log" % "\n".join(_err)
- self.record_deploy_process("standby_start", "failed", code=-1, description=err_msg)
+ err_msg = (
+ "Failed to execute start, details:\n%s for details see "
+ "/opt/cantian/log/deploy/deploy.log" % "\n".join(_err)
+ )
+ self.record_deploy_process(
+ "standby_start", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
self.update_install_status(node_id, "start", "success")
LOG.info("Start node[%s] cantian success.", node_id)
@@ -866,13 +1124,13 @@ class DRDeploy(object):
if os.path.exists(install_record_file):
with open(install_record_file, "r") as fp:
install_status = json.loads(fp.read())
- with os.fdopen(os.open(install_record_file, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(install_record_file, flags, modes), "w") as fp:
install_status.update({exec_step: exec_status})
json.dump(install_status, fp, indent=4)
else:
data = {"install": "default", "start": "default", "stop": "default"}
data.update({exec_step: exec_status})
- with os.fdopen(os.open(install_record_file, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(install_record_file, flags, modes), "w") as fp:
json.dump(data, fp, indent=4)
LOG.info("Update %s status[%s] success", exec_step, exec_status)
@@ -903,15 +1161,21 @@ class DRDeploy(object):
dbstor_fs_name = self.dr_deploy_info.get("storage_dbstor_fs")
dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id")
dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
- dbstor_fs_name, vstore_id=dbstor_fs_vstore_id)
+ dbstor_fs_name, vstore_id=dbstor_fs_vstore_id
+ )
ulog_fs_pair_info = None
if dbstor_fs_info and not ulog_fs_pair_ready_flag:
dbstor_fs_id = dbstor_fs_info.get("ID")
try:
- ulog_fs_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ ulog_fs_pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(
+ dbstor_fs_id
+ )
+ )
except Exception as err:
- self.record_deploy_process("create_metro_fs_pair", "failed",
- code=-1, description=str(err))
+ self.record_deploy_process(
+ "create_metro_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
self.record_deploy_process("create_metro_domain", "success")
self.record_deploy_process("create_metro_vstore_pair", "success")
@@ -927,13 +1191,19 @@ class DRDeploy(object):
self.record_disaster_recovery_info("hyper_domain_id", hyper_domain_id)
self.record_disaster_recovery_info("vstore_pair_id", vstore_pair_id)
if running_status == FilesystemPairRunningStatus.Paused:
- self.record_deploy_process("sync_metro_fs_pair",
- get_status(running_status, FilesystemPairRunningStatus))
+ self.record_deploy_process(
+ "sync_metro_fs_pair",
+ get_status(running_status, FilesystemPairRunningStatus),
+ )
else:
- self.record_deploy_process("sync_metro_fs_pair", sync_progress + "%")
- if running_status == FilesystemPairRunningStatus.Normal \
- and health_status == HealthStatus.Normal \
- and sync_progress == "100":
+ self.record_deploy_process(
+ "sync_metro_fs_pair", sync_progress + "%"
+ )
+ if (
+ running_status == FilesystemPairRunningStatus.Normal
+ and health_status == HealthStatus.Normal
+ and sync_progress == "100"
+ ):
LOG.info("Hyper metro filesystem[%s] pair ready", dbstor_fs_name)
self.record_deploy_process("sync_metro_fs_pair", "success")
ulog_fs_pair_ready_flag = True
@@ -947,23 +1217,36 @@ class DRDeploy(object):
"""
dbstor_page_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs")
dbstor_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
- dbstor_page_fs_name)
+ dbstor_page_fs_name
+ )
page_fs_pair_info = None
if dbstor_page_fs_info and not page_fs_pair_ready_flag:
self.record_deploy_process("create_rep_page_fs_pair", "success")
dbstor_page_fs_id = dbstor_page_fs_info.get("ID")
- page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(dbstor_page_fs_id)
+ page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(
+ dbstor_page_fs_id
+ )
if page_fs_pair_info:
             # secondary read/write access plus split running status means the sync has completed
page_fs_pair_id = page_fs_pair_info[0].get("ID")
secres_access = page_fs_pair_info[0].get("SECRESACCESS")
running_status = page_fs_pair_info[0].get("RUNNINGSTATUS")
self.record_disaster_recovery_info("page_fs_pair_id", page_fs_pair_id)
- remote_replication_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- page_fs_pair_id)
- replication_progress = remote_replication_pair_info.get("REPLICATIONPROGRESS")
- self.record_deploy_process("sync_rep_page_fs_pair", str(replication_progress) + "%")
- if secres_access == SecresAccess.ReadAndWrite and running_status == ReplicationRunningStatus.Split:
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ page_fs_pair_id
+ )
+ )
+ replication_progress = remote_replication_pair_info.get(
+ "REPLICATIONPROGRESS"
+ )
+ self.record_deploy_process(
+ "sync_rep_page_fs_pair", str(replication_progress) + "%"
+ )
+ if (
+ secres_access == SecresAccess.ReadAndWrite
+ and running_status == ReplicationRunningStatus.Split
+ ):
LOG.info("Remote replication pair[%s] ready.", dbstor_page_fs_name)
self.record_deploy_process("sync_rep_page_fs_pair", "success")
page_fs_pair_ready_flag = True
@@ -978,26 +1261,47 @@ class DRDeploy(object):
mysql_metadata_in_cantian = self.dr_deploy_info.get("mysql_metadata_in_cantian")
metadata_fs_name = self.dr_deploy_info.get("storage_metadata_fs")
metadata_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
- metadata_fs_name)
- metadata_fs_pair_info = None if not mysql_metadata_in_cantian else metadata_fs_info
- if metadata_fs_info and not mysql_metadata_in_cantian and not metadata_fs_ready_flag:
+ metadata_fs_name
+ )
+ metadata_fs_pair_info = (
+ None if not mysql_metadata_in_cantian else metadata_fs_info
+ )
+ if (
+ metadata_fs_info
+ and not mysql_metadata_in_cantian
+ and not metadata_fs_ready_flag
+ ):
metadata_fs_id = metadata_fs_info.get("ID")
- metadata_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(metadata_fs_id)
+ metadata_fs_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info(metadata_fs_id)
+ )
self.record_deploy_process("create_rep_meta_fs_pair", "success")
if metadata_fs_pair_info:
             # secondary read/write access plus split running status means the sync has completed
meta_fs_pair_id = metadata_fs_pair_info[0].get("ID")
secres_access = metadata_fs_pair_info[0].get("SECRESACCESS")
running_status = metadata_fs_pair_info[0].get("RUNNINGSTATUS")
- remote_replication_pair_info = self.dr_deploy_opt. \
- query_remote_replication_pair_info_by_pair_id(meta_fs_pair_id)
- replication_progress = remote_replication_pair_info.get("REPLICATIONPROGRESS")
- self.record_deploy_process("sync_rep_meta_fs_pair", str(replication_progress) + "%")
- if secres_access == SecresAccess.ReadAndWrite and running_status == ReplicationRunningStatus.Split:
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ meta_fs_pair_id
+ )
+ )
+ replication_progress = remote_replication_pair_info.get(
+ "REPLICATIONPROGRESS"
+ )
+ self.record_deploy_process(
+ "sync_rep_meta_fs_pair", str(replication_progress) + "%"
+ )
+ if (
+ secres_access == SecresAccess.ReadAndWrite
+ and running_status == ReplicationRunningStatus.Split
+ ):
LOG.info("Remote replication pair[%s] ready.", metadata_fs_name)
self.record_deploy_process("sync_rep_meta_fs_pair", "success")
metadata_fs_ready_flag = True
- self.record_disaster_recovery_info("meta_fs_pair_id", meta_fs_pair_id)
+ self.record_disaster_recovery_info(
+ "meta_fs_pair_id", meta_fs_pair_id
+ )
return metadata_fs_pair_info, metadata_fs_ready_flag, metadata_fs_info
def create_nfs_share_and_client(self, fs_info: dict) -> None:
@@ -1012,11 +1316,7 @@ class DRDeploy(object):
return
fs_id = fs_info.get("ID")
fs_name = fs_info.get("NAME")
- share_data = {
- "SHAREPATH": f"/{fs_name}/",
- "vstoreId": "0",
- "FSID": fs_id
- }
+ share_data = {"SHAREPATH": f"/{fs_name}/", "vstoreId": "0", "FSID": fs_id}
share_info = self.dr_deploy_opt.storage_opt.query_nfs_info(fs_id)
if not share_info:
parent_id = self.dr_deploy_opt.storage_opt.create_nfs_share(share_data)
@@ -1028,9 +1328,11 @@ class DRDeploy(object):
"ROOTSQUASH": 1,
"PARENTID": parent_id,
"vstoreId": "0",
- "NAME": "*"
+ "NAME": "*",
}
- client_info = self.dr_deploy_opt.storage_opt.query_nfs_share_auth_client(parent_id)
+ client_info = self.dr_deploy_opt.storage_opt.query_nfs_share_auth_client(
+ parent_id
+ )
if not client_info:
self.dr_deploy_opt.storage_opt.add_nfs_client(client_data)
@@ -1059,17 +1361,26 @@ class DRDeploy(object):
cert_encrypt_pwd = ""
if self.dr_deploy_info.get("mes_ssl_switch"):
cert_encrypt_pwd = input()
- cmd = "echo -e \"%s\\n%s\\n%s\\n%s\\n%s\"|sh %s/install.sh %s" \
- % (dbstor_user, dbstor_pwd,
- cantian_pwd, comfirm_cantian_pwd, cert_encrypt_pwd,
- ctl_file_path, DEFAULT_PARAM_FILE)
+ cmd = 'echo -e "%s\\n%s\\n%s\\n%s\\n%s"|sh %s/install.sh %s' % (
+ dbstor_user,
+ dbstor_pwd,
+ cantian_pwd,
+ comfirm_cantian_pwd,
+ cert_encrypt_pwd,
+ ctl_file_path,
+ DEFAULT_PARAM_FILE,
+ )
_, output, stderr = exec_popen(cmd, timeout=600)
if "install success" not in output:
err_pattern = re.compile(".*ERROR.*")
_err = err_pattern.findall(output + stderr)
- err_msg = "Failed to execute install, details:\n%s, for details see " \
- "/opt/cantian/log/deploy/deploy.log" % "\n".join(_err)
- self.record_deploy_process("standby_install", "failed", code=-1, description=err_msg)
+ err_msg = (
+ "Failed to execute install, details:\n%s, for details see "
+ "/opt/cantian/log/deploy/deploy.log" % "\n".join(_err)
+ )
+ self.record_deploy_process(
+ "standby_install", "failed", code=-1, description=err_msg
+ )
self.update_install_status(node_id, "install", "failed")
raise Exception(err_msg)
self.update_install_status(node_id, "install", "success")
@@ -1080,7 +1391,9 @@ class DRDeploy(object):
if self.dr_deploy_info.get("cantian_in_container") == "1":
return
         # check whether this is single-process mode, which requires installing mysql
- install_json_path = os.path.join(CURRENT_PATH, "../../cantian/install_config.json")
+ install_json_path = os.path.join(
+ CURRENT_PATH, "../../cantian/install_config.json"
+ )
install_json_data = read_json_config(install_json_path)
if install_json_data.get("M_RUNING_MODE") == "cantiand_with_mysql_in_cluster":
             # single-process mode must install mysql before starting Cantian
@@ -1106,40 +1419,66 @@ class DRDeploy(object):
ulog_ready, page_ready, meta_ready = True, True, True
while True:
try:
- ulog_ready = self.do_sync_hyper_metro_filesystem_pair(self.ulog_fs_pair_id)
+ ulog_ready = self.do_sync_hyper_metro_filesystem_pair(
+ self.ulog_fs_pair_id
+ )
except Exception as err:
- self.record_deploy_process("sync_metro_fs_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "sync_metro_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
try:
- page_ready = self.do_sync_remote_replication_filesystem_pair(self.page_fs_pair_id, True)
+ page_ready = self.do_sync_remote_replication_filesystem_pair(
+ self.page_fs_pair_id, True
+ )
except Exception as err:
- self.record_deploy_process("sync_rep_page_fs_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "sync_rep_page_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
if not self.metadata_in_cantian:
try:
- meta_ready = self.do_sync_remote_replication_filesystem_pair(self.meta_fs_pair_id, False)
+ meta_ready = self.do_sync_remote_replication_filesystem_pair(
+ self.meta_fs_pair_id, False
+ )
except Exception as err:
- self.record_deploy_process("sync_rep_meta_fs_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "sync_rep_meta_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
if ulog_ready and page_ready and meta_ready:
break
time.sleep(60)
try:
- self.dr_deploy_opt.split_remote_replication_filesystem_pair(self.page_fs_pair_id)
+ self.dr_deploy_opt.split_remote_replication_filesystem_pair(
+ self.page_fs_pair_id
+ )
self.do_remote_replication_filesystem_pair_cancel_secondary_write_lock(
- self.page_fs_pair_id, True)
+ self.page_fs_pair_id, True
+ )
except Exception as err:
- self.record_deploy_process("cancel_rep_page_fs_secondary_write_lock",
- "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "cancel_rep_page_fs_secondary_write_lock",
+ "failed",
+ code=-1,
+ description=str(err),
+ )
raise err
if not self.metadata_in_cantian:
try:
- self.dr_deploy_opt.split_remote_replication_filesystem_pair(self.meta_fs_pair_id)
+ self.dr_deploy_opt.split_remote_replication_filesystem_pair(
+ self.meta_fs_pair_id
+ )
self.do_remote_replication_filesystem_pair_cancel_secondary_write_lock(
- self.meta_fs_pair_id, False)
+ self.meta_fs_pair_id, False
+ )
except Exception as err:
- self.record_deploy_process("cancel_rep_meta_fs_secondary_write_lock",
- "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "cancel_rep_meta_fs_secondary_write_lock",
+ "failed",
+ code=-1,
+ description=str(err),
+ )
raise err
def copy_param_file_to_metadata(self):
@@ -1159,7 +1498,9 @@ class DRDeploy(object):
if return_code:
err_msg = f"Execution of chown command failed, output: {output}, stderr: {stderr}"
LOG.error(err_msg)
- self.record_deploy_process("dr_deploy", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "dr_deploy", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
dbstor_del_command = (
f'su -s /bin/bash - "{run_user}" -c \''
@@ -1182,7 +1523,9 @@ class DRDeploy(object):
if return_code:
err_msg = f"Execution of dbstor command failed, output: {output}, stderr: {stderr}"
LOG.error(err_msg)
- self.record_deploy_process("dr_deploy", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "dr_deploy", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
LOG.info(f"Successfully executed: {dbstor_command}")
@@ -1193,7 +1536,10 @@ class DRDeploy(object):
if os.path.exists(config_path):
             # remove the existing file
os.remove(config_path)
- shutil.copy(os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"), share_path)
+ shutil.copy(
+ os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"),
+ share_path,
+ )
except Exception as _err:
LOG.info(f"copy dr_deploy_param failed")
@@ -1216,7 +1562,9 @@ class DRDeploy(object):
if not self.mysql_cmd or not self.mysql_user:
err_msg = "Mysql_pwd or mysql_user is None, please check."
LOG.error(err_msg)
- self.record_deploy_process("dr_deploy", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "dr_deploy", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
if self.mysql_pwd is None:
self.mysql_pwd = input()
@@ -1227,18 +1575,28 @@ class DRDeploy(object):
self.do_flush_table_with_read_lock()
dbstor_page_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs")
metadata_fs_name = self.dr_deploy_info.get("storage_metadata_fs")
- self.sync_speed = int(SPEED.get(self.dr_deploy_info.get("sync_speed", "medium")))
+ self.sync_speed = int(
+ SPEED.get(self.dr_deploy_info.get("sync_speed", "medium"))
+ )
self.deploy_hyper_metro_pair()
try:
- self.page_fs_pair_id = self.deploy_remote_replication_pair(dbstor_page_fs_name, True)
+ self.page_fs_pair_id = self.deploy_remote_replication_pair(
+ dbstor_page_fs_name, True
+ )
except Exception as err:
- self.record_deploy_process("create_rep_page_fs_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "create_rep_page_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
if not self.metadata_in_cantian:
try:
- self.meta_fs_pair_id = self.deploy_remote_replication_pair(metadata_fs_name, False)
+ self.meta_fs_pair_id = self.deploy_remote_replication_pair(
+ metadata_fs_name, False
+ )
except Exception as err:
- self.record_deploy_process("create_rep_meta_fs_pair", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "create_rep_meta_fs_pair", "failed", code=-1, description=str(err)
+ )
raise err
self.active_dr_deploy_and_sync()
self.do_unlock_instance_for_backup()
@@ -1263,20 +1621,27 @@ class DRDeploy(object):
wait_time = 0
metadata_fs_info = None
while True:
- ulog_fs_pair_info, ulog_fs_pair_ready_flag = \
+ ulog_fs_pair_info, ulog_fs_pair_ready_flag = (
self.standby_check_ulog_fs_pair_ready(ulog_fs_pair_ready_flag)
- page_fs_pair_info, page_fs_pair_ready_flag = \
+ )
+ page_fs_pair_info, page_fs_pair_ready_flag = (
self.standby_check_page_fs_pair_ready(page_fs_pair_ready_flag)
+ )
if self.deploy_mode != "dbstor":
- metadata_fs_pair_info, metadata_fs_ready_flag, metadata_fs_info = \
+ metadata_fs_pair_info, metadata_fs_ready_flag, metadata_fs_info = (
self.standby_check_metadata_fs_pair_ready(metadata_fs_ready_flag)
- fs_ready = ulog_fs_pair_info and page_fs_pair_info and metadata_fs_pair_info
+ )
+ fs_ready = (
+ ulog_fs_pair_info and page_fs_pair_info and metadata_fs_pair_info
+ )
else:
fs_ready = ulog_fs_pair_info and page_fs_pair_info
if fs_ready and not is_installed_flag:
- LOG.info("Filesystem created successfully, start to install Cantian engine.")
+ LOG.info(
+ "Filesystem created successfully, start to install Cantian engine."
+ )
self.record_deploy_process("standby_install", "running")
if self.deploy_mode != "dbstor" and metadata_fs_info:
@@ -1289,17 +1654,28 @@ class DRDeploy(object):
else:
if wait_time > FS_CREAT_TIMEOUT and not is_installed_flag:
err_msg = "Wait for the filesystem creation timeout, please check."
- self.record_deploy_process("standby_install", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "standby_install", "failed", code=-1, description=err_msg
+ )
LOG.error(err_msg)
raise Exception(err_msg)
- LOG.info("Waiting until the DR is successfully set up, waited [%s]s", wait_time)
- pair_ready = ulog_fs_pair_ready_flag and page_fs_pair_ready_flag and metadata_fs_ready_flag
+ LOG.info(
+ "Waiting until the DR is successfully set up, waited [%s]s",
+ wait_time,
+ )
+ pair_ready = (
+ ulog_fs_pair_ready_flag
+ and page_fs_pair_ready_flag
+ and metadata_fs_ready_flag
+ )
if is_installed_flag and pair_ready:
self.record_deploy_process("standby_start", "running")
try:
self.standby_do_start()
except Exception as err:
- self.record_deploy_process("standby_start", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "standby_start", "failed", code=-1, description=str(err)
+ )
raise err
self.record_deploy_process("standby_start", "success")
break
@@ -1315,9 +1691,12 @@ class DRDeploy(object):
deploy --site=[standby/active] --mysql_cmd='/usr/local/mysql/bin/mysql' --mysql_user=root
:return:
"""
+
def _execute():
action_parse = argparse.ArgumentParser()
- action_parse.add_argument("--site", dest="site", choices=["standby", "active"], required=True)
+ action_parse.add_argument(
+ "--site", dest="site", choices=["standby", "active"], required=True
+ )
action_parse.add_argument("--mysql_cmd", dest="mysql_cmd", required=False)
action_parse.add_argument("--mysql_user", dest="mysql_user", required=False)
args = action_parse.parse_args()
@@ -1335,23 +1714,34 @@ class DRDeploy(object):
else:
self.standby_execute()
except Exception as err:
- self.record_deploy_process("dr_deploy", "failed", code=-1, description=str(err))
+ self.record_deploy_process(
+ "dr_deploy", "failed", code=-1, description=str(err)
+ )
if self.backup_lock_shell is not None:
self.do_unlock_instance_for_backup()
- LOG.error("Dr deploy execute failed, traceback:%s", traceback.format_exc())
+ LOG.error(
+ "Dr deploy execute failed, traceback:%s", traceback.format_exc()
+ )
raise err
finally:
self.dr_deploy_opt.storage_opt.logout()
             # after deployment completes, record the encrypted password in the config file
encrypted_pwd = KmcResolve.kmc_resolve_password("encrypted", self.dm_passwd)
self.record_disaster_recovery_info("dm_pwd", encrypted_pwd)
- os.chmod(os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"), mode=0o644)
+ os.chmod(
+ os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"),
+ mode=0o644,
+ )
try:
- shutil.copy(os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"), "/opt/cantian/config/")
+ shutil.copy(
+ os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"),
+ "/opt/cantian/config/",
+ )
except Exception as _err:
LOG.info(f"copy dr_deploy_param failed")
self.copy_param_file_to_metadata()
self.restart_cantian_exporter()
+
try:
_execute()
except Exception as err:
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_common.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_common.py
index e3030c51555df18231973c15a4729eae422b1203..8ebc68fa00b0b5d134e7818d27e2c16a88b6d7e6 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_common.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_common.py
@@ -27,9 +27,7 @@ class RemoteStorageOPT(object):
"device_id": self.remote_device_id,
"url": Constant.CREATE_VSTORE.replace("{deviceId}", "xxx"),
"method": "GET",
- "body": {
- "ID": f"{vstore_id}"
- }
+ "body": {"ID": f"{vstore_id}"},
}
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Failed to query remote storage vstore info,vstoreId[%s]" % vstore_id
@@ -44,7 +42,7 @@ class RemoteStorageOPT(object):
"device_id": self.remote_device_id,
"url": Constant.QUERY_SYSTEM_INFO.replace("{deviceId}", "xxx"),
"method": "GET",
- "body": {}
+ "body": {},
}
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Failed to query remote storage system info"
@@ -59,35 +57,43 @@ class RemoteStorageOPT(object):
"device_id": self.remote_device_id,
"url": remote_url.replace("{deviceId}", "xxx"),
"method": "GET",
- "body": {
- "vstoreId": vstore_id
- }
+ "body": {"vstoreId": vstore_id},
}
res = self.rest_client.normal_request(url, data=data, method="post")
- err_msg = "Failed to query remote storage filesystem num,vstoreId[%s]" % vstore_id
+ err_msg = (
+ "Failed to query remote storage filesystem num,vstoreId[%s]" % vstore_id
+ )
rsp_data = StorageInf.omstask_result_parse(err_msg, res)
- LOG.info("Success to query remote storage filesystem num, vstoreId[%s]", vstore_id)
+ LOG.info(
+ "Success to query remote storage filesystem num, vstoreId[%s]", vstore_id
+ )
return rsp_data
def query_remote_filesystem_info(self, fs_name: str, vstore_id: str):
url = Constant.REMOTE_EXECUTE
data = {
"device_id": self.remote_device_id,
- "url": Constant.CREATE_FS.replace("{deviceId}", "xxx") + "?filter=NAME::%s" % fs_name,
+ "url": Constant.CREATE_FS.replace("{deviceId}", "xxx")
+ + "?filter=NAME::%s" % fs_name,
"method": "GET",
- "body": {
- "vstoreId": vstore_id
- }
+ "body": {"vstoreId": vstore_id},
}
res = self.rest_client.normal_request(url, data=data, method="post")
- err_msg = "Failed to query remote filesystem[%s] info, vstore_id[%s]" % (fs_name, vstore_id)
+ err_msg = "Failed to query remote filesystem[%s] info, vstore_id[%s]" % (
+ fs_name,
+ vstore_id,
+ )
rsp_data = StorageInf.omstask_result_parse(err_msg, res)
fs_info = dict()
for filesystem_info in rsp_data:
if filesystem_info.get("NAME") == fs_name:
fs_info = filesystem_info
break
- LOG.info("Success to query remote filesystem[%s] info, vstore_id[%s]", fs_name, vstore_id)
+ LOG.info(
+ "Success to query remote filesystem[%s] info, vstore_id[%s]",
+ fs_name,
+ vstore_id,
+ )
return fs_info
def query_remote_storage_pool_info(self, pool_id: str) -> dict:
@@ -100,9 +106,7 @@ class RemoteStorageOPT(object):
"device_id": self.remote_device_id,
"url": remote_url.replace("{deviceId}", "xxx"),
"method": "GET",
- "body": {
- "ID": pool_id
- }
+ "body": {"ID": pool_id},
}
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Failed to query remote storage pool info,poolId[%s]" % pool_id
@@ -121,9 +125,14 @@ class KmcResolve(object):
:return:
"""
run_user = get_env_info("cantian_user")
- resolve_file_path = os.path.join(CURRENT_PATH, "../../cantian_common/crypte_adapter.py")
- cmd = "su -s /bin/bash - %s -c \"export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} " \
- "&& echo -e %s | python3 -B %s %s\"" % (run_user, plain_text, resolve_file_path, mode)
+ resolve_file_path = os.path.join(
+ CURRENT_PATH, "../../cantian_common/crypte_adapter.py"
+ )
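+        # The command below runs as the Cantian user with the DBStor KMC
+        # libraries on LD_LIBRARY_PATH and pipes the plain text into the
+        # crypte_adapter helper; `mode` selects encrypt or decrypt (assumed
+        # from the "encrypted" call sites in this module).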
+ cmd = (
+ 'su -s /bin/bash - %s -c "export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} '
+ '&& echo -e %s | python3 -B %s %s"'
+ % (run_user, plain_text, resolve_file_path, mode)
+ )
return_code, output, stderr = exec_popen(cmd)
if return_code == 1:
raise Exception("resolve password failed.")
@@ -173,9 +182,7 @@ class DRDeployCommon(object):
"""
LOG.info("Start to query vstore file system num.")
url = Constant.QUERY_FILE_SYSTEM_NUM.format(deviceId=self.device_id)
- data = {
- "vstoreId": vstore_id
- }
+ data = {"vstoreId": vstore_id}
res = self.rest_client.normal_request(url, data=data, method="get")
err_msg = "Failed to query vstore file system num"
rsp_data = StorageInf.result_parse(err_msg, res)
@@ -224,28 +231,28 @@ class DRDeployCommon(object):
return rsp_data
def query_hyper_metro_filesystem_pair_info(self, filesystem_id: str) -> list:
- url = (Constant.QUERY_HYPER_METRO_FILE_SYSTEM_PAIR + "?ASSOCIATEOBJTYPE=40&ASSOCIATEOBJID={fs_id}"). \
- format(deviceId=self.device_id, fs_id=filesystem_id)
+ url = (
+ Constant.QUERY_HYPER_METRO_FILE_SYSTEM_PAIR
+ + "?ASSOCIATEOBJTYPE=40&ASSOCIATEOBJID={fs_id}"
+ ).format(deviceId=self.device_id, fs_id=filesystem_id)
res = self.rest_client.normal_request(url, "get")
err_msg = "Failed to query hyper metro file system pair info"
rsp_data = StorageInf.result_parse(err_msg, res)
return rsp_data
-
+
def query_hyper_metro_filesystem_count_info(self, vstore_id: str) -> list:
- url = (Constant.QUERY_HYPER_METRO_FILE_SYSTEM_COUNT).format(deviceId=self.device_id)
- data = {
- "vstoreId": vstore_id
- }
+ url = (Constant.QUERY_HYPER_METRO_FILE_SYSTEM_COUNT).format(
+ deviceId=self.device_id
+ )
+ data = {"vstoreId": vstore_id}
res = self.rest_client.normal_request(url, data=data, method="get")
err_msg = "Failed to query hyper metro file system pair count info"
rsp_data = StorageInf.result_parse(err_msg, res)
return rsp_data
-
+
def query_ulog_filesystem_info_list(self, vstore_id: str) -> list:
url = Constant.HYPER_METRO_FILESYSTEM_PAIR.format(deviceId=self.device_id)
- data = {
- "vstoreId": vstore_id
- }
+ data = {"vstoreId": vstore_id}
res = self.rest_client.normal_request(url, data=data, method="get")
err_msg = "Failed to query ulog file system pair info list"
rsp_data = StorageInf.result_parse(err_msg, res)
@@ -257,9 +264,13 @@ class DRDeployCommon(object):
:param pair_id:
:return:
"""
- url = Constant.DELETE_HYPER_METRO_PAIR.format(deviceId=self.device_id, id=pair_id)
+ url = Constant.DELETE_HYPER_METRO_PAIR.format(
+ deviceId=self.device_id, id=pair_id
+ )
res = self.rest_client.normal_request(url, "get")
- err_msg = "Failed to query hyper metro filesystem pair info by pair id[%s]" % pair_id
+ err_msg = (
+ "Failed to query hyper metro filesystem pair info by pair id[%s]" % pair_id
+ )
rsp_data = StorageInf.result_parse(err_msg, res)
return rsp_data
@@ -269,10 +280,14 @@ class DRDeployCommon(object):
        :param filesystem_id: filesystem ID
:return: list
"""
- url = (Constant.QUERY_REPLICATION_FILE_SYSTEM_PAIR + "?ASSOCIATEOBJTYPE=40&ASSOCIATEOBJID={fs_id}") \
- .format(deviceId=self.device_id, fs_id=filesystem_id)
+ url = (
+ Constant.QUERY_REPLICATION_FILE_SYSTEM_PAIR
+ + "?ASSOCIATEOBJTYPE=40&ASSOCIATEOBJID={fs_id}"
+ ).format(deviceId=self.device_id, fs_id=filesystem_id)
res = self.rest_client.normal_request(url, "get")
- err_msg = "Failed to query remote replication filesystem[%s] info" % filesystem_id
+ err_msg = (
+ "Failed to query remote replication filesystem[%s] info" % filesystem_id
+ )
rsp_data = StorageInf.result_parse(err_msg, res)
return rsp_data
@@ -282,9 +297,14 @@ class DRDeployCommon(object):
        :param pair_id: remote replication pair ID
:return:
"""
- url = Constant.REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT.format(deviceId=self.device_id, id=pair_id)
+ url = Constant.REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT.format(
+ deviceId=self.device_id, id=pair_id
+ )
res = self.rest_client.normal_request(url, "get")
- err_msg = "Failed to query remote replication file system info by pair id[%s]" % pair_id
+ err_msg = (
+ "Failed to query remote replication file system info by pair id[%s]"
+ % pair_id
+ )
rsp_data = StorageInf.result_parse(err_msg, res)
return rsp_data
@@ -295,8 +315,10 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Start to query filesystem for replication info.")
- url = (Constant.QUERY_FILESYSTEM_FOR_REPLICATION + "?rmtDeviceId={remote_device_id}&RSSType=24"). \
- format(deviceId=self.device_id, remote_device_id=remote_device_id)
+ url = (
+ Constant.QUERY_FILESYSTEM_FOR_REPLICATION
+ + "?rmtDeviceId={remote_device_id}&RSSType=24"
+ ).format(deviceId=self.device_id, remote_device_id=remote_device_id)
res = self.rest_client.normal_request(url, "get")
err_msg = "Failed to query filesystem for replication info"
rsp_data = StorageInf.result_parse(err_msg, res)
@@ -315,8 +337,13 @@ class DRDeployCommon(object):
description = rsp_data.get("description")
current_step_index = description.get("currentStepIndex")
step_count = description.get("stepCount")
- LOG.info("Task[%s] status [%s], running process[%s/%s]",
- name, task_status, current_step_index, step_count)
+ LOG.info(
+ "Task[%s] status [%s], running process[%s/%s]",
+ name,
+ task_status,
+ current_step_index,
+ step_count,
+ )
if task_status == "success":
break
if task_status in ["executing", "wait"]:
@@ -328,8 +355,15 @@ class DRDeployCommon(object):
LOG.error(err_msg)
raise Exception(err_msg)
- @retry(retry_times=3, wait_times=20, log=LOG, task="create_filesystem_hyper_metro_domain")
- def create_filesystem_hyper_metro_domain(self, dev_name: str, dev_esn: str, dev_id: str, domain_name: str) -> dict:
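+    # The @retry decorators below (semantics assumed from the parameter
+    # names: retry_times attempts, wait_times seconds between attempts)
+    # guard the DeviceManager REST calls against transient failures.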
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="create_filesystem_hyper_metro_domain",
+ )
+ def create_filesystem_hyper_metro_domain(
+ self, dev_name: str, dev_esn: str, dev_id: str, domain_name: str
+ ) -> dict:
"""
        Create a filesystem HyperMetro domain
        :param dev_name: remote device name
@@ -345,12 +379,8 @@ class DRDeployCommon(object):
"isShareAuthenticationSync": False,
"DESCRIPTION": "",
"REMOTEDEVICES": [
- {
- "devId": dev_id,
- "devESN": dev_esn,
- "devName": dev_name
- }
- ]
+ {"devId": dev_id, "devESN": dev_esn, "devName": dev_name}
+ ],
}
url = Constant.HYPER_METRO_DOMAIN.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="post")
@@ -360,7 +390,9 @@ class DRDeployCommon(object):
return rsp_data
@retry(retry_times=3, wait_times=20, log=LOG, task="create_hyper_metro_vstore_pair")
- def create_hyper_metro_vstore_pair(self, domain_id: str, local_vstore_id: str, remote_vstore_id: str) -> dict:
+ def create_hyper_metro_vstore_pair(
+ self, domain_id: str, local_vstore_id: str, remote_vstore_id: str
+ ) -> dict:
"""
        Create a HyperMetro vstore pair
        :param domain_id: filesystem HyperMetro domain ID
@@ -375,7 +407,7 @@ class DRDeployCommon(object):
"REMOTEVSTOREID": remote_vstore_id,
"REPTYPE": "1",
"PREFERREDMODE": "0",
- "isNetworkSync": False
+ "isNetworkSync": False,
}
url = Constant.HYPER_METRO_VSTORE_PAIR.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="post")
@@ -384,8 +416,12 @@ class DRDeployCommon(object):
LOG.info("Success to create hyper metro vstore pair.")
return rsp_data
- @retry(retry_times=3, wait_times=20, log=LOG, task="create_hyper_metro_filesystem_pair")
- def create_hyper_metro_filesystem_pair(self, filesystem_id: str, pool_id: str, vstore_pair_id: str) -> dict:
+ @retry(
+ retry_times=3, wait_times=20, log=LOG, task="create_hyper_metro_filesystem_pair"
+ )
+ def create_hyper_metro_filesystem_pair(
+ self, filesystem_id: str, pool_id: str, vstore_pair_id: str
+ ) -> dict:
"""
        Create a HyperMetro filesystem pair through the omtask interface
        :param filesystem_id: local filesystem ID
@@ -397,7 +433,7 @@ class DRDeployCommon(object):
data = {
"vstorePairID": vstore_pair_id,
"remoteStoragePoolId": pool_id,
- "objs": [filesystem_id]
+ "objs": [filesystem_id],
}
url = Constant.CREATE_HYPER_METRO_FILESYSTEM_PAIR
res = self.rest_client.normal_request(url, data=data, method="post")
@@ -406,8 +442,15 @@ class DRDeployCommon(object):
LOG.info("Success to create hyper metro filesystem pair.")
return rsp_data
- @retry(retry_times=3, wait_times=20, log=LOG, task="modify_hyper_metro_filesystem_pair_sync_speed")
- def modify_hyper_metro_filesystem_pair_sync_speed(self, vstore_pair_id: str, speed: int) -> None:
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="modify_hyper_metro_filesystem_pair_sync_speed",
+ )
+ def modify_hyper_metro_filesystem_pair_sync_speed(
+ self, vstore_pair_id: str, speed: int
+ ) -> None:
"""
        Modify the synchronization speed
:param vstore_pair_id:
@@ -415,17 +458,19 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Start to modify hyper metro filesystem pair speed to [%d].", speed)
- data = {
- "ID": vstore_pair_id,
- "SPEED": speed
- }
+ data = {"ID": vstore_pair_id, "SPEED": speed}
url = Constant.HYPER_METRO_FILESYSTEM_PAIR.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="put")
err_msg = "Failed to modify hyper metro filesystem pair speed"
StorageInf.result_parse(err_msg, res)
LOG.info("Start to modify hyper metro filesystem pair speed.")
- @retry(retry_times=3, wait_times=20, log=LOG, task="create_remote_replication_filesystem_pair")
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="create_remote_replication_filesystem_pair",
+ )
def create_remote_replication_filesystem_pair(self, **pair_args) -> dict:
"""
@@ -459,14 +504,9 @@ class DRDeployCommon(object):
"syncPair": True,
"syncSnapPolicy": 0,
"createType": 0,
- "reservedConsistencySnapSwitch": 0
+ "reservedConsistencySnapSwitch": 0,
},
- "objs": [
- {
- "id": local_fs_id,
- "vstoreId": 0
- }
- ]
+ "objs": [{"id": local_fs_id, "vstoreId": 0}],
}
if remote_name_rule == 2:
data["replication"]["namePrefix"] = RepFileSystemNameRule.NamePrefix
@@ -475,10 +515,17 @@ class DRDeployCommon(object):
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Failed to create remote replication filesystem pair"
rsp_data = StorageInf.omstask_result_parse(err_msg, res)
- LOG.info("Success to create remote replication filesystem[%s] pair.", local_fs_id)
+ LOG.info(
+ "Success to create remote replication filesystem[%s] pair.", local_fs_id
+ )
return rsp_data
- @retry(retry_times=3, wait_times=20, log=LOG, task="split_remote_replication_filesystem_pair")
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="split_remote_replication_filesystem_pair",
+ )
def split_remote_replication_filesystem_pair(self, pair_id: str) -> dict:
"""
        Split a remote replication pair
@@ -486,27 +533,32 @@ class DRDeployCommon(object):
:return: {}
"""
LOG.info("Start to split remote replication filesystem pair[%s].", pair_id)
- data = {
- "ID": pair_id
- }
- url = Constant.SPLIT_REMOTE_REPLICATION_FILESYSTEM_PAIR.format(deviceId=self.device_id)
+ data = {"ID": pair_id}
+ url = Constant.SPLIT_REMOTE_REPLICATION_FILESYSTEM_PAIR.format(
+ deviceId=self.device_id
+ )
res = self.rest_client.normal_request(url, data=data, method="put")
err_msg = "Failed to split remote replication filesystem pair"
rsp_data = StorageInf.result_parse(err_msg, res)
LOG.info("Success to split remote replication filesystem pair")
return rsp_data
- @retry(retry_times=3, wait_times=20, log=LOG, task="remote_replication_filesystem_pair_cancel_secondary_write_lock")
- def remote_replication_filesystem_pair_cancel_secondary_write_lock(self, pair_id: str) -> dict:
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="remote_replication_filesystem_pair_cancel_secondary_write_lock",
+ )
+ def remote_replication_filesystem_pair_cancel_secondary_write_lock(
+ self, pair_id: str
+ ) -> dict:
"""
        Cancel write protection on the secondary resource of a remote replication pair
        :param pair_id: remote replication pair ID
:return:
"""
LOG.info("Start to cancel secondary write lock.")
- data = {
- "ID": pair_id
- }
+ data = {"ID": pair_id}
url = Constant.CANCEL_SECONDARY_WRITE_LOCK.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="put")
err_msg = "Failed to to cancel secondary write lock"
@@ -514,17 +566,22 @@ class DRDeployCommon(object):
LOG.info("Success to cancel secondary write lock.")
return rsp_data
- @retry(retry_times=3, wait_times=20, log=LOG, task="remote_replication_filesystem_pair_set_secondary_write_lock")
- def remote_replication_filesystem_pair_set_secondary_write_lock(self, pair_id: str) -> dict:
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="remote_replication_filesystem_pair_set_secondary_write_lock",
+ )
+ def remote_replication_filesystem_pair_set_secondary_write_lock(
+ self, pair_id: str
+ ) -> dict:
"""
        Set write protection on the secondary resource of a remote replication pair
        :param pair_id: remote replication pair ID
:return:
"""
LOG.info("Start to set secondary write lock.")
- data = {
- "ID": pair_id
- }
+ data = {"ID": pair_id}
url = Constant.SET_SECONDARY_WRITE_LOCK.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="put")
err_msg = "Failed to set secondary write lock."
@@ -532,8 +589,15 @@ class DRDeployCommon(object):
LOG.info("Success to set secondary write lock.")
return rsp_data
- @retry(retry_times=3, wait_times=20, log=LOG, task="sync_remote_replication_filesystem_pair")
- def sync_remote_replication_filesystem_pair(self, pair_id: str, vstore_id: str, is_full_copy: bool) -> None:
+ @retry(
+ retry_times=3,
+ wait_times=20,
+ log=LOG,
+ task="sync_remote_replication_filesystem_pair",
+ )
+ def sync_remote_replication_filesystem_pair(
+ self, pair_id: str, vstore_id: str, is_full_copy: bool
+    ) -> dict:
"""
        Trigger remote replication synchronization
        :param pair_id: remote replication pair ID
@@ -542,19 +606,19 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Start to sync remote replication filesystem pair.")
- data = {
- "ID": pair_id,
- "vstoreId": vstore_id,
- "isFullCopy": is_full_copy
- }
- url = Constant.SYNC_REMOTE_REPLICATION_FILESYSTEM_PAIR.format(deviceId=self.device_id)
+ data = {"ID": pair_id, "vstoreId": vstore_id, "isFullCopy": is_full_copy}
+ url = Constant.SYNC_REMOTE_REPLICATION_FILESYSTEM_PAIR.format(
+ deviceId=self.device_id
+ )
res = self.rest_client.normal_request(url, data=data, method="put")
err_msg = "Failed to sync remote replication filesystem pair"
rsp_data = StorageInf.result_parse(err_msg, res)
LOG.info("Success to sync remote replication filesystem pair.")
return rsp_data
- def delete_remote_replication_filesystem_pair(self, pair_id, is_local_del=False) -> dict:
+ def delete_remote_replication_filesystem_pair(
+ self, pair_id, is_local_del=False
+ ) -> dict:
"""
        Delete a remote replication pair
        :param pair_id: remote replication pair ID
@@ -562,18 +626,19 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Start to delete remote replication filesystem pair[%s].", pair_id)
- data = {
- "ISLOCALDELETE": is_local_del,
- "TOSYNCSRWHENDELETE": False
- }
- url = Constant.REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT.format(deviceId=self.device_id, id=pair_id)
+ data = {"ISLOCALDELETE": is_local_del, "TOSYNCSRWHENDELETE": False}
+ url = Constant.REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT.format(
+ deviceId=self.device_id, id=pair_id
+ )
res = self.rest_client.normal_request(url, data=data, method="delete")
err_msg = "Start to delete remote replication filesystem pair[%s]" % pair_id
rsp_data = StorageInf.result_parse(err_msg, res)
LOG.info("Success to delete remote replication filesystem pair[%s].", pair_id)
return rsp_data
- def delete_hyper_metro_filesystem_pair(self, pair_id: str, vstore_id: str, is_local_del=False) -> dict:
+ def delete_hyper_metro_filesystem_pair(
+ self, pair_id: str, vstore_id: str, is_local_del=False
+ ) -> dict:
"""
        Delete a HyperMetro pair
        :param pair_id: HyperMetro pair ID
@@ -582,11 +647,10 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Start to delete hyper metro filesystem pair")
- url = Constant.DELETE_HYPER_METRO_PAIR.format(deviceId=self.device_id, id=pair_id)
- data = {
- "ISLOCALDELETE": is_local_del,
- "vstoreId": vstore_id
- }
+ url = Constant.DELETE_HYPER_METRO_PAIR.format(
+ deviceId=self.device_id, id=pair_id
+ )
+ data = {"ISLOCALDELETE": is_local_del, "vstoreId": vstore_id}
res = self.rest_client.normal_request(url, data=data, method="delete")
err_mgs = "Failed to to delete hyper metro filesystem pair"
rsp_data = StorageInf.result_parse(err_mgs, res)
@@ -602,10 +666,10 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Start to delete hyper metro vstore pair")
- url = Constant.DELETE_HYPER_METRO_VSTORE_PAIR.format(deviceId=self.device_id, id=pair_id)
- data = {
- "isLocalDelete": is_local_del
- }
+ url = Constant.DELETE_HYPER_METRO_VSTORE_PAIR.format(
+ deviceId=self.device_id, id=pair_id
+ )
+ data = {"isLocalDelete": is_local_del}
res = self.rest_client.normal_request(url, data=data, method="delete")
err_msg = "Failed to delete hyper metro vstore pair"
rsp_data = StorageInf.result_parse(err_msg, res)
@@ -618,36 +682,54 @@ class DRDeployCommon(object):
        :param domain_id: filesystem HyperMetro domain ID
:return:
"""
- LOG.info("Start to split filesystem hyper metro domain, domain_id[%s]", domain_id)
- data = {
- "ID": domain_id
- }
- url = Constant.SPLIT_FILESYSTEM_HYPER_METRO_DOMAIN.format(deviceId=self.device_id)
+ LOG.info(
+ "Start to split filesystem hyper metro domain, domain_id[%s]", domain_id
+ )
+ data = {"ID": domain_id}
+ url = Constant.SPLIT_FILESYSTEM_HYPER_METRO_DOMAIN.format(
+ deviceId=self.device_id
+ )
res = self.rest_client.normal_request(url, data=data, method="post")
- err_msg = "Failed to split filesystem hyper metro domain, domain_id[%s]" % domain_id
+ err_msg = (
+ "Failed to split filesystem hyper metro domain, domain_id[%s]" % domain_id
+ )
rsp_data = StorageInf.result_parse(err_msg, res)
- LOG.info("Success to split filesystem hyper metro domain, domain_id[%s]", domain_id)
+ LOG.info(
+ "Success to split filesystem hyper metro domain, domain_id[%s]", domain_id
+ )
return rsp_data
- def delete_filesystem_hyper_metro_domain(self, domain_id: str, is_local_del=False) -> dict:
+ def delete_filesystem_hyper_metro_domain(
+ self, domain_id: str, is_local_del=False
+ ) -> dict:
"""
        Delete a filesystem HyperMetro domain
        :param domain_id: filesystem HyperMetro domain ID
        :param is_local_del: whether to perform local-end deletion only; defaults to False (not performed). When deletion fails with error code 1077674506, use local deletion
:return:
"""
- LOG.info("Start to delete filesystem hyper metro domain, "
- "domain_id[%s], is_local_del[%s].", domain_id, is_local_del)
- data = {
- "ISLOCALDELETE": is_local_del
- }
- url = Constant.DELETE_FILESYSTEM_HYPER_METRO_DOMAIN.format(deviceId=self.device_id, id=domain_id)
+ LOG.info(
+ "Start to delete filesystem hyper metro domain, "
+ "domain_id[%s], is_local_del[%s].",
+ domain_id,
+ is_local_del,
+ )
+ data = {"ISLOCALDELETE": is_local_del}
+ url = Constant.DELETE_FILESYSTEM_HYPER_METRO_DOMAIN.format(
+ deviceId=self.device_id, id=domain_id
+ )
res = self.rest_client.normal_request(url, data=data, method="delete")
- err_msg = "Failed to delete filesystem hyper metro domain, " \
- "domain_id[%s], is_local_del[%s]." % (domain_id, is_local_del)
+ err_msg = (
+ "Failed to delete filesystem hyper metro domain, "
+ "domain_id[%s], is_local_del[%s]." % (domain_id, is_local_del)
+ )
rsp_data = StorageInf.result_parse(err_msg, res)
- LOG.info("Success to delete filesystem hyper metro domain, "
- "domain_id[%s], is_local_del[%s].", domain_id, is_local_del)
+ LOG.info(
+ "Success to delete filesystem hyper metro domain, "
+ "domain_id[%s], is_local_del[%s].",
+ domain_id,
+ is_local_del,
+ )
return rsp_data
def swap_role_replication_pair(self, pair_id: str, vstore_id=0) -> None:
@@ -658,10 +740,7 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Swap replication pair[%s] role start", pair_id)
- data = {
- "ID": pair_id,
- "vstoreId": vstore_id
- }
+ data = {"ID": pair_id, "vstoreId": vstore_id}
url = Constant.SWAP_ROLE_REPLICATION_PAIR.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="put")
err_msg = "Swap replication pair[%s] role failed" % pair_id
@@ -675,32 +754,42 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Swap role fs hyper metro domain[%s] start.", domain_id)
- data = {
- "ID": domain_id
- }
+ data = {"ID": domain_id}
url = Constant.SWAP_ROLE_FS_HYPER_METRO_DOMAIN.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Swap role fs hyper metro domain[%s] failed" % domain_id
StorageInf.result_parse(err_msg, res)
LOG.info("Swap role fs hyper metro domain[%s] success.", domain_id)
- def change_fs_hyper_metro_domain_second_access(self, domain_id: str, access: str) -> None:
+ def change_fs_hyper_metro_domain_second_access(
+ self, domain_id: str, access: str
+ ) -> None:
"""
        Set or cancel protection of the secondary resource
        :param access: 1: access forbidden 2: read/write
:param domain_id:
:return:
"""
- LOG.info("Change fs hyper metro domain[%s] second access[%s] start.", domain_id, access)
- data = {
- "ID": domain_id,
- "access": access
- }
- url = Constant.CHANGE_FS_HYPER_METRO_DOMAIN_SECOND_ACCESS.format(deviceId=self.device_id)
+ LOG.info(
+ "Change fs hyper metro domain[%s] second access[%s] start.",
+ domain_id,
+ access,
+ )
+ data = {"ID": domain_id, "access": access}
+ url = Constant.CHANGE_FS_HYPER_METRO_DOMAIN_SECOND_ACCESS.format(
+ deviceId=self.device_id
+ )
res = self.rest_client.normal_request(url, data=data, method="post")
- err_msg = "Change fs hyper metro domain[%s] second access[%s] failed." % (domain_id, access)
+ err_msg = "Change fs hyper metro domain[%s] second access[%s] failed." % (
+ domain_id,
+ access,
+ )
StorageInf.result_parse(err_msg, res)
- LOG.info("Change fs hyper metro domain[%s] second access[%s] success.", domain_id, access)
+ LOG.info(
+ "Change fs hyper metro domain[%s] second access[%s] success.",
+ domain_id,
+ access,
+ )
def join_fs_hyper_metro_domain(self, domain_id: str) -> None:
"""
@@ -709,9 +798,7 @@ class DRDeployCommon(object):
:return:
"""
LOG.info("Join fs hyper metro domain[%s] start", domain_id)
- data = {
- "ID": domain_id
- }
+ data = {"ID": domain_id}
url = Constant.JOIN_FS_HYPER_METRO_DOMAIN.format(deviceId=self.device_id)
res = self.rest_client.normal_request(url, data=data, method="post")
err_msg = "Failed to join fs hyper metro domain[%s]" % domain_id
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_full_sync.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_full_sync.py
index e8f236f4a4e0ccf24aaa1cd08de3c07eb8e86d3c..53905eef3101cf339775d8027bf8714026c312ae 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_full_sync.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_full_sync.py
@@ -7,8 +7,14 @@ import os
import stat
import time
-from utils.config.rest_constant import SecresAccess, HealthStatus, ReplicationRunningStatus, MetroDomainRunningStatus, \
- ConfigRole, Constant
+from utils.config.rest_constant import (
+ SecresAccess,
+ HealthStatus,
+ ReplicationRunningStatus,
+ MetroDomainRunningStatus,
+ ConfigRole,
+ Constant,
+)
from storage_operate.dr_deploy_operate.dr_deploy import DRDeploy
from logic.common_func import get_status
from logic.common_func import exec_popen
@@ -16,14 +22,18 @@ from om_log import DR_DEPLOY_LOG as LOG
from get_config_info import get_env_info
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
-FULL_SYNC_PROGRESS = os.path.join(CURRENT_PATH, "../../../config/full_sync_progress.json")
-ZSQL_INI_PATH = '/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini'
+FULL_SYNC_PROGRESS = os.path.join(
+ CURRENT_PATH, "../../../config/full_sync_progress.json"
+)
+ZSQL_INI_PATH = "/mnt/dbdata/local/cantian/tmp/data/cfg/ctsql.ini"
LOCK_INSTANCE = "lock instance for backup;"
UNLOCK_INSTANCE = "unlock instance;"
FLUSH_TABLE = "flush table with read lock;"
UNLOCK_TABLE = "unlock tables;"
-FULL_CHECK_POINT_CMD = 'echo -e %s | su -s /bin/bash - %s -c \'source ~/.bashrc && ' \
- 'ctsql sys@127.0.0.1:1611 -q -c "alter system checkpoint global;"\''
+FULL_CHECK_POINT_CMD = (
+ "echo -e %s | su -s /bin/bash - %s -c 'source ~/.bashrc && "
+ 'ctsql sys@127.0.0.1:1611 -q -c "alter system checkpoint global;"\''
+)
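+# FULL_CHECK_POINT_CMD pipes the ctsql password in via echo -e and runs
+# "alter system checkpoint global;" as the run user, presumably so on-disk
+# pages are consistent before a full replication sync.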
class FullSyncRepPair(DRDeploy):
@@ -51,8 +61,10 @@ class FullSyncRepPair(DRDeploy):
"""
check_time = 100
LOG.info("Check cantian status.")
- cmd = "su -s /bin/bash - cantian -c \"cms stat | " \
- "grep -v STAT | awk '{print \$3, \$6}'\""
+ cmd = (
+ 'su -s /bin/bash - cantian -c "cms stat | '
+ "grep -v STAT | awk '{print \$3, \$6}'\""
+ )
while check_time:
time.sleep(10)
check_time -= 10
@@ -63,7 +75,10 @@ class FullSyncRepPair(DRDeploy):
raise Exception(err_msg)
cms_stat = output.split("\n")
if len(cms_stat) < 2:
- err_msg = "Current cluster status is abnormal, output:%s, stderr:%s" % (output, stderr)
+ err_msg = "Current cluster status is abnormal, output:%s, stderr:%s" % (
+ output,
+ stderr,
+ )
LOG.error(err_msg)
raise Exception(err_msg)
online = True
@@ -72,7 +87,11 @@ class FullSyncRepPair(DRDeploy):
if online != "ONLINE" or work_stat != "1":
online = False
if not online:
- LOG.info("Current cluster status is abnormal, output:%s, stderr:%s", output, stderr)
+ LOG.info(
+ "Current cluster status is abnormal, output:%s, stderr:%s",
+ output,
+ stderr,
+ )
continue
else:
break
@@ -110,7 +129,7 @@ class FullSyncRepPair(DRDeploy):
"do_flush_table_with_read_lock": "default",
"sync_rep_page_fs_pair": "default",
"cancel_rep_page_fs_secondary_write_lock": "default",
- "do_unlock_instance_for_backup": "default"
+ "do_unlock_instance_for_backup": "default",
}
standby_record_dict = {
"sync_rep_page_fs_pair": "default",
@@ -118,26 +137,23 @@ class FullSyncRepPair(DRDeploy):
"cantian_disaster_recovery_status": "default",
}
self.metadata_in_cantian = self.dr_deploy_info.get("mysql_metadata_in_cantian")
- dr_record_dict = active_record_dict if self.site == "active" else standby_record_dict
+ dr_record_dict = (
+ active_record_dict if self.site == "active" else standby_record_dict
+ )
if not self.metadata_in_cantian:
- dr_record_dict.update({
- "create_rep_meta_fs_pair": "default",
- "sync_rep_meta_fs_pair": "default",
- "cancel_rep_meta_fs_secondary_write_lock": "default"
- })
- dr_record_dict.update({"full_sync": "default"})
- result = {
- "data": dr_record_dict,
- "error":
+ dr_record_dict.update(
{
- "code": 0,
- "description": ""
+ "create_rep_meta_fs_pair": "default",
+ "sync_rep_meta_fs_pair": "default",
+ "cancel_rep_meta_fs_secondary_write_lock": "default",
}
- }
+ )
+ dr_record_dict.update({"full_sync": "default"})
+ result = {"data": dr_record_dict, "error": {"code": 0, "description": ""}}
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(self.record_progress_file, flags, modes), 'w') as fp:
+ with os.fdopen(os.open(self.record_progress_file, flags, modes), "w") as fp:
json.dump(result, fp, indent=4)
def do_full_sync(self, pair_id: str) -> None:
@@ -152,14 +168,19 @@ class FullSyncRepPair(DRDeploy):
:param pair_id:
:return:
"""
- remote_replication_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id=pair_id)
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ pair_id=pair_id
+ )
+ )
secres_access = remote_replication_pair_info.get("SECRESACCESS")
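+        # If the secondary is still writable, restore the secondary write lock
+        # first, then trigger a full-copy sync of the page filesystem pair.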
if secres_access == SecresAccess.ReadAndWrite:
- self.dr_deploy_opt.remote_replication_filesystem_pair_set_secondary_write_lock(pair_id=pair_id)
- self.dr_deploy_opt.sync_remote_replication_filesystem_pair(pair_id=self.page_fs_pair_id,
- vstore_id="0",
- is_full_copy=True)
+ self.dr_deploy_opt.remote_replication_filesystem_pair_set_secondary_write_lock(
+ pair_id=pair_id
+ )
+ self.dr_deploy_opt.sync_remote_replication_filesystem_pair(
+ pair_id=self.page_fs_pair_id, vstore_id="0", is_full_copy=True
+ )
def query_full_sync_status(self, pair_id: str) -> tuple:
"""
@@ -167,68 +188,98 @@ class FullSyncRepPair(DRDeploy):
:param pair_id:
:return:
"""
- remote_replication_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id=pair_id)
+ remote_replication_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ pair_id=pair_id
+ )
+ )
replication_progress = remote_replication_pair_info.get("REPLICATIONPROGRESS")
start_time = remote_replication_pair_info.get("STARTTIME")
end_time = remote_replication_pair_info.get("ENDTIME")
- replication_pair_health_status = remote_replication_pair_info.get("HEALTHSTATUS")
- replication_pair_running_status = remote_replication_pair_info.get("RUNNINGSTATUS")
+ replication_pair_health_status = remote_replication_pair_info.get(
+ "HEALTHSTATUS"
+ )
+ replication_pair_running_status = remote_replication_pair_info.get(
+ "RUNNINGSTATUS"
+ )
secres_access = remote_replication_pair_info.get("SECRESACCESS")
- LOG.info("Sync remote replication filesystem pair[%s], health status:[%s], "
- "running status[%s], progress[%s%%], start time[%s]",
- pair_id,
- get_status(replication_pair_health_status, HealthStatus),
- get_status(replication_pair_running_status, ReplicationRunningStatus),
- replication_progress,
- datetime.datetime.fromtimestamp(int(start_time)))
- if replication_progress == "100" and \
- replication_pair_running_status == ReplicationRunningStatus.Normal and \
- replication_pair_health_status == HealthStatus.Normal:
- LOG.info("Success to sync remote replication filesystem pair[%s], end time[%s]",
- pair_id,
- datetime.datetime.fromtimestamp(int(end_time)))
+ LOG.info(
+ "Sync remote replication filesystem pair[%s], health status:[%s], "
+ "running status[%s], progress[%s%%], start time[%s]",
+ pair_id,
+ get_status(replication_pair_health_status, HealthStatus),
+ get_status(replication_pair_running_status, ReplicationRunningStatus),
+ replication_progress,
+ datetime.datetime.fromtimestamp(int(start_time)),
+ )
+ if (
+ replication_progress == "100"
+ and replication_pair_running_status == ReplicationRunningStatus.Normal
+ and replication_pair_health_status == HealthStatus.Normal
+ ):
+ LOG.info(
+ "Success to sync remote replication filesystem pair[%s], end time[%s]",
+ pair_id,
+ datetime.datetime.fromtimestamp(int(end_time)),
+ )
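+            # Assumption: STARTTIME/ENDTIME are epoch seconds. If the last
+            # completed sync is older than FULL_SYNC_MAX_TIME, kick off another
+            # sync (is_full_copy=False) instead of reporting the pair as ready.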
if int(start_time) - int(end_time) > Constant.FULL_SYNC_MAX_TIME:
- LOG.info("Do sync remote replication filesystem[%s] pair of full copy." % pair_id)
- self.dr_deploy_opt.sync_remote_replication_filesystem_pair(pair_id=pair_id, vstore_id=0,
- is_full_copy=False)
+                LOG.info(
+                    "Do sync remote replication filesystem[%s] pair of full copy.",
+                    pair_id,
+                )
+ self.dr_deploy_opt.sync_remote_replication_filesystem_pair(
+ pair_id=pair_id, vstore_id=0, is_full_copy=False
+ )
return False, replication_progress, secres_access
return True, replication_progress, secres_access
return False, replication_progress, secres_access
def standby_check_status(self):
LOG.info("Check standby cluster status.")
- cmd = "source ~/.bashrc && su -s /bin/bash - cantian -c " \
- "\"cms stat | awk '{print \$3, \$(NF-1), \$NF}'\""
+ cmd = (
+ "source ~/.bashrc && su -s /bin/bash - cantian -c "
+ "\"cms stat | awk '{print \$3, \$(NF-1), \$NF}'\""
+ )
return_code, output, stderr = exec_popen(cmd)
if return_code:
err_msg = "Cms stat command execute failed, details:%s" % output + stderr
LOG.error(err_msg)
- self.record_deploy_process("full_sync", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "full_sync", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
cms_stat = output.split("\n")
if len(cms_stat) < 3:
err_msg = "Cluster stat is abnormal, details:%s" % output
LOG.error(err_msg)
- self.record_deploy_process("full_sync", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "full_sync", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
online_flag = True
stat_change_time = 0
for status in cms_stat[1:]:
node_stat, _date, _time = status.split(" ")
- datetime_obj = datetime.datetime.strptime(_date + " " + _time, "%Y-%m-%d %H:%M:%S.%f")
+ datetime_obj = datetime.datetime.strptime(
+ _date + " " + _time, "%Y-%m-%d %H:%M:%S.%f"
+ )
timestamp = int(datetime_obj.timestamp())
- stat_change_time = timestamp if timestamp > stat_change_time else stat_change_time
+ stat_change_time = (
+ timestamp if timestamp > stat_change_time else stat_change_time
+ )
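+            # Track the most recent state-change time across all nodes; any
+            # node in UNKNOWN state marks the standby cluster as not online.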
if node_stat == "UNKNOWN":
online_flag = False
- LOG.info("Standby cluster status, online[%s], change time[%s]", online_flag, stat_change_time)
+ LOG.info(
+ "Standby cluster status, online[%s], change time[%s]",
+ online_flag,
+ stat_change_time,
+ )
return online_flag, stat_change_time
def standby_cms_res_opt(self, action="start"):
self.record_deploy_process("standby_start", "running")
LOG.info("Standby stop by cms command.")
- cmd = "su -s /bin/bash - cantian -c " \
- "\"source ~/.bashrc && cms res -stop db\""
+ cmd = "su -s /bin/bash - cantian -c " '"source ~/.bashrc && cms res -stop db"'
return_code, output, stderr = exec_popen(cmd, timeout=600)
if return_code:
err_msg = "Cantian stop failed, error:%s." % output + stderr
@@ -236,13 +287,17 @@ class FullSyncRepPair(DRDeploy):
LOG.info("Stop cantian by cms command success.")
time.sleep(60)
LOG.info("Standby start by cms command.")
- cmd = "su -s /bin/bash - cantian -c " \
- "\"source ~/.bashrc && cms res -%s db\"" % action
+ cmd = (
+ "su -s /bin/bash - cantian -c "
+ '"source ~/.bashrc && cms res -%s db"' % action
+ )
return_code, output, stderr = exec_popen(cmd)
if return_code:
err_msg = "Cantian %s failed, error:%s." % (action, output + stderr)
LOG.error(err_msg)
- self.record_deploy_process("standby_start", "failed", code=-1, description=err_msg)
+ self.record_deploy_process(
+ "standby_start", "failed", code=-1, description=err_msg
+ )
raise Exception(err_msg)
self.check_cluster_status()
LOG.info("Standby start by cms command success.")
@@ -262,7 +317,9 @@ class FullSyncRepPair(DRDeploy):
:return:
"""
domain_id = self.dr_deploy_info.get("hyper_domain_id")
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(domain_id=domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ domain_id=domain_id
+ )
running_status = domain_info.get("RUNNINGSTATUS")
if running_status != MetroDomainRunningStatus.Normal:
LOG.error("metro domain is not normal, can not exec full sync.")
@@ -281,23 +338,37 @@ class FullSyncRepPair(DRDeploy):
LOG.info("Full sync replication pair of meta fs")
meta_pair_ready = True if self.metadata_in_cantian else False
while True:
- page_pair_ready, page_pair_progress, secres_access = self.query_full_sync_status(self.page_fs_pair_id)
+ page_pair_ready, page_pair_progress, secres_access = (
+ self.query_full_sync_status(self.page_fs_pair_id)
+ )
self.record_deploy_process("sync_rep_page_fs_pair", page_pair_progress)
if not self.metadata_in_cantian:
- meta_pair_ready, meta_pair_progress, secres_access = self.query_full_sync_status(self.meta_fs_pair_id)
+ meta_pair_ready, meta_pair_progress, secres_access = (
+ self.query_full_sync_status(self.meta_fs_pair_id)
+ )
self.record_deploy_process("sync_rep_meta_fs_pair", meta_pair_progress)
if page_pair_ready and meta_pair_ready:
break
time.sleep(60)
- self.dr_deploy_opt.split_remote_replication_filesystem_pair(self.page_fs_pair_id)
+ self.dr_deploy_opt.split_remote_replication_filesystem_pair(
+ self.page_fs_pair_id
+ )
self.record_deploy_process("sync_rep_page_fs_pair", "success")
- self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(self.page_fs_pair_id)
+ self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(
+ self.page_fs_pair_id
+ )
self.record_deploy_process("cancel_rep_page_fs_secondary_write_lock", "success")
if not self.metadata_in_cantian:
- self.dr_deploy_opt.split_remote_replication_filesystem_pair(self.meta_fs_pair_id)
+ self.dr_deploy_opt.split_remote_replication_filesystem_pair(
+ self.meta_fs_pair_id
+ )
self.record_deploy_process("sync_rep_page_fs_pair", "success")
- self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(self.meta_fs_pair_id)
- self.record_deploy_process("cancel_rep_meta_fs_secondary_write_lock", "success")
+ self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(
+ self.meta_fs_pair_id
+ )
+ self.record_deploy_process(
+ "cancel_rep_meta_fs_secondary_write_lock", "success"
+ )
self.do_unlock_instance_for_backup()
def full_sync_standby(self):
@@ -310,7 +381,9 @@ class FullSyncRepPair(DRDeploy):
:return:
"""
domain_id = self.dr_deploy_info.get("hyper_domain_id")
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(domain_id=domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ domain_id=domain_id
+ )
running_status = domain_info.get("RUNNINGSTATUS")
if running_status != MetroDomainRunningStatus.Normal:
LOG.error("metro domain is not normal, can not exec full sync.")
@@ -321,22 +394,38 @@ class FullSyncRepPair(DRDeploy):
raise Exception("config role is not secondary, can not exec full sync.")
ready_flag = False
cantian_online, stat_change_time = self.standby_check_status()
- meta_access = SecresAccess.ReadAndWrite if self.metadata_in_cantian else SecresAccess.ReadOnly
- page_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id=self.page_fs_pair_id)
+ meta_access = (
+ SecresAccess.ReadAndWrite
+ if self.metadata_in_cantian
+ else SecresAccess.ReadOnly
+ )
+ page_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ pair_id=self.page_fs_pair_id
+ )
+ )
page_end_time = page_pair_info.get("ENDTIME")
page_access = page_pair_info.get("SECRESACCESS")
- if page_access == SecresAccess.ReadAndWrite and \
- page_end_time is not None and int(page_end_time) < stat_change_time:
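+        # ready_flag: the page pair is writable on the secondary and its last
+        # sync ended before the latest CMS state change; when metadata lives
+        # outside Cantian the metadata pair must satisfy the same check.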
+ if (
+ page_access == SecresAccess.ReadAndWrite
+ and page_end_time is not None
+ and int(page_end_time) < stat_change_time
+ ):
ready_flag = True
if not self.metadata_in_cantian:
ready_flag = False
- meta_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- self.meta_fs_pair_id)
+ meta_pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ self.meta_fs_pair_id
+ )
+ )
meta_end_time = meta_pair_info.get("ENDTIME")
meta_access = meta_pair_info.get("SECRESACCESS")
- if meta_access == SecresAccess.ReadAndWrite \
- and meta_end_time is not None and int(page_end_time) < stat_change_time:
+ if (
+ meta_access == SecresAccess.ReadAndWrite
+ and meta_end_time is not None
+                and int(meta_end_time) < stat_change_time
+ ):
ready_flag = True
if not ready_flag:
self.wait_rep_pair_sync_end(meta_access)
@@ -350,13 +439,19 @@ class FullSyncRepPair(DRDeploy):
:return:
"""
while True:
- _, page_pair_progress, page_access = self.query_full_sync_status(self.page_fs_pair_id)
+ _, page_pair_progress, page_access = self.query_full_sync_status(
+ self.page_fs_pair_id
+ )
self.record_deploy_process("sync_rep_page_fs_pair", page_pair_progress)
if not self.metadata_in_cantian:
- _, meta_pair_progress, meta_access = self.query_full_sync_status(self.meta_fs_pair_id)
+ _, meta_pair_progress, meta_access = self.query_full_sync_status(
+ self.meta_fs_pair_id
+ )
self.record_deploy_process("sync_rep_meta_fs_pair", meta_pair_progress)
- if page_access == SecresAccess.ReadAndWrite and \
- meta_access == SecresAccess.ReadAndWrite:
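+            # Loop until both the page pair and (when tracked separately) the
+            # metadata pair report SECRESACCESS=ReadAndWrite, then record success.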
+ if (
+ page_access == SecresAccess.ReadAndWrite
+ and meta_access == SecresAccess.ReadAndWrite
+ ):
self.record_deploy_process("sync_rep_page_fs_pair", "success")
if not self.metadata_in_cantian:
self.record_deploy_process("sync_rep_meta_fs_pair", "success")
@@ -365,7 +460,9 @@ class FullSyncRepPair(DRDeploy):
def execute(self):
action_parse = argparse.ArgumentParser()
- action_parse.add_argument("--site", dest="site", choices=["standby", "active"], required=True)
+ action_parse.add_argument(
+ "--site", dest="site", choices=["standby", "active"], required=True
+ )
action_parse.add_argument("--mysql_cmd", dest="mysql_cmd", required=False)
action_parse.add_argument("--mysql_user", dest="mysql_user", required=False)
args = action_parse.parse_args()
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py
index f7129d0527ce6ef3fc46f894bae9a25e95ce83ed..054b20e26dca4eb99f8796c579d7316ab9e12d2f 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py
@@ -13,24 +13,42 @@ from pre_install import PreInstall
from logic.storage_operate import StorageInf
from storage_operate.dr_deploy_operate.dr_deploy_common import DRDeployCommon
from storage_operate.dr_deploy_operate.dr_deploy_common import RemoteStorageOPT
-from utils.config.rest_constant import SystemRunningStatus, \
- HealthStatus, RemoteDeviceStatus, FilesystemRunningStatus, PoolStatus, PoolHealth, CANTIAN_DOMAIN_PREFIX, \
- RepFileSystemNameRule
+from utils.config.rest_constant import (
+ SystemRunningStatus,
+ HealthStatus,
+ RemoteDeviceStatus,
+ FilesystemRunningStatus,
+ PoolStatus,
+ PoolHealth,
+ CANTIAN_DOMAIN_PREFIX,
+ RepFileSystemNameRule,
+)
from om_log import LOGGER as LOG
from get_config_info import get_env_info
from obtains_lsid import LSIDGenerate
from cantian_common.mysql_shell import MysqlShell
-from logic.common_func import exec_popen, read_json_config, write_json_config, get_status
+from logic.common_func import (
+ exec_popen,
+ read_json_config,
+ write_json_config,
+ get_status,
+)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
-DR_DEPLOY_PARAM_FILE = os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json")
+DR_DEPLOY_PARAM_FILE = os.path.join(
+ CURRENT_PATH, "../../../config/dr_deploy_param.json"
+)
DEPLOY_POLICY_FILE = os.path.join(CURRENT_PATH, "../../deploy_policy_config.json")
DEFAULT_PARAM_FILE = os.path.join(CURRENT_PATH, "../../config_params.json")
-DR_PROCESS_RECORD_FILE = os.path.join(CURRENT_PATH, "../../../config/dr_process_record.json")
+DR_PROCESS_RECORD_FILE = os.path.join(
+ CURRENT_PATH, "../../../config/dr_process_record.json"
+)
CANTIAN_STOP_SUCCESS_FLAG = os.path.join(CURRENT_PATH, "../../../config/.stop_success")
DEPLOY_PARAM_FILE = "/opt/cantian/config/deploy_param.json"
-DEPLOY_POLICY_CONFIG_FILE = os.path.join(CURRENT_PATH, "../../deploy_policy_config.json")
+DEPLOY_POLICY_CONFIG_FILE = os.path.join(
+ CURRENT_PATH, "../../deploy_policy_config.json"
+)
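+# Maximum number of filesystem HyperMetro domains on one array; the pre-check
+# below reports an error once this limit is reached.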
DOMAIN_LIMITS = 4
@@ -72,7 +90,11 @@ class DRDeployPreCheck(object):
        Clean up the environment before deployment
:return:
"""
- file_list = [DR_PROCESS_RECORD_FILE, DR_DEPLOY_PARAM_FILE, CANTIAN_STOP_SUCCESS_FLAG]
+ file_list = [
+ DR_PROCESS_RECORD_FILE,
+ DR_DEPLOY_PARAM_FILE,
+ CANTIAN_STOP_SUCCESS_FLAG,
+ ]
for file in file_list:
if os.path.exists(file):
os.remove(file)
@@ -94,11 +116,17 @@ class DRDeployPreCheck(object):
_, sync_proc, _ = exec_popen(check_sync_cmd)
err_msg = ""
if deploy_proc:
- err_msg += "Dr deploy is executing, please check, details:\n%s" % deploy_proc
+ err_msg += (
+ "Dr deploy is executing, please check, details:\n%s" % deploy_proc
+ )
if undeploy_proc:
- err_msg += "Dr undeploy is executing, please check, details:\n%s" % undeploy_proc
+ err_msg += (
+ "Dr undeploy is executing, please check, details:\n%s" % undeploy_proc
+ )
if sync_proc:
- err_msg += "Dr full sync is executing, please check, details:\n%s" % sync_proc
+ err_msg += (
+ "Dr full sync is executing, please check, details:\n%s" % sync_proc
+ )
if err_msg:
raise Exception(err_msg)
@@ -109,13 +137,18 @@ class DRDeployPreCheck(object):
"""
err_msg = []
node_id = self.deploy_params.get("node_id")
- cmd = "su -s /bin/bash - %s -c \"cms stat | " \
- "grep -v NODE_ID | awk '{if(\$1==%s){print \$3,\$6,\$9}}'\"" % (self.run_user, node_id)
+ cmd = (
+ 'su -s /bin/bash - %s -c "cms stat | '
+ "grep -v NODE_ID | awk '{if(\$1==%s){print \$3,\$6,\$9}}'\""
+ % (self.run_user, node_id)
+ )
return_code, output, stderr = exec_popen(cmd)
if return_code == 1:
err_msg = ["Execute command[cms stat] failed, details:%s" % stderr]
else:
- cms_stat = [re.split(r"\s+", item.strip()) for item in output.strip().split("\n")]
+ cms_stat = [
+ re.split(r"\s+", item.strip()) for item in output.strip().split("\n")
+ ]
for index, item in enumerate(cms_stat):
if item[0].strip(" ") != "ONLINE":
err_msg.append("Node[%s] status is not ONLINE." % index)
@@ -146,16 +179,22 @@ class DRDeployPreCheck(object):
remote_system_info = remote_opt.query_remote_storage_system_info()
remote_mode_string = remote_system_info.get("productModeString")
if remote_mode_string != product_mode_string:
- err_msg.append("System mode is non-consistent, local mode[%s], remote mode[%s]" %
- (product_mode_string, remote_mode_string))
+ err_msg.append(
+ "System mode is non-consistent, local mode[%s], remote mode[%s]"
+ % (product_mode_string, remote_mode_string)
+ )
if health_status != HealthStatus.Normal:
- err_msg.append("System health status is not normal: health status[%s]." %
- get_status(health_status, HealthStatus))
+ err_msg.append(
+ "System health status is not normal: health status[%s]."
+ % get_status(health_status, HealthStatus)
+ )
if running_status != SystemRunningStatus.Normal:
- err_msg.append("System running status is not normal: running status[%s]." %
- get_status(running_status, SystemRunningStatus))
+ err_msg.append(
+ "System running status is not normal: running status[%s]."
+ % get_status(running_status, SystemRunningStatus)
+ )
LOG.info("Check storage system info success.")
return err_msg
@@ -177,17 +216,23 @@ class DRDeployPreCheck(object):
if item.get("SN") == remote_esn:
remote_device_info = item
if not remote_device_info:
- err_msg.append("Remote device esn[%s] is not correct, please check" % remote_esn)
+ err_msg.append(
+ "Remote device esn[%s] is not correct, please check" % remote_esn
+ )
return err_msg
self.remote_device_id = remote_device_info.get("ID")
health_status = remote_device_info.get("HEALTHSTATUS")
running_status = remote_device_info.get("RUNNINGSTATUS")
if health_status != HealthStatus.Normal:
- err_msg.append("Remote device health status is not normal: health status[%s]." %
- get_status(health_status, HealthStatus))
+ err_msg.append(
+ "Remote device health status is not normal: health status[%s]."
+ % get_status(health_status, HealthStatus)
+ )
if running_status != RemoteDeviceStatus.LinkUp:
- err_msg.append("Remote device running status is not normal: running status[%s]." %
- get_status(running_status, RemoteDeviceStatus))
+ err_msg.append(
+ "Remote device running status is not normal: running status[%s]."
+ % get_status(running_status, RemoteDeviceStatus)
+ )
LOG.info("Check remote device info success.")
return err_msg
@@ -207,11 +252,15 @@ class DRDeployPreCheck(object):
health_status = file_system_info.get("HEALTHSTATUS")
running_status = file_system_info.get("RUNNINGSTATUS")
if health_status != HealthStatus.Normal:
- err_msg.append("Filesystem[%s] health is not normal, status[%s]." %
- (fs_name, get_status(health_status, HealthStatus)))
+ err_msg.append(
+ "Filesystem[%s] health is not normal, status[%s]."
+ % (fs_name, get_status(health_status, HealthStatus))
+ )
if running_status != FilesystemRunningStatus.Online:
- err_msg.append("Filesystem[%s] running status is not normal, status[%s]." %
- (fs_name, get_status(running_status, FilesystemRunningStatus)))
+ err_msg.append(
+ "Filesystem[%s] running status is not normal, status[%s]."
+ % (fs_name, get_status(running_status, FilesystemRunningStatus))
+ )
LOG.info("Check master filesystem[%s] status success.", fs_name)
return err_msg
@@ -228,21 +277,30 @@ class DRDeployPreCheck(object):
remote_pool_id = self.local_conf_params.get("remote_pool_id")
self.remote_operate = RemoteStorageOPT(self.storage_opt, self.remote_device_id)
try:
- remote_pool_info = self.remote_operate.query_remote_storage_pool_info(pool_id=remote_pool_id)
+ remote_pool_info = self.remote_operate.query_remote_storage_pool_info(
+ pool_id=remote_pool_id
+ )
except Exception as _err:
if str(_err).find("1077949965") != -1:
err_msg.append("Standby pool[%s] is not exist." % remote_pool_id)
else:
- err_msg.append("Failed to query remote pool[%s] info, details:%s" % (remote_pool_id, _err))
+ err_msg.append(
+ "Failed to query remote pool[%s] info, details:%s"
+ % (remote_pool_id, _err)
+ )
if remote_pool_info:
running_status = remote_pool_info.get("RUNNINGSTATUS")
health_status = remote_pool_info.get("HEALTHSTATUS")
if running_status != PoolStatus.Online:
- err_msg.append("Pool running status is not online, current status:[%s]" %
- get_status(running_status, PoolStatus))
+ err_msg.append(
+ "Pool running status is not online, current status:[%s]"
+ % get_status(running_status, PoolStatus)
+ )
if health_status != PoolHealth.Normal:
- err_msg.append("Pool health status is not normal, current status:[%s]" %
- get_status(health_status, PoolHealth))
+ err_msg.append(
+ "Pool health status is not normal, current status:[%s]"
+ % get_status(health_status, PoolHealth)
+ )
return err_msg
def check_standby_filesystem(self) -> list:
@@ -265,41 +323,62 @@ class DRDeployPreCheck(object):
metadata_in_cantian = self.local_conf_params.get("mysql_metadata_in_cantian")
name_suffix = self.local_conf_params.get("name_suffix", "")
if name_suffix:
- dbstor_page_fs = RepFileSystemNameRule.NamePrefix + dbstor_page_fs + name_suffix
+ dbstor_page_fs = (
+ RepFileSystemNameRule.NamePrefix + dbstor_page_fs + name_suffix
+ )
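+        # Replicated filesystem names are derived as NamePrefix + <fs name> +
+        # name_suffix; the metadata filesystem is only renamed when MySQL
+        # metadata is stored outside Cantian.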
if name_suffix and not metadata_in_cantian:
- metadata_fs_name = RepFileSystemNameRule.NamePrefix + metadata_fs_name + name_suffix
+ metadata_fs_name = (
+ RepFileSystemNameRule.NamePrefix + metadata_fs_name + name_suffix
+ )
remote_fs_vstore_id = self.remote_conf_params.get("dbstor_fs_vstore_id")
if self.ulog_fs_pair_id is None:
dbstor_fs = self.local_conf_params.get("storage_dbstor_fs")
- remote_ulog_fs_info = self.remote_operate.\
- query_remote_filesystem_info(fs_name=dbstor_fs, vstore_id=remote_fs_vstore_id)
+ remote_ulog_fs_info = self.remote_operate.query_remote_filesystem_info(
+ fs_name=dbstor_fs, vstore_id=remote_fs_vstore_id
+ )
if remote_ulog_fs_info:
- err_msg.append("Standby dbstor filesystem[%s] exist, filesystem id[%s]." %
- (dbstor_fs, remote_ulog_fs_info.get("ID")))
+ err_msg.append(
+ "Standby dbstor filesystem[%s] exist, filesystem id[%s]."
+ % (dbstor_fs, remote_ulog_fs_info.get("ID"))
+ )
else:
if cantian_in_container == "0":
- err_msg.append("Standby vstore[%s] exist filesystems." % remote_fs_vstore_id)
+ err_msg.append(
+ "Standby vstore[%s] exist filesystems." % remote_fs_vstore_id
+ )
if self.page_fs_pair_id is None:
- remote_dbstor_page_fs_info = self.remote_operate.\
- query_remote_filesystem_info(fs_name=dbstor_page_fs, vstore_id="0")
+ remote_dbstor_page_fs_info = (
+ self.remote_operate.query_remote_filesystem_info(
+ fs_name=dbstor_page_fs, vstore_id="0"
+ )
+ )
if remote_dbstor_page_fs_info:
- err_msg.append("Standby dbstor page filesystem[%s] exist, filesystem id[%s]." %
- (dbstor_page_fs, remote_dbstor_page_fs_info.get("ID")))
+ err_msg.append(
+ "Standby dbstor page filesystem[%s] exist, filesystem id[%s]."
+ % (dbstor_page_fs, remote_dbstor_page_fs_info.get("ID"))
+ )
else:
if cantian_in_container == "0":
- err_msg.append("Standby dbstor page filesystem[%s] exist." % dbstor_page_fs)
+ err_msg.append(
+ "Standby dbstor page filesystem[%s] exist." % dbstor_page_fs
+ )
if self.meta_fs_pair_id is None:
- remote_metadata_fs_info = self.remote_operate.\
- query_remote_filesystem_info(fs_name=metadata_fs_name, vstore_id="0")
+ remote_metadata_fs_info = self.remote_operate.query_remote_filesystem_info(
+ fs_name=metadata_fs_name, vstore_id="0"
+ )
if remote_metadata_fs_info and not metadata_in_cantian:
- err_msg.append("Standby metadata filesystem[%s] exist, filesystem id[%s]." %
- (metadata_fs_name, remote_metadata_fs_info.get("ID")))
+ err_msg.append(
+ "Standby metadata filesystem[%s] exist, filesystem id[%s]."
+ % (metadata_fs_name, remote_metadata_fs_info.get("ID"))
+ )
else:
if cantian_in_container == "0":
- err_msg.append("Standby metadata filesystem[%s] exist." % metadata_fs_name)
+ err_msg.append(
+ "Standby metadata filesystem[%s] exist." % metadata_fs_name
+ )
LOG.info("Check standby filesystem nums success.")
return err_msg
@@ -321,9 +400,15 @@ class DRDeployPreCheck(object):
nas_foundation = True
err_msg = []
if not nas_foundation:
- err_msg.append("NAS Foundation license is not Found or expired, license status: %s" % license_info)
+ err_msg.append(
+ "NAS Foundation license is not Found or expired, license status: %s"
+ % license_info
+ )
if not hyper_replication:
- err_msg.append("HyperReplication license is not Found or expired, license status: %s" % license_info)
+ err_msg.append(
+ "HyperReplication license is not Found or expired, license status: %s"
+ % license_info
+ )
LOG.info("Check license effectivity success.")
return err_msg
@@ -338,11 +423,15 @@ class DRDeployPreCheck(object):
LOG.info("Check disaster status start.")
dbstor_fs = self.local_conf_params.get("storage_dbstor_fs")
dbstor_fs_vstore_id = self.local_conf_params.get("dbstor_fs_vstore_id")
- remote_dbstor_fs_vstore_id = self.local_conf_params.get("remote_dbstor_fs_vstore_id")
+ remote_dbstor_fs_vstore_id = self.local_conf_params.get(
+ "remote_dbstor_fs_vstore_id"
+ )
dbstor_page_fs = self.local_conf_params.get("storage_dbstor_page_fs")
metadata_fs = self.local_conf_params.get("storage_metadata_fs")
metadata_in_cantian = self.local_conf_params.get("mysql_metadata_in_cantian")
- dbstor_fs_info = self.storage_opt.query_filesystem_info(dbstor_fs, dbstor_fs_vstore_id)
+ dbstor_fs_info = self.storage_opt.query_filesystem_info(
+ dbstor_fs, dbstor_fs_vstore_id
+ )
dbstor_fs_id = dbstor_fs_info.get("ID")
dbstor_page_fs_info = self.storage_opt.query_filesystem_info(dbstor_page_fs)
metadata_fs_info = self.storage_opt.query_filesystem_info(metadata_fs)
@@ -357,7 +446,9 @@ class DRDeployPreCheck(object):
else:
domain_name = self.local_conf_params.get("domain_name", "")
if domain_name == "":
- err_msg.append("The 'domain_name' parameter of the 'deploy_param.json' file is empty.")
+ err_msg.append(
+ "The 'domain_name' parameter of the 'deploy_param.json' file is empty."
+ )
domain_exist = False
for domain_info in domain_infos:
if domain_info.get("NAME") == domain_name:
@@ -374,52 +465,88 @@ class DRDeployPreCheck(object):
if domain_exist:
break
else:
- _err_msg = "Domain name[%s] is exist, " \
- "but remote esn[%s] matching failed." % (domain_name, remote_esn)
+ _err_msg = (
+                            "Domain name[%s] exists, "
+ "but remote esn[%s] matching failed."
+ % (domain_name, remote_esn)
+ )
err_msg.append(_err_msg)
else:
LOG.info("Domain name[%s] is not exist." % domain_name)
if len(domain_infos) >= DOMAIN_LIMITS and not domain_exist:
- err_msg.append("The number of HyperMetro domains has reached the upper limit %s." % DOMAIN_LIMITS)
- page_pair_info = self.deploy_operate.query_remote_replication_pair_info(page_fs_id)
+ err_msg.append(
+ "The number of HyperMetro domains has reached the upper limit %s."
+ % DOMAIN_LIMITS
+ )
+ page_pair_info = self.deploy_operate.query_remote_replication_pair_info(
+ page_fs_id
+ )
if page_pair_info:
if cantian_in_container == "0":
- err_msg.append("Filesystem[%s] replication pair is exist." % dbstor_page_fs)
+ err_msg.append(
+                    "Filesystem[%s] replication pair exists." % dbstor_page_fs
+ )
else:
- if len(page_pair_info) == 1 and page_pair_info[0].get("REMOTEDEVICEID") == self.remote_device_id:
+ if (
+ len(page_pair_info) == 1
+ and page_pair_info[0].get("REMOTEDEVICEID") == self.remote_device_id
+ ):
self.page_fs_pair_id = page_pair_info[0].get("ID")
else:
- _err_msg = ("Filesystem[%s] replication pair is exist, but match failed, "
- "details: %s") % (dbstor_page_fs, page_pair_info)
+ _err_msg = (
+                    "Filesystem[%s] replication pair exists, but match failed, "
+ "details: %s"
+ ) % (dbstor_page_fs, page_pair_info)
err_msg.append(_err_msg)
vstore_pair_infos = self.deploy_operate.query_hyper_metro_vstore_pair_info()
for vstore_pair_info in vstore_pair_infos:
exist_remote_vstoreid = vstore_pair_info.get("REMOTEVSTOREID")
exist_local_vstoreid = vstore_pair_info.get("LOCALVSTOREID")
- if exist_local_vstoreid == dbstor_fs_vstore_id and remote_dbstor_fs_vstore_id == exist_remote_vstoreid:
+ if (
+ exist_local_vstoreid == dbstor_fs_vstore_id
+ and remote_dbstor_fs_vstore_id == exist_remote_vstoreid
+ ):
if cantian_in_container == "0":
- err_msg.append("Vstore[%s] metro pair is exist." % dbstor_fs_vstore_id)
+ err_msg.append(
+                        "Vstore[%s] metro pair exists." % dbstor_fs_vstore_id
+ )
else:
if vstore_pair_info.get("DOMAINNAME") == domain_name:
domain_id = vstore_pair_info.get("DOMAINID")
if domain_id != self.hyper_domain_id:
- _err_msg = "Vstore[%s] metro pair is exist, " \
- "but domain id[%s] matching failed." % (dbstor_fs_vstore_id, domain_id)
+ _err_msg = (
+                                "Vstore[%s] metro pair exists, "
+ "but domain id[%s] matching failed."
+ % (dbstor_fs_vstore_id, domain_id)
+ )
err_msg.append(_err_msg)
self.vstore_pair_id = vstore_pair_info.get("ID")
- LOG.info("Vstore[%s] metro pair is exist." % dbstor_fs_vstore_id)
+ LOG.info(
+                            "Vstore[%s] metro pair exists." % dbstor_fs_vstore_id
+ )
else:
- _err_msg = "Vstore[%s] metro pair is exist, " \
- "but domain name[%s] matching failed." % (dbstor_fs_vstore_id, domain_name)
+ _err_msg = (
+                            "Vstore[%s] metro pair exists, "
+ "but domain name[%s] matching failed."
+ % (dbstor_fs_vstore_id, domain_name)
+ )
err_msg.append(_err_msg)
break
else:
- system_count = self.remote_operate.query_remote_storage_vstore_filesystem_num(remote_dbstor_fs_vstore_id)
+ system_count = (
+ self.remote_operate.query_remote_storage_vstore_filesystem_num(
+ remote_dbstor_fs_vstore_id
+ )
+ )
if system_count and system_count.get("COUNT") != "0":
- err_msg.append("Standby vstore[%s] exist filesystems, count[%s]"
- % (remote_dbstor_fs_vstore_id, system_count.get("COUNT")))
-
- ulog_pair_info = self.deploy_operate.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ err_msg.append(
+                    "Standby vstore[%s] has existing filesystems, count[%s]"
+ % (remote_dbstor_fs_vstore_id, system_count.get("COUNT"))
+ )
+
+ ulog_pair_info = self.deploy_operate.query_hyper_metro_filesystem_pair_info(
+ dbstor_fs_id
+ )
if ulog_pair_info:
if cantian_in_container == "0":
err_msg.append("Filesystem[%s] metro pair is exist." % dbstor_fs)
@@ -429,21 +556,34 @@ class DRDeployPreCheck(object):
self.ulog_fs_pair_id = pair_info.get("ID")
LOG.info("Filesystem[%s] metro pair is exist." % dbstor_fs)
else:
- _err_msg = "Filesystem[%s] metro pair is exist, " \
- "but domain name[%s] matching failed." % (dbstor_fs, domain_name)
+ _err_msg = (
+                        "Filesystem[%s] metro pair exists, "
+ "but domain name[%s] matching failed."
+ % (dbstor_fs, domain_name)
+ )
err_msg.append(_err_msg)
deploy_mode = self.local_conf_params.get("deploy_mode")
if metadata_in_cantian and deploy_mode != "dbstor":
- meta_pair_info = self.deploy_operate.query_remote_replication_pair_info(metadata_fs_id)
+ meta_pair_info = self.deploy_operate.query_remote_replication_pair_info(
+ metadata_fs_id
+ )
if meta_pair_info:
if cantian_in_container == "0":
- err_msg.append("Filesystem[%s] replication pair is exist." % metadata_fs)
+ err_msg.append(
+                        "Filesystem[%s] replication pair exists." % metadata_fs
+ )
else:
- if len(meta_pair_info) == 1 and meta_pair_info[0].get("REMOTEDEVICEID") == self.remote_device_id:
+ if (
+ len(meta_pair_info) == 1
+ and meta_pair_info[0].get("REMOTEDEVICEID")
+ == self.remote_device_id
+ ):
self.meta_fs_pair_id = meta_pair_info[0].get("ID")
else:
- err_msg.append("Filesystem[%s] replication pair is exist." % metadata_fs)
+ err_msg.append(
+                        "Filesystem[%s] replication pair exists." % metadata_fs
+ )
LOG.info("Check disaster status success.")
return err_msg
@@ -464,8 +604,10 @@ class DRDeployPreCheck(object):
"deploy_mode",
]
if not os.path.exists(DEPLOY_PARAM_FILE):
- _err_msg = "Deploy param file[%s] is not exists, " \
- "please check cantian is deployed." % DEPLOY_PARAM_FILE
+ _err_msg = (
+                "Deploy param file[%s] does not exist, "
+                "please check that cantian is deployed." % DEPLOY_PARAM_FILE
+ )
LOG.error(_err_msg)
raise Exception(_err_msg)
deploy_mode = self.deploy_params.get("deploy_mode")
@@ -473,7 +615,9 @@ class DRDeployPreCheck(object):
check_list.append("storage_metadata_fs")
diff_list = []
for check_item in check_list:
- if self.deploy_params.get(check_item) != self.local_conf_params.get(check_item):
+ if self.deploy_params.get(check_item) != self.local_conf_params.get(
+ check_item
+ ):
diff_list.append(check_item)
if diff_list:
err_msg.append("Param check failed, different items[%s]" % diff_list)
@@ -495,21 +639,29 @@ class DRDeployPreCheck(object):
local_site = self.site
remote_site = ({"standby", "active"} - {self.site}).pop()
local_dr_deploy_param = conf_params.get("dr_deploy").get(local_site)
- local_dr_deploy_param["domain_name"] = conf_params.get("dr_deploy").get("domain_name", "")
+ local_dr_deploy_param["domain_name"] = conf_params.get("dr_deploy").get(
+ "domain_name", ""
+ )
remote_dr_deploy_param = conf_params.get("dr_deploy").get(remote_site)
- remote_dr_deploy_param["domain_name"] = conf_params.get("dr_deploy").get("domain_name", "")
+ remote_dr_deploy_param["domain_name"] = conf_params.get("dr_deploy").get(
+ "domain_name", ""
+ )
remote_pool_id = conf_params.get("dr_deploy").get("standby").get("pool_id")
- remote_dbstor_fs_vstore_id = conf_params.get("dr_deploy").get("standby").get("dbstor_fs_vstore_id")
+ remote_dbstor_fs_vstore_id = (
+ conf_params.get("dr_deploy").get("standby").get("dbstor_fs_vstore_id")
+ )
name_suffix = conf_params.get("dr_deploy").get("standby").get("name_suffix", "")
del conf_params["dr_deploy"]
self.local_conf_params = copy.deepcopy(conf_params)
self.local_conf_params.update(local_dr_deploy_param)
if self.site == "active":
- self.local_conf_params.update({
- "remote_pool_id": remote_pool_id,
- "remote_dbstor_fs_vstore_id": remote_dbstor_fs_vstore_id,
- "name_suffix": name_suffix
- })
+ self.local_conf_params.update(
+ {
+ "remote_pool_id": remote_pool_id,
+ "remote_dbstor_fs_vstore_id": remote_dbstor_fs_vstore_id,
+ "name_suffix": name_suffix,
+ }
+ )
self.remote_conf_params = copy.deepcopy(conf_params)
self.remote_conf_params.update(remote_dr_deploy_param)
deploy_policy = self.local_conf_params.get("deploy_policy", "default")
@@ -530,21 +682,31 @@ class DRDeployPreCheck(object):
"remote_pool_id": self.local_conf_params.get("remote_pool_id"),
"remote_cluster_name": self.local_conf_params.get("remote_cluster_name"),
"remote_device_id": self.remote_device_id,
- "remote_dbstor_fs_vstore_id": self.local_conf_params.get("remote_dbstor_fs_vstore_id"),
+ "remote_dbstor_fs_vstore_id": self.local_conf_params.get(
+ "remote_dbstor_fs_vstore_id"
+ ),
"domain_name": self.local_conf_params.get("domain_name"),
"hyper_domain_id": self.hyper_domain_id,
"vstore_pair_id": self.vstore_pair_id,
- "ulog_fs_pair_id": self.ulog_fs_pair_id
+ "ulog_fs_pair_id": self.ulog_fs_pair_id,
}
name_suffix = self.local_conf_params.get("name_suffix")
if name_suffix and self.site == "standby":
- self.local_conf_params["storage_dbstor_page_fs"] = RepFileSystemNameRule.NamePrefix + \
- self.local_conf_params[
- "storage_dbstor_page_fs"] + name_suffix
- if name_suffix and self.site == "standby" and not self.local_conf_params.get("mysql_metadata_in_cantian"):
- self.local_conf_params["mysql_metadata_in_cantian"] = RepFileSystemNameRule.NamePrefix + \
- self.local_conf_params[
- "mysql_metadata_in_cantian"] + name_suffix
+ self.local_conf_params["storage_dbstor_page_fs"] = (
+ RepFileSystemNameRule.NamePrefix
+ + self.local_conf_params["storage_dbstor_page_fs"]
+ + name_suffix
+ )
+ if (
+ name_suffix
+ and self.site == "standby"
+ and not self.local_conf_params.get("mysql_metadata_in_cantian")
+ ):
+ self.local_conf_params["mysql_metadata_in_cantian"] = (
+ RepFileSystemNameRule.NamePrefix
+ + self.local_conf_params["mysql_metadata_in_cantian"]
+ + name_suffix
+ )
self.local_conf_params.update(dr_params)
del self.local_conf_params["node_id"]
write_json_config(DR_DEPLOY_PARAM_FILE, self.local_conf_params)
@@ -552,14 +714,18 @@ class DRDeployPreCheck(object):
def init_opt(self):
local_login_ip = self.local_conf_params.get("dm_ip")
local_login_user = self.local_conf_params.get("dm_user")
- storage_operate = StorageInf((local_login_ip, local_login_user, self.dm_login_passwd))
+ storage_operate = StorageInf(
+ (local_login_ip, local_login_user, self.dm_login_passwd)
+ )
storage_operate.login()
self.storage_opt = storage_operate
self.deploy_operate = DRDeployCommon(storage_operate)
def parse_input_params(self):
parse_params = argparse.ArgumentParser()
- parse_params.add_argument("-s", "--site", dest="site", choices=['active', "standby"], required=True)
+ parse_params.add_argument(
+ "-s", "--site", dest="site", choices=["active", "standby"], required=True
+ )
parse_params.add_argument("-l", "--conf", dest="conf", required=True)
args = parse_params.parse_args()
self.site = args.site
@@ -572,25 +738,43 @@ class DRDeployPreCheck(object):
check_result = []
if self.site == "standby":
return check_result
- if not os.path.exists(os.path.join(CURRENT_PATH, "../../../config/deploy_param.json")):
- shutil.copy("/opt/cantian/config/deploy_param.json", os.path.join(CURRENT_PATH, "../../../config"))
+ if not os.path.exists(
+ os.path.join(CURRENT_PATH, "../../../config/deploy_param.json")
+ ):
+ shutil.copy(
+ "/opt/cantian/config/deploy_param.json",
+ os.path.join(CURRENT_PATH, "../../../config"),
+ )
return check_result
self.deploy_params = read_json_config(DEPLOY_PARAM_FILE)
check_result.extend(self.check_master_cantian_status())
- check_result.extend(self.check_file_system_status(
- fs_name=self.local_conf_params.get("storage_dbstor_page_fs"),
- vstore_id="0"))
- check_result.extend(self.check_file_system_status(
- fs_name=self.local_conf_params.get("storage_dbstor_fs"),
- vstore_id=self.local_conf_params.get("dbstor_fs_vstore_id")))
+ check_result.extend(
+ self.check_file_system_status(
+ fs_name=self.local_conf_params.get("storage_dbstor_page_fs"),
+ vstore_id="0",
+ )
+ )
+ check_result.extend(
+ self.check_file_system_status(
+ fs_name=self.local_conf_params.get("storage_dbstor_fs"),
+ vstore_id=self.local_conf_params.get("dbstor_fs_vstore_id"),
+ )
+ )
check_result.extend(self.check_active_exist_params())
if not self.local_conf_params.get("mysql_metadata_in_cantian"):
- check_result.extend(self.check_file_system_status(
- fs_name=self.local_conf_params.get("storage_metadata_fs"), vstore_id="0"))
+ check_result.extend(
+ self.check_file_system_status(
+ fs_name=self.local_conf_params.get("storage_metadata_fs"),
+ vstore_id="0",
+ )
+ )
speed_val_list = ["low", "medium", "high", "highest"]
sync_speed = self.local_conf_params.get("sync_speed", "medium")
if sync_speed not in speed_val_list:
- check_result.append("sync_speed[%s] is invalid, the option is %s" % (sync_speed, speed_val_list))
+ check_result.append(
+                "sync_speed[%s] is invalid, valid options are %s"
+ % (sync_speed, speed_val_list)
+ )
return check_result
def check_nfs_lif_info(self):
@@ -604,29 +788,45 @@ class DRDeployPreCheck(object):
archive_logic_ip = self.local_conf_params.get("archive_logic_ip")
metadata_logic_ip = self.local_conf_params.get("metadata_logic_ip")
deploy_mode = self.local_conf_params.get("deploy_mode")
- mysql_metadata_in_cantian = self.local_conf_params.get("mysql_metadata_in_cantian")
- share_fs_info = self.storage_opt.query_filesystem_info(storage_share_fs, vstore_id="0")
+ mysql_metadata_in_cantian = self.local_conf_params.get(
+ "mysql_metadata_in_cantian"
+ )
+ share_fs_info = self.storage_opt.query_filesystem_info(
+ storage_share_fs, vstore_id="0"
+ )
if not share_fs_info:
check_result.append(err_msg % ("storage_share_fs", storage_share_fs))
if deploy_mode == "combined":
- meta_lif_info = self.storage_opt.query_logical_port_info(metadata_logic_ip, vstore_id="0")
+ meta_lif_info = self.storage_opt.query_logical_port_info(
+ metadata_logic_ip, vstore_id="0"
+ )
if not meta_lif_info:
check_result.append(err_msg % ("metadata_logic_ip", metadata_logic_ip))
- meta_fs_info = self.storage_opt.query_filesystem_info(storage_metadata_fs, vstore_id="0")
+ meta_fs_info = self.storage_opt.query_filesystem_info(
+ storage_metadata_fs, vstore_id="0"
+ )
if mysql_metadata_in_cantian and not meta_fs_info:
- check_result.append(err_msg % ("storage_metadata_fs", storage_metadata_fs))
+ check_result.append(
+ err_msg % ("storage_metadata_fs", storage_metadata_fs)
+ )
system_nfs_service = self.storage_opt.query_nfs_service(vstore_id="0")
support_v41 = system_nfs_service.get("SUPPORTV41")
if support_v41 == "false":
check_result.append("System vstore nfs service[v4.1] is not support.")
if db_type == "1" and deploy_mode == "combined":
- archive_lif_info = self.storage_opt.query_logical_port_info(archive_logic_ip, vstore_id="0")
+ archive_lif_info = self.storage_opt.query_logical_port_info(
+ archive_logic_ip, vstore_id="0"
+ )
if not archive_lif_info:
check_result.append(err_msg % ("archive_logic_ip", archive_logic_ip))
if db_type == "1":
- archive_fs_info = self.storage_opt.query_filesystem_info(storage_archive_fs, vstore_id="0")
+ archive_fs_info = self.storage_opt.query_filesystem_info(
+ storage_archive_fs, vstore_id="0"
+ )
if not archive_fs_info:
- check_result.append(err_msg % ("storage_archive_fs", storage_archive_fs))
+ check_result.append(
+ err_msg % ("storage_archive_fs", storage_archive_fs)
+ )
return check_result
def check_standby_params(self):
@@ -648,15 +848,21 @@ class DRDeployPreCheck(object):
check_result.append("Params check failed")
conf_params = read_json_config(self.conf)
dbstor_fs_vstore_id = conf_params.get("dbstor_fs_vstore_id")
- remote_dbstor_fs_vstore_id = conf_params.get("dr_deploy").get("standby").get("dbstor_fs_vstore_id")
+ remote_dbstor_fs_vstore_id = (
+ conf_params.get("dr_deploy").get("standby").get("dbstor_fs_vstore_id")
+ )
if dbstor_fs_vstore_id != remote_dbstor_fs_vstore_id:
- check_result.append("Inconsistent dbstor fs vstore id, %s and %s" % (dbstor_fs_vstore_id,
- remote_dbstor_fs_vstore_id))
+ check_result.append(
+ "Inconsistent dbstor fs vstore id, %s and %s"
+ % (dbstor_fs_vstore_id, remote_dbstor_fs_vstore_id)
+ )
return check_result
try:
self.deploy_operate.storage_opt.query_vstore_info(dbstor_fs_vstore_id)
except Exception as err:
- check_result.append("Vstore[%s] is not exist, details: %s" % (dbstor_fs_vstore_id, str(err)))
+ check_result.append(
+                "Vstore[%s] does not exist, details: %s" % (dbstor_fs_vstore_id, str(err))
+ )
return check_result
check_result.extend(self.check_nfs_lif_info())
LOG.info("Param check success")
@@ -667,8 +873,10 @@ class DRDeployPreCheck(object):
remote_cluster_name = self.local_conf_params.get("remote_cluster_name")
cluster_name = self.local_conf_params.get("cluster_name")
if cluster_name != remote_cluster_name:
- check_result.append("Inconsistent cluster names, remote[%s], local[%s]."
- % (remote_cluster_name, cluster_name))
+ check_result.append(
+ "Inconsistent cluster names, remote[%s], local[%s]."
+ % (remote_cluster_name, cluster_name)
+ )
return check_result
def check_standby_install(self):
@@ -686,7 +894,9 @@ class DRDeployPreCheck(object):
ctom_result_code, _, _ = exec_popen(check_ctom_cmd)
if not cantain_result_code or not ctom_result_code:
check_result.append("Cantian standby has been installed, please check!")
- install_json_path = os.path.join(CURRENT_PATH, "../../cantian/install_config.json")
+ install_json_path = os.path.join(
+ CURRENT_PATH, "../../cantian/install_config.json"
+ )
install_json_data = read_json_config(install_json_path)
root_dir = os.path.join(CURRENT_PATH, "../../../../")
if install_json_data.get("M_RUNING_MODE") == "cantiand_with_mysql_in_cluster":
@@ -719,7 +929,12 @@ class DRDeployPreCheck(object):
check_result.extend(self.check_disaster_exist())
check_result.extend(self.check_standby_filesystem())
if check_result:
- _err = "\n".join([" " * 8 + str(index + 1) + "." + err for index, err in enumerate(check_result)])
+ _err = "\n".join(
+ [
+ " " * 8 + str(index + 1) + "." + err
+ for index, err in enumerate(check_result)
+ ]
+ )
_err = "DR deploy pre_check failed, details:\n" + _err
raise Exception(str(_err))
finally:
@@ -760,7 +975,9 @@ class ParamCheck(object):
for i in range(3):
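+            # Probe the MySQL password by opening and closing a session, up to 3 attempts 10s apart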
try:
time.sleep(10)
- mysql_shell = MysqlShell(self.mysql_cmd, user=self.mysql_user, password=mysql_pwd)
+ mysql_shell = MysqlShell(
+ self.mysql_cmd, user=self.mysql_user, password=mysql_pwd
+ )
mysql_shell.start_session()
mysql_shell.close_session()
break
@@ -775,9 +992,15 @@ class ParamCheck(object):
parse_params = argparse.ArgumentParser()
parse_params.add_argument("--action", dest="action", required=True)
parse_params.add_argument("--site", dest="site", required=False, default="")
- parse_params.add_argument("--mysql_cmd", dest="mysql_cmd", required=False, default="")
- parse_params.add_argument("--mysql_user", dest="mysql_user", required=False, default="")
- parse_params.add_argument("--display", dest="display", required=False, default="")
+ parse_params.add_argument(
+ "--mysql_cmd", dest="mysql_cmd", required=False, default=""
+ )
+ parse_params.add_argument(
+ "--mysql_user", dest="mysql_user", required=False, default=""
+ )
+ parse_params.add_argument(
+ "--display", dest="display", required=False, default=""
+ )
args = parse_params.parse_args()
self.action = args.action
self.site = args.site
@@ -785,6 +1008,8 @@ class ParamCheck(object):
self.mysql_user = args.mysql_user
dm_pwd = input()
self.check_dm_pwd(dm_pwd)
- if self.site == "active" and (self.action == "deploy" or self.action == "full_sync"):
+ if self.site == "active" and (
+ self.action == "deploy" or self.action == "full_sync"
+ ):
my_pwd = input()
self.check_mysql_pwd(my_pwd)
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py
index 3d1b8547e83c8d37bd65220ad6b61c61decc9429..d0c6987c7cea4b9c09f40cddef51c4328752754e 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py
@@ -10,12 +10,21 @@ from storage_operate.dr_deploy_operate.dr_deploy_common import DRDeployCommon
from logic.common_func import read_json_config
from logic.common_func import exec_popen
from logic.storage_operate import StorageInf
-from utils.config.rest_constant import HealthStatus, VstorePairConfigStatus, VstorePairRunningStatus, \
- MetroDomainRunningStatus, ReplicationRunningStatus
+from utils.config.rest_constant import (
+ HealthStatus,
+ VstorePairConfigStatus,
+ VstorePairRunningStatus,
+ MetroDomainRunningStatus,
+ ReplicationRunningStatus,
+)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
-LOCAL_PROCESS_RECORD_FILE = os.path.join(CURRENT_PATH, "../../../config/dr_process_record.json")
-FULL_SYNC_PROGRESS = os.path.join(CURRENT_PATH, "../../../config/full_sync_progress.json")
+LOCAL_PROCESS_RECORD_FILE = os.path.join(
+ CURRENT_PATH, "../../../config/dr_process_record.json"
+)
+FULL_SYNC_PROGRESS = os.path.join(
+ CURRENT_PATH, "../../../config/full_sync_progress.json"
+)
DR_DEPLOY_CONFIG = os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json")
DR_STATUS = os.path.join(CURRENT_PATH, "../../../config/dr_status.json")
@@ -29,10 +38,14 @@ class DrDeployQuery(object):
data = process_data.get("data")
table = ""
table += "-" * 68 + "\n"
- table += "|" + "task".center(50, " ") + "|" + "status".center(15, " ") + "|" + "\n"
+ table += (
+ "|" + "task".center(50, " ") + "|" + "status".center(15, " ") + "|" + "\n"
+ )
table += "-" * 68 + "\n"
for key, value in data.items():
- table += "|" + key.center(50, " ") + "|" + value.center(15, " ") + "|" + "\n"
+ table += (
+ "|" + key.center(50, " ") + "|" + value.center(15, " ") + "|" + "\n"
+ )
error = process_data.get("error")
code = error.get("code")
if code != 0:
@@ -43,7 +56,7 @@ class DrDeployQuery(object):
err_list = []
for _err in err_msg_list:
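+            # Wrap each error message into 62-character chunks so it fits inside the table border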
for i in range(0, len(_err), 62):
- err_list.append(_err[i:i + 62])
+ err_list.append(_err[i : i + 62])
for _err in err_list:
table += "| " + _err.ljust(62, " ") + " |" + "\n"
table += "-" * 68 + "\n"
@@ -70,8 +83,10 @@ class DrDeployQuery(object):
data["dr_deploy"] = "failed"
error["code"] = -1
if description == "":
- error["description"] = "The process exits abnormally," \
- "see /opt/cantian/log/deploy/om_deploy/dr_deploy.log for more details."
+ error["description"] = (
+                    "The process exits abnormally, "
+ "see /opt/cantian/log/deploy/om_deploy/dr_deploy.log for more details."
+ )
table_data = self.table_format(process_data)
json_data = json.dumps(process_data, indent=4)
return json_data if is_json_display else table_data
@@ -89,16 +104,30 @@ class DrStatusCheck(object):
def table_format(statuses: dict) -> str:
table = ""
table += "-" * 68 + "\n"
- table += "|" + "Component".center(50, " ") + "|" + "Status".center(15, " ") + "|\n"
+ table += (
+ "|" + "Component".center(50, " ") + "|" + "Status".center(15, " ") + "|\n"
+ )
table += "-" * 68 + "\n"
for key, value in statuses.items():
if key == "dr_status":
continue
- table += "|" + key.replace("_", " ").capitalize().center(50, " ") + "|" + value.center(15, " ") + "|\n"
+ table += (
+ "|"
+ + key.replace("_", " ").capitalize().center(50, " ")
+ + "|"
+ + value.center(15, " ")
+ + "|\n"
+ )
table += "-" * 68 + "\n"
- table += "|" + "DR Status".center(50, " ") + "|" + statuses["dr_status"].center(15, " ") + "|\n"
+ table += (
+ "|"
+ + "DR Status".center(50, " ")
+ + "|"
+ + statuses["dr_status"].center(15, " ")
+ + "|\n"
+ )
table += "-" * 68 + "\n"
return table
@@ -115,7 +144,9 @@ class DrStatusCheck(object):
def query_domain_status(self) -> str:
hyper_domain_id = self.dr_deploy_info.get("hyper_domain_id")
try:
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(hyper_domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ hyper_domain_id
+ )
if domain_info:
domain_running_status = domain_info.get("RUNNINGSTATUS")
if domain_running_status == MetroDomainRunningStatus.Normal:
@@ -129,14 +160,24 @@ class DrStatusCheck(object):
def query_vstore_pair_status(self) -> str:
vstore_pair_id = self.dr_deploy_info.get("vstore_pair_id")
try:
- vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(vstore_pair_id)
+ vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(
+ vstore_pair_id
+ )
if vstore_pair_info:
vstore_running_status = vstore_pair_info.get("RUNNINGSTATUS")
- if (vstore_running_status == VstorePairRunningStatus.Normal and
- vstore_pair_info.get("HEALTHSTATUS") == HealthStatus.Normal):
- if vstore_pair_info.get("CONFIGSTATUS") == VstorePairConfigStatus.Synchronizing:
+ if (
+ vstore_running_status == VstorePairRunningStatus.Normal
+ and vstore_pair_info.get("HEALTHSTATUS") == HealthStatus.Normal
+ ):
+ if (
+ vstore_pair_info.get("CONFIGSTATUS")
+ == VstorePairConfigStatus.Synchronizing
+ ):
return "Running"
- if vstore_pair_info.get("CONFIGSTATUS") == VstorePairConfigStatus.Normal:
+ if (
+ vstore_pair_info.get("CONFIGSTATUS")
+ == VstorePairConfigStatus.Normal
+ ):
return "Normal"
else:
return "Abnormal"
@@ -147,15 +188,26 @@ class DrStatusCheck(object):
def query_ulog_fs_pair_status(self) -> str:
filesystem_pair_id = self.dr_deploy_info.get("ulog_fs_pair_id")
try:
- filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
- pair_id=filesystem_pair_id)
+ filesystem_pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
+ pair_id=filesystem_pair_id
+ )
+ )
if filesystem_pair_info:
ulog_fs_running_status = filesystem_pair_info.get("RUNNINGSTATUS")
- if (ulog_fs_running_status == VstorePairRunningStatus.Normal and
- filesystem_pair_info.get("HEALTHSTATUS") == HealthStatus.Normal):
- if filesystem_pair_info.get("CONFIGSTATUS") == VstorePairConfigStatus.Synchronizing:
+ if (
+ ulog_fs_running_status == VstorePairRunningStatus.Normal
+ and filesystem_pair_info.get("HEALTHSTATUS") == HealthStatus.Normal
+ ):
+ if (
+ filesystem_pair_info.get("CONFIGSTATUS")
+ == VstorePairConfigStatus.Synchronizing
+ ):
return "Runing"
- if filesystem_pair_info.get("CONFIGSTATUS") == VstorePairConfigStatus.Normal:
+ if (
+ filesystem_pair_info.get("CONFIGSTATUS")
+ == VstorePairConfigStatus.Normal
+ ):
return "Normal"
else:
return "Abnormal"
@@ -166,9 +218,13 @@ class DrStatusCheck(object):
def query_page_fs_pair_status(self) -> str:
dbstor_page_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs")
try:
- dbstor_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(dbstor_page_fs_name)
+ dbstor_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
+ dbstor_page_fs_name
+ )
dbstor_page_fs_id = dbstor_page_fs_info.get("ID")
- page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(dbstor_page_fs_id)
+ page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(
+ dbstor_page_fs_id
+ )
if page_fs_pair_info:
if page_fs_pair_info[0].get("HEALTHSTATUS") == HealthStatus.Normal:
return "Normal"
@@ -188,11 +244,15 @@ class DrStatusCheck(object):
deploy_user = self.dr_deploy_info.get("deploy_user").strip().split(":")[0]
if deploy_mode == "dbstor":
storage_fs = self.dr_deploy_info.get("storage_share_fs")
- cmd = (f"su -s /bin/bash - {deploy_user} -c 'dbstor --query-file "
- f"--fs-name={storage_fs} --file-dir=/' | grep 'dr_deploy_param.json' | wc -l")
+ cmd = (
+ f"su -s /bin/bash - {deploy_user} -c 'dbstor --query-file "
+ f"--fs-name={storage_fs} --file-dir=/' | grep 'dr_deploy_param.json' | wc -l"
+ )
else:
storage_fs = self.dr_deploy_info.get("storage_metadata_fs")
- if os.path.exists(f"/mnt/dbdata/remote/metadata_{storage_fs}/dr_deploy_param.json"):
+ if os.path.exists(
+ f"/mnt/dbdata/remote/metadata_{storage_fs}/dr_deploy_param.json"
+ ):
return "Normal"
return "Abnormal"
code, count, err = exec_popen(cmd, timeout=180)
@@ -210,7 +270,9 @@ class DrStatusCheck(object):
os.makedirs(os.path.dirname(DR_STATUS), exist_ok=True)
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mode = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(DR_STATUS, flags, mode), "w", encoding='utf-8') as file:
+ with os.fdopen(
+ os.open(DR_STATUS, flags, mode), "w", encoding="utf-8"
+ ) as file:
json.dump(statuses, file, indent=4)
os.chmod(DR_STATUS, 0o640)
except Exception as e:
@@ -226,33 +288,53 @@ class DrStatusCheck(object):
"ulog_fs_pair_status": "Unknown",
"page_fs_pair_status": "Unknown",
"dr_status_file": "Unknown",
- "dr_status": "Abnormal"
+ "dr_status": "Abnormal",
}
statuses["domain_status"] = self.query_domain_status()
if statuses["domain_status"] in ["Unknown", "Abnormal"]:
self.update_dr_status_file(statuses)
- return json.dumps(statuses, indent=4) if is_json_display else self.table_format(statuses)
+ return (
+ json.dumps(statuses, indent=4)
+ if is_json_display
+ else self.table_format(statuses)
+ )
statuses["vstore_pair_status"] = self.query_vstore_pair_status()
if statuses["vstore_pair_status"] in ["Unknown", "Abnormal", "Running"]:
self.update_dr_status_file(statuses)
- return json.dumps(statuses, indent=4) if is_json_display else self.table_format(statuses)
+ return (
+ json.dumps(statuses, indent=4)
+ if is_json_display
+ else self.table_format(statuses)
+ )
statuses["ulog_fs_pair_status"] = self.query_ulog_fs_pair_status()
if statuses["ulog_fs_pair_status"] in ["Unknown", "Abnormal", "Running"]:
self.update_dr_status_file(statuses)
- return json.dumps(statuses, indent=4) if is_json_display else self.table_format(statuses)
+ return (
+ json.dumps(statuses, indent=4)
+ if is_json_display
+ else self.table_format(statuses)
+ )
statuses["page_fs_pair_status"] = self.query_page_fs_pair_status()
if statuses["page_fs_pair_status"] in ["Unknown", "Abnormal"]:
self.update_dr_status_file(statuses)
- return json.dumps(statuses, indent=4) if is_json_display else self.table_format(statuses)
+ return (
+ json.dumps(statuses, indent=4)
+ if is_json_display
+ else self.table_format(statuses)
+ )
statuses["dr_status_file"] = self.query_dr_status_file()
if statuses["dr_status_file"] in ["Abnormal"]:
self.update_dr_status_file(statuses)
- return json.dumps(statuses, indent=4) if is_json_display else self.table_format(statuses)
+ return (
+ json.dumps(statuses, indent=4)
+ if is_json_display
+ else self.table_format(statuses)
+ )
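+        # Every component status except the trailing "dr_status" aggregate must be Normal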
if all(status == "Normal" for status in list(statuses.values())[:-1]):
statuses["dr_status"] = "Normal"
@@ -261,7 +343,11 @@ class DrStatusCheck(object):
self.update_dr_status_file(statuses)
- return json.dumps(statuses, indent=4) if is_json_display else self.table_format(statuses)
+ return (
+ json.dumps(statuses, indent=4)
+ if is_json_display
+ else self.table_format(statuses)
+ )
class FullSyncProgress(DrDeployQuery):
@@ -290,8 +376,10 @@ class FullSyncProgress(DrDeployQuery):
data["full_sync"] = "failed"
error["code"] = -1
if description == "":
- error["description"] = "The process exits abnormally," \
- "see /opt/cantian/log/deploy/om_deploy/dr_deploy.log for more details."
+ error["description"] = (
+                    "The process exits abnormally, "
+ "see /opt/cantian/log/deploy/om_deploy/dr_deploy.log for more details."
+ )
table_data = self.table_format(process_data)
json_data = json.dumps(process_data, indent=4)
return json_data if is_json_display else table_data
@@ -303,8 +391,12 @@ class ProgressQuery(object):
@staticmethod
def execute(action=None, display=None):
parse_params = argparse.ArgumentParser()
- parse_params.add_argument("--action", dest="action", required=False, default="deploy")
- parse_params.add_argument("--display", dest="display", required=False, default="json")
+ parse_params.add_argument(
+ "--action", dest="action", required=False, default="deploy"
+ )
+ parse_params.add_argument(
+ "--display", dest="display", required=False, default="json"
+ )
args = parse_params.parse_args()
if action is None:
action = args.action
@@ -321,4 +413,4 @@ class ProgressQuery(object):
result = full_sync_progress.execute(display)
else:
result = "Invalid input."
- print(result)
\ No newline at end of file
+ print(result)
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py
index 20ddcda64af5811cc7944258d44a3e4424b87f65..771b4d6fa67f5f53a145b718cc7a6e1f489e8d4b 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py
@@ -9,8 +9,15 @@ from logic.common_func import read_json_config, get_status, exec_popen
from logic.storage_operate import StorageInf
from storage_operate.dr_deploy_operate.dr_deploy_common import DRDeployCommon
from om_log import LOGGER as LOG
-from utils.config.rest_constant import DomainAccess, MetroDomainRunningStatus, VstorePairRunningStatus, HealthStatus, \
- ConfigRole, DataIntegrityStatus, ReplicationRunningStatus
+from utils.config.rest_constant import (
+ DomainAccess,
+ MetroDomainRunningStatus,
+ VstorePairRunningStatus,
+ HealthStatus,
+ ConfigRole,
+ DataIntegrityStatus,
+ ReplicationRunningStatus,
+)
from get_config_info import get_env_info
RUN_USER = get_env_info("cantian_user")
@@ -19,15 +26,17 @@ DR_DEPLOY_CONFIG = os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.j
DEPLOY_PARAMS_CONFIG = os.path.join(CURRENT_PATH, "../../../config/deploy_param.json")
LOGICREP_APPCTL_FILE = os.path.join(CURRENT_PATH, "../../logicrep/appctl.sh")
EXEC_SQL = os.path.join(CURRENT_PATH, "../../cantian_common/exec_sql.py")
-CANTIAN_DISASTER_RECOVERY_STATUS_CHECK = 'echo -e "select DATABASE_ROLE from DV_LRPL_DETAIL;" | '\
- 'su -s /bin/bash - %s -c \'source ~/.bashrc && '\
- 'export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && '\
- 'python3 -B %s\'' % (RUN_USER, EXEC_SQL)
+CANTIAN_DISASTER_RECOVERY_STATUS_CHECK = (
+ 'echo -e "select DATABASE_ROLE from DV_LRPL_DETAIL;" | '
+ "su -s /bin/bash - %s -c 'source ~/.bashrc && "
+ "export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && "
+ "python3 -B %s'" % (RUN_USER, EXEC_SQL)
+)
DBSTOR_CHECK_VERSION_FILE = "/opt/cantian/dbstor/tools/cs_baseline.sh"
def load_json_file(file_path):
- with open(file_path, 'r') as f:
+ with open(file_path, "r") as f:
return json.load(f)
@@ -55,8 +64,10 @@ class SwitchOver(object):
if check_time < 20:
check_count = 1
LOG.info("Check cantian status.")
- cmd = "su -s /bin/bash - %s -c \"cms stat | " \
- "grep -v STAT | awk '{print \$1, \$3, \$6}'\"" % self.run_user
+ cmd = (
+ 'su -s /bin/bash - %s -c "cms stat | '
+ "grep -v STAT | awk '{print \$1, \$3, \$6}'\"" % self.run_user
+ )
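+        # awk keeps the node id, ONLINE state and work_stat columns of each "cms stat" row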
check_time_step = check_time // check_count
while check_time:
time.sleep(check_time_step)
@@ -68,7 +79,10 @@ class SwitchOver(object):
raise Exception(err_msg)
cms_stat = output.split("\n")
if len(cms_stat) < 2:
- err_msg = "Current cluster status is abnormal, output:%s, stderr:%s" % (output, stderr)
+ err_msg = "Current cluster status is abnormal, output:%s, stderr:%s" % (
+ output,
+ stderr,
+ )
LOG.error(err_msg)
raise Exception(err_msg)
online_flag = True
@@ -80,7 +94,11 @@ class SwitchOver(object):
if (online != "ONLINE" or work_stat != "1") and node_id == target_node:
online_flag = False
if not online_flag:
- LOG.info("Current cluster status is abnormal, output:%s, stderr:%s", output, stderr)
+ LOG.info(
+ "Current cluster status is abnormal, output:%s, stderr:%s",
+ output,
+ stderr,
+ )
continue
else:
break
@@ -94,19 +112,33 @@ class SwitchOver(object):
def query_sync_status(self):
while True:
- vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(self.vstore_pair_id)
+ vstore_pair_info = self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(
+ self.vstore_pair_id
+ )
health_status = vstore_pair_info.get("HEALTHSTATUS")
running_status = vstore_pair_info.get("RUNNINGSTATUS")
- LOG.info("Vstore pair sync running, running status[%s]",
- get_status(running_status, VstorePairRunningStatus))
- if running_status == VstorePairRunningStatus.Invalid or health_status == HealthStatus.Faulty:
- err_msg = "Hyper metro vstore pair status is not normal, " \
- "health_status[%s], running_status[%s], details: %s" % \
- (get_status(health_status, HealthStatus),
- get_status(running_status, VstorePairRunningStatus),
- vstore_pair_info)
+ LOG.info(
+ "Vstore pair sync running, running status[%s]",
+ get_status(running_status, VstorePairRunningStatus),
+ )
+ if (
+ running_status == VstorePairRunningStatus.Invalid
+ or health_status == HealthStatus.Faulty
+ ):
+ err_msg = (
+ "Hyper metro vstore pair status is not normal, "
+ "health_status[%s], running_status[%s], details: %s"
+ % (
+ get_status(health_status, HealthStatus),
+ get_status(running_status, VstorePairRunningStatus),
+ vstore_pair_info,
+ )
+ )
LOG.error(err_msg)
- if running_status == VstorePairRunningStatus.Normal and health_status == HealthStatus.Normal:
+ if (
+ running_status == VstorePairRunningStatus.Normal
+ and health_status == HealthStatus.Normal
+ ):
LOG.info("Vstore pair sync complete.")
break
time.sleep(60)
@@ -122,8 +154,10 @@ class SwitchOver(object):
def standby_cms_res_stop(self):
LOG.info("Standby stop by cms command.")
- cmd = "source ~/.bashrc && su -s /bin/bash - %s -c " \
- "\"cms res -stop db\"" % self.run_user
+ cmd = (
+ "source ~/.bashrc && su -s /bin/bash - %s -c "
+ '"cms res -stop db"' % self.run_user
+ )
return_code, output, stderr = exec_popen(cmd, timeout=600)
if return_code:
err_msg = "Cantian stop failed, error:%s." % output + stderr
@@ -132,8 +166,10 @@ class SwitchOver(object):
def standby_cms_res_start(self):
LOG.info("Standby start by cms command.")
- cmd = "source ~/.bashrc && su -s /bin/bash - %s -c " \
- "\"cms res -start db\"" % self.run_user
+ cmd = (
+ "source ~/.bashrc && su -s /bin/bash - %s -c "
+ '"cms res -start db"' % self.run_user
+ )
return_code, output, stderr = exec_popen(cmd, timeout=600)
if return_code:
err_msg = "Cantian start failed, error:%s." % output + stderr
@@ -151,8 +187,10 @@ class SwitchOver(object):
return outputs
def wait_res_stop(self):
- cmd = "su -s /bin/bash - %s -c \"cms stat | " \
- "grep -v STAT | awk '{print \$1, \$3}'\"" % self.run_user
+ cmd = (
+ 'su -s /bin/bash - %s -c "cms stat | '
+ "grep -v STAT | awk '{print \$1, \$3}'\"" % self.run_user
+ )
wait_time = 30
wait_time_step = 2
while wait_time:
@@ -196,8 +234,10 @@ class SwitchOver(object):
:return:
"""
LOG.info(f"Standby set iof[{iof}].")
- cmd = "su -s /bin/bash - %s -c \"source ~/.bashrc && "\
- "dbstor --io-forbidden %s\"" % (self.run_user, iof)
+ cmd = (
+ 'su -s /bin/bash - %s -c "source ~/.bashrc && '
+ 'dbstor --io-forbidden %s"' % (self.run_user, iof)
+ )
return_code, output, stderr = exec_popen(cmd, timeout=60)
if return_code:
err_msg = "set iof failed, error:%s." % output + stderr
@@ -224,7 +264,9 @@ class SwitchOver(object):
"""
LOG.info("Start querying the replay.")
while True:
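+            # Read the current DATABASE_ROLE from DV_LRPL_DETAIL via exec_sql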
- return_code, output, stderr = exec_popen(CANTIAN_DISASTER_RECOVERY_STATUS_CHECK, timeout=20)
+ return_code, output, stderr = exec_popen(
+ CANTIAN_DISASTER_RECOVERY_STATUS_CHECK, timeout=20
+ )
if return_code:
err_msg = "Query database role failed, error:%s." % output + stderr
LOG.error(err_msg)
@@ -258,41 +300,70 @@ class SwitchOver(object):
LOG.info("Active/standby switch start.")
self.check_cluster_status(target_node=self.node_id)
self.init_storage_opt()
- pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(self.ulog_fs_pair_id)
+ pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(
+ self.ulog_fs_pair_id
+ )
+ )
local_data_status = pair_info.get("LOCALDATASTATE")
remote_data_status = pair_info.get("REMOTEDATASTATE")
- if local_data_status == DataIntegrityStatus.inconsistent or remote_data_status == DataIntegrityStatus.inconsistent:
+ if (
+ local_data_status == DataIntegrityStatus.inconsistent
+ or remote_data_status == DataIntegrityStatus.inconsistent
+ ):
err_msg = "Data is inconsistent, please check."
LOG.error(err_msg)
raise Exception(err_msg)
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(self.hyper_domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ self.hyper_domain_id
+ )
running_status = domain_info.get("RUNNINGSTATUS")
config_role = domain_info.get("CONFIGROLE")
- if running_status != MetroDomainRunningStatus.Normal and running_status != MetroDomainRunningStatus.Split:
- err_msg = "DR recover operation is not allowed in %s status." % \
- get_status(running_status, MetroDomainRunningStatus)
+ if (
+ running_status != MetroDomainRunningStatus.Normal
+ and running_status != MetroDomainRunningStatus.Split
+ ):
+ err_msg = "DR recover operation is not allowed in %s status." % get_status(
+ running_status, MetroDomainRunningStatus
+ )
LOG.error(err_msg)
raise Exception(err_msg)
- if config_role == ConfigRole.Primary and running_status == MetroDomainRunningStatus.Normal:
+ if (
+ config_role == ConfigRole.Primary
+ and running_status == MetroDomainRunningStatus.Normal
+ ):
self.standby_logicrep_stop()
self.standby_cms_res_stop()
self.wait_res_stop()
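+            # Switchover sequence: split the domain, grant the secondary read/write, swap roles, restore read-only, rejoin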
self.dr_deploy_opt.split_filesystem_hyper_metro_domain(self.hyper_domain_id)
self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access(
- self.hyper_domain_id, DomainAccess.ReadAndWrite)
+ self.hyper_domain_id, DomainAccess.ReadAndWrite
+ )
self.dr_deploy_opt.swap_role_fs_hyper_metro_domain(self.hyper_domain_id)
- self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access(self.hyper_domain_id, DomainAccess.ReadOnly)
+ self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access(
+ self.hyper_domain_id, DomainAccess.ReadOnly
+ )
self.dr_deploy_opt.join_fs_hyper_metro_domain(self.hyper_domain_id)
self.query_sync_status()
- pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(self.page_fs_pair_id)
+ pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ self.page_fs_pair_id
+ )
+ )
page_role = pair_info.get("ISPRIMARY")
if page_role == "true":
self.dr_deploy_opt.swap_role_replication_pair(self.page_fs_pair_id)
else:
- LOG.info("Page fs rep pair is already standby site, pair_id[%s].", self.page_fs_pair_id)
+ LOG.info(
+                "Page fs rep pair is already at the standby site, pair_id[%s].",
+ self.page_fs_pair_id,
+ )
if not self.metadata_in_cantian:
- meta_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- self.meta_fs_pair_id)
+ meta_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ self.meta_fs_pair_id
+ )
+ )
meta_role = meta_info.get("ISPRIMARY")
if meta_role == "true":
self.dr_deploy_opt.swap_role_replication_pair(self.meta_fs_pair_id)
@@ -319,36 +390,46 @@ class DRRecover(SwitchOver):
"""
check_time_step = 2
LOG.info("Check cluster status.")
- cmd_srv = "su -s /bin/bash - %s -c \"cms stat -server | " \
- "grep -v SRV_READY | awk '{print \$1, \$2}'\"" % self.run_user
- cmd_voting = "su -s /bin/bash - %s -c \"cms node -connected | " \
- "grep -v VOTING | awk '{print \$1, \$NF}'\"" % self.run_user
-
+ cmd_srv = (
+ 'su -s /bin/bash - %s -c "cms stat -server | '
+ "grep -v SRV_READY | awk '{print \$1, \$2}'\"" % self.run_user
+ )
+ cmd_voting = (
+ 'su -s /bin/bash - %s -c "cms node -connected | '
+ "grep -v VOTING | awk '{print \$1, \$NF}'\"" % self.run_user
+ )
+
while check_time:
check_time -= check_time_step
            # Check that cms is normal on all nodes
- srv_stat= self.query_cluster_status(cmd_srv)
+ srv_stat = self.query_cluster_status(cmd_srv)
ready_flag = False
- if len(srv_stat)>1:
+ if len(srv_stat) > 1:
ready_flag = True
for node_stat in srv_stat:
_, ready_stat = node_stat.split(" ")
if ready_stat == "FALSE":
ready_flag = False
if not ready_flag:
- LOG.info("Current cms server status is NOT ready, details (node_id, SRV_READY): %s", ';'.join(srv_stat))
+ LOG.info(
+ "Current cms server status is NOT ready, details (node_id, SRV_READY): %s",
+ ";".join(srv_stat),
+ )
time.sleep(check_time_step)
continue
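+            # With cms ready on every node, wait until no node reports VOTING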
cms_voting_stat = self.query_cluster_status(cmd_voting)
voting_flag = True
- if len(cms_voting_stat)>1:
+ if len(cms_voting_stat) > 1:
voting_flag = False
for node_stat in cms_voting_stat:
_, voting_stat = node_stat.split(" ")
if voting_stat == "TRUE":
voting_flag = True
if voting_flag:
- LOG.info("Current cms is voting, details (node_id, VOTING): %s", ';'.join(cms_voting_stat))
+ LOG.info(
+ "Current cms is voting, details (node_id, VOTING): %s",
+ ";".join(cms_voting_stat),
+ )
time.sleep(check_time_step)
continue
break
@@ -362,30 +443,42 @@ class DRRecover(SwitchOver):
if self.single_write == "1" or self.cantian_recover_type == "full_sync":
LOG.info("cantian_recover_type is %s", self.cantian_recover_type)
if running_status != ReplicationRunningStatus.Synchronizing:
- self.dr_deploy_opt.sync_remote_replication_filesystem_pair(pair_id=pair_id,
- vstore_id="0",
- is_full_copy=False)
+ self.dr_deploy_opt.sync_remote_replication_filesystem_pair(
+ pair_id=pair_id, vstore_id="0", is_full_copy=False
+ )
time.sleep(10)
- pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id)
+ pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ pair_id
+ )
+ )
running_status = pair_info.get("RUNNINGSTATUS")
while running_status == ReplicationRunningStatus.Synchronizing:
- pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id)
+ pair_info = (
+ self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
+ pair_id
+ )
+ )
running_status = pair_info.get("RUNNINGSTATUS")
replication_progress = pair_info.get("REPLICATIONPROGRESS")
- LOG.info(f"Page fs rep pair is synchronizing, current progress: {replication_progress}%, please wait...")
+ LOG.info(
+ f"Page fs rep pair is synchronizing, current progress: {replication_progress}%, please wait..."
+ )
time.sleep(10)
else:
LOG.info("Single write is disabled, no need to execute replication steps.")
self.repl_success_flag = True
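+        # Replication is in sync (or was skipped): split the pair and cancel the secondary write lock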
self.dr_deploy_opt.split_remote_replication_filesystem_pair(pair_id)
- self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(pair_id)
+ self.dr_deploy_opt.remote_replication_filesystem_pair_cancel_secondary_write_lock(
+ pair_id
+ )
def standby_cms_purge_backup(self):
LOG.info("Standby purge backup by cms command.")
- cmd = "source ~/.bashrc && su -s /bin/bash - %s -c " \
- "\"ctbackup --purge-logs\"" % self.run_user
+ cmd = (
+ "source ~/.bashrc && su -s /bin/bash - %s -c "
+ '"ctbackup --purge-logs"' % self.run_user
+ )
return_code, output, stderr = exec_popen(cmd, timeout=600)
if return_code:
err_msg = "Execute command[ctbackup --purge-logs] failed."
@@ -406,12 +499,15 @@ class DRRecover(SwitchOver):
def rep_pair_recover(self, pair_id: str) -> None:
pair_info = self.dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(
- pair_id)
+ pair_id
+ )
page_role = pair_info.get("ISPRIMARY")
running_status = pair_info.get("RUNNINGSTATUS")
if page_role == "true":
self.dr_deploy_opt.swap_role_replication_pair(pair_id)
- self.dr_deploy_opt.remote_replication_filesystem_pair_set_secondary_write_lock(pair_id)
+ self.dr_deploy_opt.remote_replication_filesystem_pair_set_secondary_write_lock(
+ pair_id
+ )
self.execute_replication_steps(running_status, pair_id)
else:
LOG.info("Page fs rep pair is already standby site.")
@@ -419,12 +515,16 @@ class DRRecover(SwitchOver):
try:
self.check_cluster_status(log_type="info")
except Exception as _er:
- self.dr_deploy_opt.remote_replication_filesystem_pair_set_secondary_write_lock(pair_id)
+ self.dr_deploy_opt.remote_replication_filesystem_pair_set_secondary_write_lock(
+ pair_id
+ )
self.execute_replication_steps(running_status, pair_id)
else:
return
- elif running_status == ReplicationRunningStatus.Normal or \
- running_status == ReplicationRunningStatus.Synchronizing:
+ elif (
+ running_status == ReplicationRunningStatus.Normal
+ or running_status == ReplicationRunningStatus.Synchronizing
+ ):
self.execute_replication_steps(running_status, pair_id)
else:
err_msg = "Remote replication filesystem pair is not in normal status."
@@ -432,18 +532,26 @@ class DRRecover(SwitchOver):
raise Exception(err_msg)
def hyper_metro_status_check(self, running_status, config_role):
- if running_status != MetroDomainRunningStatus.Normal and running_status != MetroDomainRunningStatus.Split:
- err_msg = "DR recover operation is not allowed in %s status." % \
- get_status(running_status, MetroDomainRunningStatus)
+ if (
+ running_status != MetroDomainRunningStatus.Normal
+ and running_status != MetroDomainRunningStatus.Split
+ ):
+ err_msg = "DR recover operation is not allowed in %s status." % get_status(
+ running_status, MetroDomainRunningStatus
+ )
LOG.error(err_msg)
raise Exception(err_msg)
- if running_status == MetroDomainRunningStatus.Normal and config_role == ConfigRole.Primary:
- err_msg = "DR recover operation is not allowed in %s status." % \
- get_status(running_status, MetroDomainRunningStatus)
+ if (
+ running_status == MetroDomainRunningStatus.Normal
+ and config_role == ConfigRole.Primary
+ ):
+ err_msg = "DR recover operation is not allowed in %s status." % get_status(
+ running_status, MetroDomainRunningStatus
+ )
LOG.error(err_msg)
raise Exception(err_msg)
- def execute(self, cantian_recover_type = None):
+ def execute(self, cantian_recover_type=None):
"""
step:
        1. Check the current HyperMetro domain status:
@@ -473,14 +581,17 @@ class DRRecover(SwitchOver):
LOG.info("DR recover start.")
self.check_cluster_status_for_recover()
self.init_storage_opt()
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(self.hyper_domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ self.hyper_domain_id
+ )
running_status = domain_info.get("RUNNINGSTATUS")
config_role = domain_info.get("CONFIGROLE")
self.hyper_metro_status_check(running_status, config_role)
if running_status == MetroDomainRunningStatus.Split:
if config_role == ConfigRole.Primary:
self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access(
- self.hyper_domain_id, DomainAccess.ReadAndWrite)
+ self.hyper_domain_id, DomainAccess.ReadAndWrite
+ )
self.dr_deploy_opt.swap_role_fs_hyper_metro_domain(self.hyper_domain_id)
try:
self.standby_cms_res_stop()
@@ -496,7 +607,8 @@ class DRRecover(SwitchOver):
raise Exception(err_msg)
self.single_write = self.do_dbstor_baseline()
self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access(
- self.hyper_domain_id, DomainAccess.ReadOnly)
+ self.hyper_domain_id, DomainAccess.ReadOnly
+ )
try:
self.dr_deploy_opt.join_fs_hyper_metro_domain(self.hyper_domain_id)
except Exception as _er:
@@ -535,7 +647,9 @@ class FailOver(SwitchOver):
"""
LOG.info("Cancel secondary resource protection start.")
self.init_storage_opt()
- domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(self.hyper_domain_id)
+ domain_info = self.dr_deploy_opt.query_hyper_metro_domain_info(
+ self.hyper_domain_id
+ )
config_role = domain_info.get("CONFIGROLE")
if config_role == ConfigRole.Primary:
err_msg = "Fail over operation is not allowed in primary node."
@@ -545,17 +659,20 @@ class FailOver(SwitchOver):
if running_status == MetroDomainRunningStatus.Normal:
self.dr_deploy_opt.split_filesystem_hyper_metro_domain(self.hyper_domain_id)
self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access(
- self.hyper_domain_id, DomainAccess.ReadAndWrite)
+ self.hyper_domain_id, DomainAccess.ReadAndWrite
+ )
try:
self.standby_cms_res_start()
except Exception as _er:
- err_msg ="Standby cms res start failed, error: {}".format(_er)
+ err_msg = "Standby cms res start failed, error: {}".format(_er)
LOG.error(err_msg)
try:
- self.check_cluster_status(target_node=self.node_id, log_type="info",check_time=300)
+ self.check_cluster_status(
+ target_node=self.node_id, log_type="info", check_time=300
+ )
except Exception as _er:
err_msg = "Check cluster status failed, error: {}".format(_er)
LOG.error(err_msg)
raise Exception(err_msg)
self.query_database_role()
- LOG.info("Cancel secondary resource protection success.")
\ No newline at end of file
+ LOG.info("Cancel secondary resource protection success.")
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py
index 1705c1ad1731075689684195e6515b05b6acc0ca..e284924f66df9a2a39c60c56e1f0e712c56682f0 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py
@@ -7,7 +7,9 @@ import time
import shutil
from storage_operate.dr_deploy_operate.dr_deploy_common import DRDeployCommon
-from storage_operate.dr_deploy_operate.dr_deploy_pre_check import CANTIAN_STOP_SUCCESS_FLAG
+from storage_operate.dr_deploy_operate.dr_deploy_pre_check import (
+ CANTIAN_STOP_SUCCESS_FLAG,
+)
from storage_operate.dr_deploy_operate.dr_deploy import DRDeploy
from logic.storage_operate import StorageInf
from logic.common_func import read_json_config
@@ -18,7 +20,9 @@ from get_config_info import get_env_info
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
CANTIAN_DEPLOY_CONFIG = os.path.join(CURRENT_PATH, "../../../config/deploy_param.json")
DR_DEPLOY_CONFIG = os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json")
-DR_DEPLOY_REMOTE_CONFIG = os.path.join(CURRENT_PATH, "../../../config/remote/dr_deploy_param.json")
+DR_DEPLOY_REMOTE_CONFIG = os.path.join(
+ CURRENT_PATH, "../../../config/remote/dr_deploy_param.json"
+)
UNINSTALL_TIMEOUT = 900
@@ -31,7 +35,7 @@ class UNDeploy(object):
self.dr_deploy_info = read_json_config(DR_DEPLOY_CONFIG)
self.deploy_params = read_json_config(CANTIAN_DEPLOY_CONFIG)
self.run_user = get_env_info("cantian_user")
-
+
def init_storage_opt(self):
dm_ip = self.dr_deploy_info.get("dm_ip")
dm_user = self.dr_deploy_info.get("dm_user")
@@ -54,31 +58,40 @@ class UNDeploy(object):
try:
self.dr_deploy_opt.delete_remote_replication_filesystem_pair(page_id)
except Exception as err:
- self.dr_deploy_opt.delete_remote_replication_filesystem_pair(page_id, is_local_del=True)
+ self.dr_deploy_opt.delete_remote_replication_filesystem_pair(
+ page_id, is_local_del=True
+ )
LOG.info("Delete Replication pair[id:%s] success", page_id)
def delete_filesystem(self, vstore_id, fs_name):
if self.site == "active":
return
else:
- fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(fs_name,
- vstore_id)
+ fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
+ fs_name, vstore_id
+ )
if not fs_info:
LOG.info("Filesystem[%s] is not exist.", fs_name)
return
if fs_info.get("scheduleName") != "--":
                # A CDP snapshot schedule exists; delete it first
LOG.info("Delete schedule[%s]", fs_info.get("scheduleName"))
- self.dr_deploy_opt.storage_opt.delete_fs_cdp_schedule(fs_info.get("ID"),
- fs_info.get("TIMINGSNAPSHOTSCHEDULEID"),
- fs_info.get("scheduleName"),
- fs_info.get("vstoreId"))
+ self.dr_deploy_opt.storage_opt.delete_fs_cdp_schedule(
+ fs_info.get("ID"),
+ fs_info.get("TIMINGSNAPSHOTSCHEDULEID"),
+ fs_info.get("scheduleName"),
+ fs_info.get("vstoreId"),
+ )
LOG.info("Delete schedule[%s] success!", fs_info.get("scheduleName"))
fs_id = fs_info.get("ID")
- nfs_share_info = self.storage_opt.query_nfs_info(fs_id=fs_id, vstore_id=vstore_id)
+ nfs_share_info = self.storage_opt.query_nfs_info(
+ fs_id=fs_id, vstore_id=vstore_id
+ )
if nfs_share_info:
nfs_share_id = nfs_share_info[0].get("ID")
- self.storage_opt.delete_nfs_share(nfs_share_id=nfs_share_id, vstore_id=vstore_id)
+ self.storage_opt.delete_nfs_share(
+ nfs_share_id=nfs_share_id, vstore_id=vstore_id
+ )
LOG.info("Delete file system %s nfs share success!", fs_name)
self.storage_opt.delete_file_system(fs_id)
LOG.info("Delete file system %s success!", fs_name)
@@ -95,23 +108,32 @@ class UNDeploy(object):
storage_dbstor_fs = self.dr_deploy_info.get("storage_dbstor_fs")
if not storage_dbstor_fs:
return
- dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(storage_dbstor_fs,
- dbstor_fs_vstore_id)
+ dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(
+ storage_dbstor_fs, dbstor_fs_vstore_id
+ )
if not dbstor_fs_info:
LOG.info("Filesystem[%s] is not exist.", storage_dbstor_fs)
return
# Hyper Metro filesystem ID
dbstor_fs_id = dbstor_fs_info.get("ID")
# Query the Hyper Metro filesystem pair ID by the Hyper Metro filesystem ID
- hyper_filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ hyper_filesystem_pair_info = (
+ self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id)
+ )
if hyper_filesystem_pair_info:
hyper_filesystem_pair_id = hyper_filesystem_pair_info[0].get("ID")
try:
- self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(hyper_filesystem_pair_id, dbstor_fs_vstore_id)
+ self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(
+ hyper_filesystem_pair_id, dbstor_fs_vstore_id
+ )
except Exception as err:
- self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(hyper_filesystem_pair_id, dbstor_fs_vstore_id,
- is_local_del=True)
- LOG.info("Delete Hyper Metro filesystem pair id[%s] success", hyper_filesystem_pair_id)
+ self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(
+ hyper_filesystem_pair_id, dbstor_fs_vstore_id, is_local_del=True
+ )
+ LOG.info(
+ "Delete Hyper Metro filesystem pair id[%s] success",
+ hyper_filesystem_pair_id,
+ )
LOG.info("Delete Hyper Metro filesystem pair id success")
def delete_hyper_metro_filesystem_vstore_id(self):
@@ -122,25 +144,39 @@ class UNDeploy(object):
if not hyper_metro_vstore_pair_id:
return False
try:
- self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(hyper_metro_vstore_pair_id)
+ self.dr_deploy_opt.query_hyper_metro_vstore_pair_info(
+ hyper_metro_vstore_pair_id
+ )
except Exception as err:
if "1073781761" in str(err):
- LOG.info("Hyper Metro pair id[%s] is not exist.", hyper_metro_vstore_pair_id)
+ LOG.info(
+ "Hyper Metro pair id[%s] is not exist.", hyper_metro_vstore_pair_id
+ )
return False
else:
raise err
dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id")
- file_system_count = self.dr_deploy_opt.query_hyper_metro_filesystem_count_info(dbstor_fs_vstore_id)
+ file_system_count = self.dr_deploy_opt.query_hyper_metro_filesystem_count_info(
+ dbstor_fs_vstore_id
+ )
if file_system_count and file_system_count.get("COUNT") != "0":
- msg = "Delete Hyper Metro pair id[id:%s], " \
- "but there are also other pair file systems" % dbstor_fs_vstore_id
+ msg = (
+ "Delete Hyper Metro pair id[id:%s], "
+ "but there are also other pair file systems" % dbstor_fs_vstore_id
+ )
LOG.info(msg)
return False
try:
- self.dr_deploy_opt.delete_hyper_metro_vstore_pair(hyper_metro_vstore_pair_id)
+ self.dr_deploy_opt.delete_hyper_metro_vstore_pair(
+ hyper_metro_vstore_pair_id
+ )
except Exception as err:
- self.dr_deploy_opt.delete_hyper_metro_vstore_pair(hyper_metro_vstore_pair_id, is_local_del=True)
- LOG.info("Delete Hyper Metro pair id[id:%s] success", hyper_metro_vstore_pair_id)
+ self.dr_deploy_opt.delete_hyper_metro_vstore_pair(
+ hyper_metro_vstore_pair_id, is_local_del=True
+ )
+ LOG.info(
+ "Delete Hyper Metro pair id[id:%s] success", hyper_metro_vstore_pair_id
+ )
return True
def delete_hyper_metro_domain(self):
@@ -162,9 +198,13 @@ class UNDeploy(object):
try:
self.dr_deploy_opt.delete_filesystem_hyper_metro_domain(hyper_domain_id)
except Exception as err:
- self.dr_deploy_opt.delete_filesystem_hyper_metro_domain(hyper_domain_id, is_local_del=True)
+ self.dr_deploy_opt.delete_filesystem_hyper_metro_domain(
+ hyper_domain_id, is_local_del=True
+ )
metadata_dr_deploy_path = f"/mnt/dbdata/remote/metadata_{self.dr_deploy_info.get('storage_metadata_fs')}"
- metadata_dr_deploy_config = os.path.join(metadata_dr_deploy_path, "dr_deploy_param.json")
+ metadata_dr_deploy_config = os.path.join(
+ metadata_dr_deploy_path, "dr_deploy_param.json"
+ )
dr_deploy_config = f"/opt/cantian/config/dr_deploy_param.json"
if os.path.exists(dr_deploy_config):
os.remove(dr_deploy_config)
@@ -174,7 +214,9 @@ class UNDeploy(object):
def delete_replication(self):
action_parse = argparse.ArgumentParser()
- action_parse.add_argument("--site", dest="site", choices=["standby", "active"], required=True)
+ action_parse.add_argument(
+ "--site", dest="site", choices=["standby", "active"], required=True
+ )
args = action_parse.parse_args()
self.site = args.site
page_fs_pair_id = self.dr_deploy_info.get("page_fs_pair_id")
@@ -189,7 +231,9 @@ class UNDeploy(object):
def delete_hyper(self):
action_parse = argparse.ArgumentParser()
- action_parse.add_argument("--site", dest="site", choices=["standby", "active"], required=True)
+ action_parse.add_argument(
+ "--site", dest="site", choices=["standby", "active"], required=True
+ )
args = action_parse.parse_args()
self.site = args.site
# Delete the Hyper Metro filesystem pair ID
@@ -209,7 +253,10 @@ class UNDeploy(object):
cmd = "sh %s/stop.sh;last_cmd=$?" % ctl_file_path
_, output, stderr = exec_popen(cmd, timeout=3600)
if "Stop Cantian Engine success." not in output:
- err_msg = "Failed to execute stop, stderr:%s, output:%s" % (stderr, output)
+ err_msg = "Failed to execute stop, stderr:%s, output:%s" % (
+ stderr,
+ output,
+ )
raise Exception(err_msg)
if not os.path.exists(install_record_file):
return
@@ -222,15 +269,20 @@ class UNDeploy(object):
cmd = "sh %s/uninstall.sh override" % ctl_file_path
_, output, stderr = exec_popen(cmd, timeout=180)
if "uninstall finished" not in output:
- err_msg = "Failed to execute uninstall, stderr:%s, output:%s" % (stderr, output)
+ err_msg = "Failed to execute uninstall, stderr:%s, output:%s" % (
+ stderr,
+ output,
+ )
raise Exception(err_msg)
if os.path.exists("/opt/cantian"):
shutil.rmtree("/opt/cantian")
def wait_remote_node_exec(self, node_id, timeout):
wait_time = 0
- cmd = "su -s /bin/bash - %s -c \"cms stat | " \
- "grep -v STAT | awk '{print \$1, \$3, \$6}'\"" % self.run_user
+ cmd = (
+ 'su -s /bin/bash - %s -c "cms stat | '
+ "grep -v STAT | awk '{print \$1, \$3, \$6}'\"" % self.run_user
+ )
while timeout:
return_code, output, stderr = exec_popen(cmd, timeout=100)
cms_stat = output.split("\n")
@@ -268,26 +320,38 @@ class UNDeploy(object):
if self.deploy_params.get("deploy_mode") == "dbstor":
fs_name = self.deploy_params.get("storage_share_fs")
- clean_cmd = f"su -s /bin/bash - {self.run_user} -c \"dbstor --delete-file --fs-name={fs_name} " \
- "--file-name=/dr_deploy_param.json\""
+ clean_cmd = (
+ f'su -s /bin/bash - {self.run_user} -c "dbstor --delete-file --fs-name={fs_name} '
+ '--file-name=/dr_deploy_param.json"'
+ )
else:
fs_name = self.deploy_params.get("storage_metadata_fs")
- clean_cmd = f"rm -rf /mnt/dbdata/remote/metadata_{fs_name}/dr_deploy_param.json"
+ clean_cmd = (
+ f"rm -rf /mnt/dbdata/remote/metadata_{fs_name}/dr_deploy_param.json"
+ )
try:
ret_code, output, stderr = exec_popen(clean_cmd)
if ret_code:
LOG.info(f"Failed to execute command '{clean_cmd}', error: {stderr}")
except Exception as e:
- LOG.info(f"Exception occurred while executing command '{clean_cmd}': {str(e)}")
+ LOG.info(
+ f"Exception occurred while executing command '{clean_cmd}': {str(e)}"
+ )
def standby_uninstall(self, node_id, uninstall_cantian_flag):
- if self.site == "standby" and os.path.exists(CANTIAN_DEPLOY_CONFIG) and uninstall_cantian_flag:
+ if (
+ self.site == "standby"
+ and os.path.exists(CANTIAN_DEPLOY_CONFIG)
+ and uninstall_cantian_flag
+ ):
self.do_stop()
LOG.info("Stop Cantian engine success.")
if node_id == "0":
LOG.info("Start to delete dr deploy!")
rep_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs")
- mysql_metadata_in_cantian = self.dr_deploy_info.get("mysql_metadata_in_cantian")
+ mysql_metadata_in_cantian = self.dr_deploy_info.get(
+ "mysql_metadata_in_cantian"
+ )
metadata_fs = self.dr_deploy_info.get("storage_metadata_fs")
self.delete_replication()
self.delete_filesystem(vstore_id="0", fs_name=rep_fs_name)
@@ -301,7 +365,11 @@ class UNDeploy(object):
except Exception as err:
LOG.info("Standby site delete hyper system failed: %s", str(err))
self.clean_dr_config_file()
- if self.site == "standby" and os.path.exists(CANTIAN_DEPLOY_CONFIG) and uninstall_cantian_flag:
+ if (
+ self.site == "standby"
+ and os.path.exists(CANTIAN_DEPLOY_CONFIG)
+ and uninstall_cantian_flag
+ ):
if node_id == "0":
self.wait_remote_node_exec("1", UNINSTALL_TIMEOUT)
self.do_uninstall()
@@ -320,7 +388,9 @@ class UNDeploy(object):
self.init_storage_opt()
node_id = self.deploy_params.get("node_id")
action_parse = argparse.ArgumentParser()
- action_parse.add_argument("--site", dest="site", choices=["standby", "active"], required=True)
+ action_parse.add_argument(
+ "--site", dest="site", choices=["standby", "active"], required=True
+ )
args = action_parse.parse_args()
self.site = args.site
# Alarm prompt: confirm the uninstall, and whether to uninstall Cantian as well
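
Note on the hunks above: every pair/domain teardown in dr_undeploy.py follows the
same "remote first, local fallback" shape. A minimal standalone sketch of that
pattern (the method name comes from this diff; opt and pair_id are hypothetical
placeholders, not the real objects):

    def delete_pair_with_fallback(opt, pair_id):
        try:
            # Preferred path: tear the pair down on both sites.
            opt.delete_remote_replication_filesystem_pair(pair_id)
        except Exception:
            # Peer site unreachable: delete only the local half so the
            # undeploy can still make progress.
            opt.delete_remote_replication_filesystem_pair(pair_id, is_local_del=True)
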
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/install_mysql.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/install_mysql.py
index 29f4e7edaf4873ede3d30239f6815cb4b377b665..da1300ab637a751cf9b5b57b14cc2248383668ab 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/install_mysql.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/install_mysql.py
@@ -12,7 +12,8 @@ from om_log import DR_DEPLOY_LOG as LOG
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(CURRENT_PATH, "../../../../"))
MYSQL_INSTALL_PATH = "/opt/cantian/mysql/install"
-WORKDIR = '/opt/cantian/image/cantian_connector/cantian-connector-mysql/mysql_bin/'
+WORKDIR = "/opt/cantian/image/cantian_connector/cantian-connector-mysql/mysql_bin/"
+
def get_file_name(file_patten):
files = os.listdir(ROOT_DIR)
@@ -43,7 +44,7 @@ def execute_meta():
except Exception as e:
LOG.error("Obtain u_id or g_id failed, details: %s", str(e))
raise e
- file_name = get_file_name(r'Mysql_server_single.*.tgz')
+ file_name = get_file_name(r"Mysql_server_single.*.tgz")
source_path = os.path.join(ROOT_DIR, file_name)
try:
with tarfile.open(source_path) as tar_ref:
@@ -52,27 +53,39 @@ def execute_meta():
LOG.error("Extractall failed, details: %s", str(e))
raise e
LOG.info("Begin to copy mysql files, deploy mode meta.")
- cmd = "rm -rf %s/lib/plugin/ha_ctc.so" % os.path.join(WORKDIR, 'Mysql_server/mysql')
+ cmd = "rm -rf %s/lib/plugin/ha_ctc.so" % os.path.join(WORKDIR, "Mysql_server/mysql")
return_code, stdout, stderr = exec_popen(cmd, timeout=30)
- LOG.info("Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]" % (cmd, return_code, stdout, stderr))
- cmd = "cp -arf %s %s" % (os.path.join(WORKDIR, 'Mysql_server/mysql'), WORKDIR)
+ LOG.info(
+ "Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]"
+ % (cmd, return_code, stdout, stderr)
+ )
+ cmd = "cp -arf %s %s" % (os.path.join(WORKDIR, "Mysql_server/mysql"), WORKDIR)
return_code, stdout, stderr = exec_popen(cmd, timeout=180)
- LOG.info("Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]" % (cmd, return_code, stdout, stderr))
- cmd = "cp -arf %s %s" % (os.path.join(WORKDIR, 'mysql'), MYSQL_INSTALL_PATH)
+ LOG.info(
+ "Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]"
+ % (cmd, return_code, stdout, stderr)
+ )
+ cmd = "cp -arf %s %s" % (os.path.join(WORKDIR, "mysql"), MYSQL_INSTALL_PATH)
return_code, stdout, stderr = exec_popen(cmd, timeout=180)
- LOG.info("Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]" % (cmd, return_code, stdout, stderr))
+ LOG.info(
+ "Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]"
+ % (cmd, return_code, stdout, stderr)
+ )
cmd = f"cp -pf {MYSQL_INSTALL_PATH}/mysql/bin/mysql /usr/bin/"
return_code, stdout, stderr = exec_popen(cmd, timeout=30)
- LOG.info("Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]" % (cmd, return_code, stdout, stderr))
- os.chown('/opt/cantian/mysql/install/mysql', u_id, g_id)
- for root, dirs, files in os.walk('/opt/cantian/mysql/install/mysql'):
+ LOG.info(
+ "Execute cmd[%s], return_code[%s], stdout[%s], stderr[%s]"
+ % (cmd, return_code, stdout, stderr)
+ )
+ os.chown("/opt/cantian/mysql/install/mysql", u_id, g_id)
+ for root, dirs, files in os.walk("/opt/cantian/mysql/install/mysql"):
for d in dirs:
os.chown(os.path.join(root, d), u_id, g_id)
for f in files:
os.chown(os.path.join(root, f), u_id, g_id)
- if os.path.exists('/usr/local/mysql'):
- shutil.rmtree('/usr/local/mysql')
- shutil.copytree('/opt/cantian/mysql/install/mysql', '/usr/local/mysql')
+ if os.path.exists("/usr/local/mysql"):
+ shutil.rmtree("/usr/local/mysql")
+ shutil.copytree("/opt/cantian/mysql/install/mysql", "/usr/local/mysql")
LOG.info("Success to install mysql, deploy mode meta.")
@@ -92,19 +105,19 @@ def execute_nometa():
except Exception as e:
LOG.error("Obtain u_id or g_id failed, details: %s", str(e))
raise e
- file_name = get_file_name(r'mysql_release_.*.tar.gz')
+ file_name = get_file_name(r"mysql_release_.*.tar.gz")
source_path = os.path.join(ROOT_DIR, file_name)
with tarfile.open(source_path) as tar_ref:
tar_ref.extractall(WORKDIR)
- tar_ref.extractall('/opt/cantian/mysql/install/')
- os.chown('/opt/cantian/mysql/install/mysql', u_id, g_id)
- for root, dirs, files in os.walk('/opt/cantian/mysql/install/mysql'):
+ tar_ref.extractall("/opt/cantian/mysql/install/")
+ os.chown("/opt/cantian/mysql/install/mysql", u_id, g_id)
+ for root, dirs, files in os.walk("/opt/cantian/mysql/install/mysql"):
for d in dirs:
os.chown(os.path.join(root, d), u_id, g_id)
for f in files:
os.chown(os.path.join(root, f), u_id, g_id)
- shutil.copyfile('/opt/cantian/mysql/install/mysql/bin/mysql', '/usr/bin/mysql')
- if os.path.exists('/usr/local/mysql'):
- shutil.rmtree('/usr/local/mysql')
- shutil.copytree('/opt/cantian/mysql/install/mysql', '/usr/local/mysql')
+ shutil.copyfile("/opt/cantian/mysql/install/mysql/bin/mysql", "/usr/bin/mysql")
+ if os.path.exists("/usr/local/mysql"):
+ shutil.rmtree("/usr/local/mysql")
+ shutil.copytree("/opt/cantian/mysql/install/mysql", "/usr/local/mysql")
LOG.info("Success to install mysql, deploy mode nometa.")
diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py
index e4a9473f03891c8157eb34dda1f02e74e6f45dbc..7b967f64c13ae7eb5b2c249444fe6ad96a1082c4 100644
--- a/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py
+++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py
@@ -25,7 +25,9 @@ class UpdateDRParams(object):
self.storage_metadata_fs = self.deploy_params.get("storage_metadata_fs")
self.storage_share_fs = self.deploy_params.get("storage_share_fs")
self.cluster_name = self.deploy_params.get("cluster_name")
- self.mysql_metadata_in_cantian = self.deploy_params.get("mysql_metadata_in_cantian")
+ self.mysql_metadata_in_cantian = self.deploy_params.get(
+ "mysql_metadata_in_cantian"
+ )
self.dbstor_fs_vstore_id = self.deploy_params.get("dbstor_fs_vstore_id")
self.deploy_mode = self.deploy_params.get("deploy_mode")
@@ -35,8 +37,10 @@ class UpdateDRParams(object):
DR alarming requires restarting cantian_exporter
:return:
"""
- cmd = "ps -ef | grep \"python3 /opt/cantian/ct_om/service/cantian_exporter/exporter/execute.py\"" \
- " | grep -v grep | awk '{print $2}' | xargs kill -9"
+ cmd = (
+ 'ps -ef | grep "python3 /opt/cantian/ct_om/service/cantian_exporter/exporter/execute.py"'
+ " | grep -v grep | awk '{print $2}' | xargs kill -9"
+ )
exec_popen(cmd)
def copy_dr_deploy_param_file(self):
@@ -118,13 +122,18 @@ class UpdateDRParams(object):
if not current_real_path.startswith(target_path):
try:
- shutil.copy(DEPLOY_PARAM_FILE, os.path.join(CURRENT_PATH, "../../../config"))
+ shutil.copy(
+ DEPLOY_PARAM_FILE, os.path.join(CURRENT_PATH, "../../../config")
+ )
except Exception as _err:
LOG.info(f"copy DEPLOY_PARAM_FILE failed")
encrypted_pwd = KmcResolve.kmc_resolve_password("encrypted", dm_passwd)
dr_deploy_params["dm_pwd"] = encrypted_pwd
write_json_config(DR_DEPLOY_CONFIG, dr_deploy_params)
- os.chmod(os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"), mode=0o644)
+ os.chmod(
+ os.path.join(CURRENT_PATH, "../../../config/dr_deploy_param.json"),
+ mode=0o644,
+ )
if not current_real_path.startswith(target_path):
try:
shutil.copy(DR_DEPLOY_CONFIG, "/opt/cantian/config")
@@ -149,7 +158,9 @@ class UpdateDRParams(object):
dr_deploy_opt = DRDeployCommon(storage_operate)
LOG.info(f"begin to check hyper metro domain[{hyper_domain_id}]")
dr_deploy_opt.query_hyper_metro_domain_info(hyper_domain_id)
- LOG.info(f"begin to check hyper metro vstore pair[{hyper_metro_vstore_pair_id}]")
+ LOG.info(
+ f"begin to check hyper metro vstore pair[{hyper_metro_vstore_pair_id}]"
+ )
dr_deploy_opt.query_hyper_metro_vstore_pair_info(hyper_metro_vstore_pair_id)
LOG.info(f"begin to check hyper metro filesystem pair[{ulog_fs_pair_id}]")
dr_deploy_opt.query_hyper_metro_filesystem_pair_info_by_pair_id(ulog_fs_pair_id)
@@ -158,4 +169,3 @@ class UpdateDRParams(object):
if not self.mysql_metadata_in_cantian:
LOG.info(f"begin to check remote replication pair[{meta_fs_pair_id}]")
dr_deploy_opt.query_remote_replication_pair_info_by_pair_id(meta_fs_pair_id)
-
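
restart_cantian_exporter() above shells out to a ps|grep|awk|xargs pipeline. A
hedged stdlib-only equivalent, assuming pgrep is available on the host (this is
an illustration, not the code in this diff):

    import os
    import signal
    import subprocess

    TARGET = "cantian_exporter/exporter/execute.py"

    def kill_exporter():
        # pgrep -f matches the full command line, like the grep in the diff;
        # SIGKILL lets the daemon framework restart the exporter so it
        # picks up the DR alarm configuration.
        res = subprocess.run(["pgrep", "-f", TARGET], capture_output=True, text=True)
        for pid in res.stdout.split():
            os.kill(int(pid), signal.SIGKILL)
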
diff --git a/pkg/deploy/action/storage_operate/dr_operate_interface.py b/pkg/deploy/action/storage_operate/dr_operate_interface.py
index 80cfb3562aa627c1ce6e7f07a6b4545814f3423b..1582e3528fc185b17d05b1b3834f26bd18fcf17f 100644
--- a/pkg/deploy/action/storage_operate/dr_operate_interface.py
+++ b/pkg/deploy/action/storage_operate/dr_operate_interface.py
@@ -3,23 +3,32 @@
import traceback
import sys
from om_log import LOGGER as LOG
-from storage_operate.dr_deploy_operate.dr_deploy_pre_check import DRDeployPreCheck, ParamCheck
+from storage_operate.dr_deploy_operate.dr_deploy_pre_check import (
+ DRDeployPreCheck,
+ ParamCheck,
+)
from storage_operate.dr_deploy_operate.dr_deploy import DRDeploy
from storage_operate.dr_deploy_operate.dr_undeploy import UNDeploy
from storage_operate.dr_deploy_operate.dr_deploy_progress_query import ProgressQuery
-from storage_operate.dr_deploy_operate.dr_deploy_switchover import SwitchOver, DRRecover, FailOver
+from storage_operate.dr_deploy_operate.dr_deploy_switchover import (
+ SwitchOver,
+ DRRecover,
+ FailOver,
+)
from storage_operate.dr_deploy_operate.update_dr_params import UpdateDRParams
-HELP_MSG = ("example:\n"
- " sh appctl.sh dr_operate pre_check active/standby --conf=config_file_path\n"
- " sh appctl.sh dr_operate deploy standby/active -"
- "-mysql_cmd='/usr/local/mysql/bin/mysql' --mysql_user=myuser\n"
- " sh appctl.sh dr_operate progress_query --action=deploy --display=table/json\n"
- " sh appctl.sh dr_operate undeploy active/standby\n"
- " sh appctl.sh dr_operate switch_over\n"
- " sh appctl.sh dr_operate recover [full_sync]\n"
- " sh appctl.sh dr_operate fail_over\n"
- " sh appctl.sh dr_operate update_conf\n")
+HELP_MSG = (
+ "example:\n"
+ " sh appctl.sh dr_operate pre_check active/standby --conf=config_file_path\n"
+ " sh appctl.sh dr_operate deploy standby/active -"
+ "-mysql_cmd='/usr/local/mysql/bin/mysql' --mysql_user=myuser\n"
+ " sh appctl.sh dr_operate progress_query --action=deploy --display=table/json\n"
+ " sh appctl.sh dr_operate undeploy active/standby\n"
+ " sh appctl.sh dr_operate switch_over\n"
+ " sh appctl.sh dr_operate recover [full_sync]\n"
+ " sh appctl.sh dr_operate fail_over\n"
+ " sh appctl.sh dr_operate update_conf\n"
+)
class DRDeployOperate(object):
@@ -87,8 +96,9 @@ class DRDeployOperate(object):
def main():
- err_msg = "Failed to parse the DR setup command, deploy operate commands " \
- + HELP_MSG
+ err_msg = (
+ "Failed to parse the DR setup command, deploy operate commands " + HELP_MSG
+ )
dr_deploy_operate = DRDeployOperate()
if len(sys.argv) <= 1:
raise Exception(err_msg)
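
main() above treats the first CLI token as the operation and falls back to
HELP_MSG on anything it cannot parse. A hypothetical sketch of that dispatch
shape (the real DRDeployOperate method bodies are not shown in this hunk):

    import sys

    def dispatch(operate, err_msg):
        # The first argv token selects the handler; unknown input raises
        # with the usage text, matching the behaviour HELP_MSG describes.
        if len(sys.argv) <= 1:
            raise Exception(err_msg)
        handler = getattr(operate, sys.argv[1], None)
        if handler is None:
            raise Exception(err_msg)
        handler()
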
diff --git a/pkg/deploy/action/storage_operate/migrate_file_system.py b/pkg/deploy/action/storage_operate/migrate_file_system.py
index 658e07174d6eb763812bf203e24abfc80a91b901..e905be3e35c5171e76356ba1d63d5bcda94863c8 100644
--- a/pkg/deploy/action/storage_operate/migrate_file_system.py
+++ b/pkg/deploy/action/storage_operate/migrate_file_system.py
@@ -41,7 +41,9 @@ class MigrateFileSystem(StorageInf):
"""
kerberos_type = self.new_config_info.get("kerberos_key")
params = f" -o sec={kerberos_type},timeo=50,nosuid,nodev "
- self.mount_file_system(file_system_name, logic_ip, prefix="share", params=params)
+ self.mount_file_system(
+ file_system_name, logic_ip, prefix="share", params=params
+ )
def create_share_file_system_nfs_share(self, share_fs_id, share_path, vstore_id):
"""
@@ -54,7 +56,7 @@ class MigrateFileSystem(StorageInf):
data = {
"SHAREPATH": f"/{share_path}/",
"vstoreId": vstore_id,
- "FSID": share_fs_id
+ "FSID": share_fs_id,
}
return self.create_nfs_share(data)
@@ -65,7 +67,7 @@ class MigrateFileSystem(StorageInf):
"ROOTSQUASH": 1,
"PARENTID": parent_id,
"vstoreId": vstore_id,
- "NAME": client_name
+ "NAME": client_name,
}
self.add_nfs_client(data)
@@ -76,6 +78,7 @@ class MigrateFileSystem(StorageInf):
:param vstore_id: tenant ID
:return:
"""
+
def _exec_cmd():
_cmd = f"change vstore view id={vstore_id}"
res = self.ssh_client.execute_cmd(_cmd, expect=":/>", timeout=10)
@@ -95,6 +98,7 @@ class MigrateFileSystem(StorageInf):
err_msg = "Execute cmd[%s], details:%s" % (_cmd, res)
LOGGER.error(err_msg)
raise Exception(err_msg)
+
self.ssh_client.create_client()
try:
_exec_cmd()
@@ -112,17 +116,24 @@ class MigrateFileSystem(StorageInf):
clone_file_system_name = self.new_config_info.get("storage_share_fs")
clone_file_system_share_logic_ip = self.new_config_info.get("share_logic_ip")
clone_share_file_system_vstore_id = self.new_config_info.get("vstore_id")
- logic_port_info = self.query_logical_port_info(clone_file_system_share_logic_ip,
- vstore_id=clone_share_file_system_vstore_id)
+ logic_port_info = self.query_logical_port_info(
+ clone_file_system_share_logic_ip,
+ vstore_id=clone_share_file_system_vstore_id,
+ )
if logic_port_info is None:
- err_msg = "Logic port info[%s] is not exist" % clone_file_system_share_logic_ip
+ err_msg = (
+ "Logic port info[%s] is not exist" % clone_file_system_share_logic_ip
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
- clone_file_system_info = self.query_filesystem_info(clone_file_system_name,
- vstore_id=clone_share_file_system_vstore_id)
+ clone_file_system_info = self.query_filesystem_info(
+ clone_file_system_name, vstore_id=clone_share_file_system_vstore_id
+ )
if clone_file_system_info:
- err_msg = "Clone share file system[%s] is exist, details: %s " % (clone_file_system_name,
- clone_file_system_info)
+ err_msg = "Clone share file system[%s] is exist, details: %s " % (
+ clone_file_system_name,
+ clone_file_system_info,
+ )
LOGGER.error(err_msg)
raise Exception(err_msg)
LOGGER.info("Success to check migrate share fs info")
@@ -148,41 +159,59 @@ class MigrateFileSystem(StorageInf):
file_system_info = self.query_filesystem_info(file_system_name, vstore_id=0)
file_system_id = file_system_info.get("ID")
- clone_file_system_info = self.query_filesystem_info(file_system_name,
- vstore_id=clone_file_system_vstore_id)
+ clone_file_system_info = self.query_filesystem_info(
+ file_system_name, vstore_id=clone_file_system_vstore_id
+ )
if not clone_file_system_info:
- clone_file_system_info = self.create_clone_file_system(file_system_id,
- file_system_name,
- clone_file_system_vstore_id)
+ clone_file_system_info = self.create_clone_file_system(
+ file_system_id, file_system_name, clone_file_system_vstore_id
+ )
clone_file_system_id = clone_file_system_info.get("ID")
split_status = clone_file_system_info.get("SPLITSTATUS")
split_enable = clone_file_system_info.get("SPLITENABLE")
if int(split_status) == 1 and split_enable == "false":
- self.split_clone_file_system(clone_file_system_id, action=1, vstore_id=clone_file_system_vstore_id)
- self.query_split_clone_file_system_process(clone_file_system_name, vstore_id=clone_file_system_vstore_id)
- clone_file_system_nfs_info = self.query_nfs_info(clone_file_system_id,
- vstore_id=clone_file_system_vstore_id)
+ self.split_clone_file_system(
+ clone_file_system_id,
+ action=1,
+ vstore_id=clone_file_system_vstore_id,
+ )
+ self.query_split_clone_file_system_process(
+ clone_file_system_name, vstore_id=clone_file_system_vstore_id
+ )
+ clone_file_system_nfs_info = self.query_nfs_info(
+ clone_file_system_id, vstore_id=clone_file_system_vstore_id
+ )
if clone_file_system_nfs_info:
clone_share_file_system_nfs_id = clone_file_system_nfs_info[0].get("ID")
- self.delete_nfs_share(clone_share_file_system_nfs_id, vstore_id=clone_file_system_vstore_id)
+ self.delete_nfs_share(
+ clone_share_file_system_nfs_id,
+ vstore_id=clone_file_system_vstore_id,
+ )
clone_file_system_nfs_share_id = self.create_share_file_system_nfs_share(
- clone_file_system_id, file_system_name, clone_file_system_vstore_id)
+ clone_file_system_id, file_system_name, clone_file_system_vstore_id
+ )
share_nfs_info = self.query_nfs_info(file_system_id, vstore_id=0)
if not share_nfs_info:
err_msg = "Failed to query fs[%s] nfs share info." % file_system_name
LOGGER.error(err_msg)
raise Exception(err_msg)
share_nfs_id = share_nfs_info[0].get("ID")
- share_nfs_client_config = self.query_nfs_share_auth_client(share_nfs_id, vstore_id=0)
+ share_nfs_client_config = self.query_nfs_share_auth_client(
+ share_nfs_id, vstore_id=0
+ )
share_nfs_client_name = share_nfs_client_config[0].get("NAME")
- self.add_share_file_system_nfs_client(clone_file_system_nfs_share_id,
- clone_file_system_vstore_id,
- share_nfs_client_name)
+ self.add_share_file_system_nfs_client(
+ clone_file_system_nfs_share_id,
+ clone_file_system_vstore_id,
+ share_nfs_client_name,
+ )
time.sleep(2)
self.umount_share_file_system(file_system_name)
clone_file_system_logic_ip = self.new_config_info.get(logic_ip_key)
time.sleep(2)
- self.mount_share_file_system(clone_file_system_name, clone_file_system_logic_ip)
+ self.mount_share_file_system(
+ clone_file_system_name, clone_file_system_logic_ip
+ )
self.config_mandatory_lock_switch(clone_file_system_vstore_id)
def rollback(self):
@@ -205,16 +234,22 @@ class MigrateFileSystem(StorageInf):
self.umount_share_file_system(clone_file_system_name)
time.sleep(2)
self.mount_share_file_system(file_system_name, file_system_logic_ip)
- clone_file_system_info = self.query_filesystem_info(clone_file_system_name,
- vstore_id=clone_share_file_system_vstore_id)
+ clone_file_system_info = self.query_filesystem_info(
+ clone_file_system_name, vstore_id=clone_share_file_system_vstore_id
+ )
if not clone_file_system_info:
return
clone_file_system_id = clone_file_system_info.get("ID")
- clone_file_system_nfs_share_info = self.query_nfs_info(clone_file_system_id,
- clone_share_file_system_vstore_id)
+ clone_file_system_nfs_share_info = self.query_nfs_info(
+ clone_file_system_id, clone_share_file_system_vstore_id
+ )
if clone_file_system_nfs_share_info:
- clone_file_system_nfs_share_id = clone_file_system_nfs_share_info[0].get("ID")
- self.delete_nfs_share(clone_file_system_nfs_share_id, clone_share_file_system_vstore_id)
+ clone_file_system_nfs_share_id = clone_file_system_nfs_share_info[
+ 0
+ ].get("ID")
+ self.delete_nfs_share(
+ clone_file_system_nfs_share_id, clone_share_file_system_vstore_id
+ )
self.delete_file_system(clone_file_system_id)
@@ -223,11 +258,11 @@ def check_version(old_config):
return True
config_dir = os.path.dirname(old_config)
versions_path = os.path.join(config_dir, "..", "versions.yml")
- with open(versions_path, 'r', encoding='utf-8') as file:
+ with open(versions_path, "r", encoding="utf-8") as file:
source_version_info = file.readlines()
- version = ''
+ version = ""
for line in source_version_info:
- if 'Version:' in line:
+ if "Version:" in line:
version = line.split()[-1]
return version.startswith("2.0.0")
@@ -244,7 +279,9 @@ def main():
ip_addr = input()
user_name = input()
passwd = input()
- migrate_file_system = MigrateFileSystem(ip_addr, user_name, passwd, new_config, old_config)
+ migrate_file_system = MigrateFileSystem(
+ ip_addr, user_name, passwd, new_config, old_config
+ )
migrate_file_system.login()
try:
getattr(migrate_file_system, action)()
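
The migrate flow above is easier to follow stripped of logging and error
handling. A condensed sketch using the method names from this diff (st is a
StorageInf-like object; all argument values are placeholders):

    def clone_and_switch(st, fs_name, vstore_id, logic_ip):
        # Clone the share fs into the target vstore, split the clone from
        # its source, then swap the mount over to the clone.
        fs_id = st.query_filesystem_info(fs_name, vstore_id=0).get("ID")
        clone = st.query_filesystem_info(fs_name, vstore_id=vstore_id)
        if not clone:
            clone = st.create_clone_file_system(fs_id, fs_name, vstore_id)
        if int(clone.get("SPLITSTATUS")) == 1 and clone.get("SPLITENABLE") == "false":
            st.split_clone_file_system(clone.get("ID"), action=1, vstore_id=vstore_id)
            st.query_split_clone_file_system_process(fs_name, vstore_id=vstore_id)
        st.umount_share_file_system(fs_name)
        st.mount_share_file_system(fs_name, logic_ip)
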
diff --git a/pkg/deploy/action/storage_operate/split_dbstore_fs.py b/pkg/deploy/action/storage_operate/split_dbstore_fs.py
index 1a32c4435bcc5d6bbd5f43fa8604b779ef9ebf6c..130839a2fcfdef811502cddb884c434f88b70230 100644
--- a/pkg/deploy/action/storage_operate/split_dbstore_fs.py
+++ b/pkg/deploy/action/storage_operate/split_dbstore_fs.py
@@ -89,7 +89,9 @@ class StorageFileSystemSplit(StorageInf):
2. Move the directories inside ulog_root_dir up one level, then delete ulog_root_dir
:return:
"""
- log_namespace_path = f"/mnt/dbdata/remote/{self.storage_dbstor_fs}/{self.namespace}"
+ log_namespace_path = (
+ f"/mnt/dbdata/remote/{self.storage_dbstor_fs}/{self.namespace}"
+ )
page_pool_root_dir = os.path.join(log_namespace_path, "page_pool_root_dir")
ulog_root_dir = os.path.join(log_namespace_path, "ulog_root_dir")
if os.path.exists(page_pool_root_dir):
@@ -107,7 +109,9 @@ class StorageFileSystemSplit(StorageInf):
3. After moving the directories inside page_pool_root_dir up one level, delete page_pool_root_dir
:return:
"""
- page_namespace_path = f"/mnt/dbdata/remote/{self.storage_dbstor_page_fs}/{self.namespace}"
+ page_namespace_path = (
+ f"/mnt/dbdata/remote/{self.storage_dbstor_page_fs}/{self.namespace}"
+ )
namespace_file = os.path.join(page_namespace_path, self.namespace)
page_pool_root_dir = os.path.join(page_namespace_path, "page_pool_root_dir")
ulog_root_dir = os.path.join(page_namespace_path, "ulog_root_dir")
@@ -126,7 +130,7 @@ class StorageFileSystemSplit(StorageInf):
"ROOTSQUASH": 1,
"PARENTID": share_id,
"vstoreId": self.vstore_id,
- "NAME": "*"
+ "NAME": "*",
}
self.add_nfs_client(data)
@@ -144,7 +148,7 @@ class StorageFileSystemSplit(StorageInf):
data = {
"SHAREPATH": f"/{share_path}/",
"vstoreId": self.vstore_id,
- "FSID": clone_fs_id
+ "FSID": clone_fs_id,
}
share_id = self.create_nfs_share(data)
return share_id
@@ -156,8 +160,11 @@ class StorageFileSystemSplit(StorageInf):
:param clone_fs_id: clone filesystem ID
:return:
"""
- LOGGER.info("Begin to clear dbstor nfs share.fs_name:[%s], clone_fs_name:[%s]", self.storage_dbstor_fs,
- self.storage_dbstor_page_fs)
+ LOGGER.info(
+ "Begin to clear dbstor nfs share.fs_name:[%s], clone_fs_name:[%s]",
+ self.storage_dbstor_fs,
+ self.storage_dbstor_page_fs,
+ )
for _id in [fs_id, clone_fs_id]:
share_info = self.query_nfs_info(_id, vstore_id=self.vstore_id)
if share_info:
@@ -167,7 +174,9 @@ class StorageFileSystemSplit(StorageInf):
def pre_upgrade(self):
LOGGER.info("Begin to check dbstor page fs info")
- page_file_system_info = self.query_filesystem_info(self.storage_dbstor_page_fs, vstore_id=self.vstore_id)
+ page_file_system_info = self.query_filesystem_info(
+ self.storage_dbstor_page_fs, vstore_id=self.vstore_id
+ )
if page_file_system_info:
err_msg = "File system [%s] is exist." % self.storage_dbstor_page_fs
LOGGER.error(err_msg)
@@ -190,12 +199,18 @@ class StorageFileSystemSplit(StorageInf):
10. Delete the share
:return:
"""
- fs_info = self.query_filesystem_info(self.storage_dbstor_fs, vstore_id=self.vstore_id)
+ fs_info = self.query_filesystem_info(
+ self.storage_dbstor_fs, vstore_id=self.vstore_id
+ )
fs_id = fs_info.get("ID")
- clone_fs_info = self.query_filesystem_info(self.storage_dbstor_page_fs, vstore_id=self.vstore_id)
+ clone_fs_info = self.query_filesystem_info(
+ self.storage_dbstor_page_fs, vstore_id=self.vstore_id
+ )
if not clone_fs_info:
- clone_fs_info = self.create_clone_file_system(fs_id, self.storage_dbstor_page_fs, vstore_id=self.vstore_id)
+ clone_fs_info = self.create_clone_file_system(
+ fs_id, self.storage_dbstor_page_fs, vstore_id=self.vstore_id
+ )
clone_fs_id = clone_fs_info.get("ID")
split_status = clone_fs_info.get("SPLITSTATUS")
@@ -204,10 +219,14 @@ class StorageFileSystemSplit(StorageInf):
if int(split_status) == 1 and split_enable == "false":
self.split_clone_file_system(clone_fs_id)
- self.query_split_clone_file_system_process(self.storage_dbstor_page_fs, vstore_id=self.vstore_id)
+ self.query_split_clone_file_system_process(
+ self.storage_dbstor_page_fs, vstore_id=self.vstore_id
+ )
- for _fs_id, _fs_name in [(fs_id, self.storage_dbstor_fs),
- (clone_fs_id, self.storage_dbstor_page_fs)]:
+ for _fs_id, _fs_name in [
+ (fs_id, self.storage_dbstor_fs),
+ (clone_fs_id, self.storage_dbstor_page_fs),
+ ]:
_share_id = self.create_clone_share(_fs_id, _fs_name)
self.add_clone_share_client(_share_id)
@@ -216,7 +235,9 @@ class StorageFileSystemSplit(StorageInf):
self.clear_dbstor_nfs_share(_fs_id, clone_fs_id)
def rollback(self):
- page_fs_info = self.query_filesystem_info(self.storage_dbstor_page_fs, vstore_id=self.vstore_id)
+ page_fs_info = self.query_filesystem_info(
+ self.storage_dbstor_page_fs, vstore_id=self.vstore_id
+ )
if not page_fs_info:
return
file_system_id = page_fs_info.get("ID")
@@ -231,10 +252,16 @@ class StorageFileSystemSplit(StorageInf):
cantian_user = i.split("=")[1] if "cantian_user" in i else cantian_user
cantian_group = i.split("=")[1] if "cantian_group" in i else cantian_group
dbstor_fs_path = f"/mnt/dbdata/remote/{self.storage_dbstor_fs}/{self.namespace}"
- dbstor_page_fs_path = f"/mnt/dbdata/remote/{self.storage_dbstor_page_fs}/{self.namespace}"
- LOGGER.info("Start change owner of %s and %s", dbstor_fs_path, dbstor_page_fs_path)
- cmd = f"chown -hR {cantian_user}:{cantian_group} {dbstor_fs_path} &&" \
- f" chown -hR {cantian_user}:{cantian_group} {dbstor_page_fs_path}"
+ dbstor_page_fs_path = (
+ f"/mnt/dbdata/remote/{self.storage_dbstor_page_fs}/{self.namespace}"
+ )
+ LOGGER.info(
+ "Start change owner of %s and %s", dbstor_fs_path, dbstor_page_fs_path
+ )
+ cmd = (
+ f"chown -hR {cantian_user}:{cantian_group} {dbstor_fs_path} &&"
+ f" chown -hR {cantian_user}:{cantian_group} {dbstor_page_fs_path}"
+ )
return_code, _, stderr = exec_popen(cmd)
if return_code:
err_msg = f"Failed chown {dbstor_fs_path} {dbstor_page_fs_path}, details:{stderr}"
@@ -246,7 +273,7 @@ def check_version():
try:
version_check.read_source_version_info()
except Exception as _err:
- err_msg = 'obtain source version failed with error: %s', str(_err)
+ err_msg = "obtain source version failed with error: %s", str(_err)
raise Exception(err_msg) from _err
return version_check.source_version.startswith("2.0.0")
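
check_version() above (and its sibling in migrate_file_system.py) both reduce to
"find the Version: line in versions.yml and compare the prefix". A minimal
sketch of that parse, using the /opt/cantian path seen elsewhere in this diff:

    def read_version(versions_path="/opt/cantian/versions.yml"):
        # Return the last whitespace-separated token of the "Version:" line,
        # or an empty string when the file has no such line.
        with open(versions_path, "r", encoding="utf-8") as f:
            for line in f:
                if "Version:" in line:
                    return line.split()[-1]
        return ""

    # e.g. read_version().startswith("2.0.0") mirrors the checks above
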
diff --git a/pkg/deploy/action/update_config.py b/pkg/deploy/action/update_config.py
index 4672d1c375be7181a4cf9c6c965be1d34f939e4a..be0ae1ff339cddbc14c7c372315ea1961b9468bc 100644
--- a/pkg/deploy/action/update_config.py
+++ b/pkg/deploy/action/update_config.py
@@ -27,8 +27,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
pobj.stdin.write(cmd.encode())
pobj.stdin.write(os.linesep.encode())
for value in values:
@@ -53,15 +58,17 @@ def get_ctencrypt_passwd(passwd):
file_path = "/opt/cantian/action/cantian/install_config.json"
flags = os.O_RDONLY
modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(file_path, flags, modes), 'r') as fp:
+ with os.fdopen(os.open(file_path, flags, modes), "r") as fp:
json_data = json.load(fp)
- install_path = json_data['R_INSTALL_PATH'].strip()
+ install_path = json_data["R_INSTALL_PATH"].strip()
cmd = "source ~/.bashrc && %s/bin/ctencrypt -e PBKDF2" % install_path
values = [passwd, passwd]
ret_code, stdout, stderr = _exec_popen(cmd, values)
if ret_code:
- raise OSError("Failed to encrypt password of user [sys]."
- " Error: %s" % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to encrypt password of user [sys]."
+ " Error: %s" % (stderr + os.linesep + stderr)
+ )
# Example of output:
# Please enter password to encrypt:
# *********
@@ -127,7 +134,7 @@ def update_dbstor_conf(action, key, value=None):
"/opt/cantian/cms/dbstor/conf/dbs/dbstor_config_tool_1.ini",
"/opt/cantian/cms/dbstor/conf/dbs/dbstor_config_tool_2.ini",
"/opt/cantian/cms/dbstor/conf/dbs/dbstor_config_tool_3.ini",
- "/opt/cantian/cms/dbstor/conf/dbs/dbstor_config.ini"
+ "/opt/cantian/cms/dbstor/conf/dbs/dbstor_config.ini",
]
opt_dbstor_config = "/opt/cantian/dbstor/tools/dbstor_config.ini"
file_list.append(opt_dbstor_config)
@@ -190,42 +197,56 @@ def update_cms_ini_conf(action, key, value):
def update_ctsql_config(action, key, value):
ctsql_passwd = input()
encrypt_passwd = get_ctencrypt_passwd(ctsql_passwd)
- update_cmd = f'source ~/.bashrc && echo -e {ctsql_passwd} | ctsql sys@127.0.0.1:1611 -q -c ' \
- f'"alter system set _sys_password=\'{encrypt_passwd}\'"'
+ update_cmd = (
+ f"source ~/.bashrc && echo -e {ctsql_passwd} | ctsql sys@127.0.0.1:1611 -q -c "
+ f"\"alter system set _sys_password='{encrypt_passwd}'\""
+ )
ret_code, stdout, stderr = _exec_popen(update_cmd)
stderr = str(stderr)
stderr.replace(ctsql_passwd, "****")
if ret_code:
- raise OSError("Failed to encrypt password of user [sys]."
- " Error: %s" % (stderr + os.linesep + stderr))
+ raise OSError(
+ "Failed to encrypt password of user [sys]."
+ " Error: %s" % (stderr + os.linesep + stderr)
+ )
if "Succeed" not in stdout:
raise Exception("Update ctsql _sys_passwd failed")
def update_ctsql_passwd(action, key, value):
def _check_passwd():
- check_cmd = f'source ~/.bashrc && echo -e {ctsql_passwd} | ctsql sys@127.0.0.1:1611 -q -c ' \
- '"select version();"'
+ check_cmd = (
+ f"source ~/.bashrc && echo -e {ctsql_passwd} | ctsql sys@127.0.0.1:1611 -q -c "
+ '"select version();"'
+ )
ret_code, stdout, stderr = _exec_popen(check_cmd)
stderr = str(stderr)
stderr.replace(ctsql_passwd, "*****")
if ret_code:
- raise Exception("Check passwd failed, please ensure that the password is entered correctly.")
+ raise Exception(
+ "Check passwd failed, please ensure that the password is entered correctly."
+ )
+
file_path = "/opt/cantian/action/cantian/install_config.json"
- with open(file_path, 'r') as fp:
+ with open(file_path, "r") as fp:
json_data = json.load(fp)
- data_path = json_data['D_DATA_PATH'].strip()
+ data_path = json_data["D_DATA_PATH"].strip()
sys.path.append(os.path.join(CUR_PATH, "dbstor"))
from kmc_adapter import CApiWrapper
+
primary_keystore = "/opt/cantian/common/config/primary_keystore_bak.ks"
standby_keystore = "/opt/cantian/common/config/standby_keystore_bak.ks"
kmc_adapter = CApiWrapper(primary_keystore, standby_keystore)
kmc_adapter.initialize()
ctsql_passwd = getpass.getpass("please input new passwd:").strip()
ctencrypt_passwd = kmc_adapter.encrypt(ctsql_passwd.strip())
- split_env = os.environ['LD_LIBRARY_PATH'].split(":")
- filtered_env = [single_env for single_env in split_env if "/opt/cantian/dbstor/lib" not in single_env]
- os.environ['LD_LIBRARY_PATH'] = ":".join(filtered_env)
+ split_env = os.environ["LD_LIBRARY_PATH"].split(":")
+ filtered_env = [
+ single_env
+ for single_env in split_env
+ if "/opt/cantian/dbstor/lib" not in single_env
+ ]
+ os.environ["LD_LIBRARY_PATH"] = ":".join(filtered_env)
_check_passwd()
_conf_files = os.path.join(data_path, "cfg", "*sql.ini")
conf_file = glob.glob(_conf_files)[0]
@@ -242,11 +263,24 @@ def update_ctsql_passwd(action, key, value):
def main():
update_parse = argparse.ArgumentParser()
- update_parse.add_argument("-c", "--component", dest="component",
- choices=["dbstor", "cms", "cantian", "cantian_ini", "cms_ini", "ctsql", "ctsql_pwd"],
- required=True)
- update_parse.add_argument("-a", "--action", dest="action", choices=["del", "add", "update"],
- required=True)
+ update_parse.add_argument(
+ "-c",
+ "--component",
+ dest="component",
+ choices=[
+ "dbstor",
+ "cms",
+ "cantian",
+ "cantian_ini",
+ "cms_ini",
+ "ctsql",
+ "ctsql_pwd",
+ ],
+ required=True,
+ )
+ update_parse.add_argument(
+ "-a", "--action", dest="action", choices=["del", "add", "update"], required=True
+ )
update_parse.add_argument("-k", "--key", dest="key", required=True)
update_parse.add_argument("-v", "--value", dest="value", required=False)
args = update_parse.parse_args()
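
_exec_popen() above is the one piece of real plumbing in this file: it starts a
bare bash, writes the command, then feeds each interactive value (for example
the password ctencrypt prompts for twice) on its own line. A condensed sketch of
the same idea using communicate():

    import os
    import subprocess

    def run_via_bash(cmd, values=()):
        # Join the command and the interactive answers into one stdin
        # stream; bash consumes the first line as the command and the
        # prompted tool reads the rest, as in update_config.py.
        proc = subprocess.Popen(["bash"], shell=False, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        payload = os.linesep.join([cmd, *values]) + os.linesep
        stdout, stderr = proc.communicate(payload.encode())
        return proc.returncode, stdout.decode().strip(), stderr.decode().strip()
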
diff --git a/pkg/deploy/action/upgrade_version_check.py b/pkg/deploy/action/upgrade_version_check.py
index 916a03a1b154d6d9597de0b38b6d5dd875bc3661..488ecb46a94733656494cb6804aba3a53a5285c1 100644
--- a/pkg/deploy/action/upgrade_version_check.py
+++ b/pkg/deploy/action/upgrade_version_check.py
@@ -6,20 +6,22 @@ from string import digits
from om_log import LOGGER as LOG
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
-VERSION_PREFIX = 'Version:'
-SUB_VERSION_PREFIX = ('B', 'SP')
+VERSION_PREFIX = "Version:"
+SUB_VERSION_PREFIX = ("B", "SP")
class UpgradeVersionCheck:
def __init__(self, white_list=None):
self.white_list_file = white_list
- self.source_version_file = str(Path('/opt/cantian/versions.yml'))
- self.white_list_dict = {} # Format: {SOURCE-VERSION: [UPGRADE-MODE, CHANGE_SYSTEM]}
- self.source_version = ''
+ self.source_version_file = str(Path("/opt/cantian/versions.yml"))
+ self.white_list_dict = (
+ {}
+ ) # Format: {SOURCE-VERSION: [UPGRADE-MODE, CHANGE_SYSTEM]}
+ self.source_version = ""
def process_white_list(self):
- with open(self.white_list_file, 'r', encoding='utf-8') as file:
+ with open(self.white_list_file, "r", encoding="utf-8") as file:
white_list_info = file.readlines()
for white_list_detail in white_list_info[1:]:
@@ -27,8 +29,8 @@ class UpgradeVersionCheck:
self.white_list_dict[details[0]] = [details[1], details[2]]
def read_source_version_info(self):
- version = ''
- with open(self.source_version_file, 'r', encoding='utf-8') as file:
+ version = ""
+ with open(self.source_version_file, "r", encoding="utf-8") as file:
source_version_info = file.readlines()
for line in source_version_info:
@@ -40,51 +42,61 @@ class UpgradeVersionCheck:
def source_version_check(self):
result = ""
for white_list_version, white_list_detail in self.white_list_dict.items():
- *white_main_version, white_sub_version = white_list_version.split('.')
- *source_main_version, source_sub_version = self.source_version.split('.')
+ *white_main_version, white_sub_version = white_list_version.split(".")
+ *source_main_version, source_sub_version = self.source_version.split(".")
if source_main_version != white_main_version:
continue
- if white_sub_version == '*' or white_sub_version == source_sub_version:
- result = "{} {} {}".format(self.source_version, white_list_detail[0], white_list_detail[1])
+ if white_sub_version == "*" or white_sub_version == source_sub_version:
+ result = "{} {} {}".format(
+ self.source_version, white_list_detail[0], white_list_detail[1]
+ )
break
- if '-' in white_sub_version:
- min_version, max_version = white_sub_version.split('-')
- trans_map = str.maketrans('', '', digits)
+ if "-" in white_sub_version:
+ min_version, max_version = white_sub_version.split("-")
+ trans_map = str.maketrans("", "", digits)
source_pre_fix = source_sub_version.translate(trans_map)
- if source_pre_fix not in SUB_VERSION_PREFIX: # sub-version prefix is neither B nor SP, so the result stays empty
+ if (
+ source_pre_fix not in SUB_VERSION_PREFIX
+ ): # sub-version prefix is neither B nor SP, so the result stays empty
break
- sub_version_min_num = min_version.replace(source_pre_fix, '')
- sub_version_max_num = max_version.replace(source_pre_fix, '')
- sub_source_version_num = source_sub_version.replace(source_pre_fix, '')
- if sub_version_min_num.isdigit() \
- and sub_version_max_num.isdigit() \
- and int(sub_version_max_num) >= int(sub_source_version_num) >= int(sub_version_min_num):
- result = "{} {} {}".format(self.source_version, white_list_detail[0], white_list_detail[1])
+ sub_version_min_num = min_version.replace(source_pre_fix, "")
+ sub_version_max_num = max_version.replace(source_pre_fix, "")
+ sub_source_version_num = source_sub_version.replace(source_pre_fix, "")
+ if (
+ sub_version_min_num.isdigit()
+ and sub_version_max_num.isdigit()
+ and int(sub_version_max_num)
+ >= int(sub_source_version_num)
+ >= int(sub_version_min_num)
+ ):
+ result = "{} {} {}".format(
+ self.source_version, white_list_detail[0], white_list_detail[1]
+ )
break
return result
-if __name__ == '__main__':
+if __name__ == "__main__":
white_list_input = sys.argv[1]
version_check = UpgradeVersionCheck(white_list_input)
try:
version_check.process_white_list()
except Exception as err:
- LOG.error(f'obtain source version white list failed with error: {str(err)}')
- exit('')
+ LOG.error(f"obtain source version white list failed with error: {str(err)}")
+ exit("")
try:
version_check.read_source_version_info()
except Exception as err:
- LOG.error(f'obtain source version failed with error: {str(err)}')
- exit('')
+ LOG.error(f"obtain source version failed with error: {str(err)}")
+ exit("")
try:
print(version_check.source_version_check())
except Exception as err:
- LOG.error(f'source version check failed with error: {str(err)}')
- exit('')
+ LOG.error(f"source version check failed with error: {str(err)}")
+ exit("")
diff --git a/pkg/deploy/action/utils/client/response_parse.py b/pkg/deploy/action/utils/client/response_parse.py
index f1871a38de35b3574cabe88b8ca17fac6ba5cdbc..535a07ae5fb5fc53712839c141ed3a8e0f7b9829 100644
--- a/pkg/deploy/action/utils/client/response_parse.py
+++ b/pkg/deploy/action/utils/client/response_parse.py
@@ -1,5 +1,6 @@
# coding=utf-8
+
class ResponseParse(object):
def __init__(self, res):
"""
@@ -15,11 +16,11 @@ class ResponseParse(object):
if status_code == 200:
res = self.res.json()
if "error" in res:
- ret_result = res.get('error')
+ ret_result = res.get("error")
else:
- ret_result = res.get('result')
- error_code = ret_result['code']
- error_des = ret_result['description']
+ ret_result = res.get("result")
+ error_code = ret_result["code"]
+ error_des = ret_result["description"]
if error_des is None or error_code == 0:
error_des = "success"
return status_code, int(error_code), error_des
@@ -32,10 +33,10 @@ class ResponseParse(object):
if status_code == 200:
rsp_code = 0
if "error" in self.res.json():
- ret_result = self.res.json().get('error')
+ ret_result = self.res.json().get("error")
if "result" in self.res.json():
- ret_result = self.res.json().get('result')
- ret_data = self.res.json().get('data')
+ ret_result = self.res.json().get("result")
+ ret_data = self.res.json().get("data")
return rsp_code, ret_result, ret_data
def get_omtask_rsp_data(self):
@@ -46,8 +47,8 @@ class ResponseParse(object):
if status_code == 200:
rsp_code = 0
if "error" in self.res.json():
- ret_result = self.res.json().get('error')
+ ret_result = self.res.json().get("error")
if "result" in self.res.json():
- ret_result = self.res.json().get('result')
- ret_data = self.res.json().get('data')
+ ret_result = self.res.json().get("result")
+ ret_data = self.res.json().get("data")
return rsp_code, ret_result, ret_data
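
The parsing methods above all decode the same DeviceManager envelope: a status
object under "error" or "result", plus the payload under "data". A minimal
sketch of that decoding (payload is an already-parsed JSON dict):

    def parse_envelope(payload):
        # Prefer the "error" status block when present, as ResponseParse does.
        status = payload.get("error") or payload.get("result") or {}
        code = int(status.get("code", -1))
        desc = status.get("description")
        if desc is None or code == 0:
            desc = "success"
        return code, desc, payload.get("data")
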
diff --git a/pkg/deploy/action/utils/client/rest_client.py b/pkg/deploy/action/utils/client/rest_client.py
index 76ff37d31d4fa93692c8dc7176c66a1c5ed95fb3..fae060849eb0f8faf105eaa660e84352c0ddf430 100644
--- a/pkg/deploy/action/utils/client/rest_client.py
+++ b/pkg/deploy/action/utils/client/rest_client.py
@@ -21,7 +21,11 @@ NORMAL_STATE, ABNORMAL_STATE = 0, 1
def get_cur_timestamp():
utc_now = datetime.utcnow()
- return utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None).strftime('%Y%m%d%H%M%S')
+ return (
+ utc_now.replace(tzinfo=timezone.utc)
+ .astimezone(tz=None)
+ .strftime("%Y%m%d%H%M%S")
+ )
class ExecutionError(Exception):
@@ -41,67 +45,81 @@ class RestClient:
def gen_timestamp():
utc_now = datetime.utcnow()
cur_time = utc_now.replace(tzinfo=timezone.utc).astimezone(tz=None)
- return str(cur_time.strftime('%Y%m%d%H%M%S'))
+ return str(cur_time.strftime("%Y%m%d%H%M%S"))
@staticmethod
def exception_handler(err_msg=None, cur_mode=None):
- err_info = '[current_mode] {}, [err_info] {}'.format(cur_mode, err_msg)
+ err_info = "[current_mode] {}, [err_info] {}".format(cur_mode, err_msg)
LOG.error(err_info)
raise ExecutionError(err_info)
@staticmethod
def response_parse(res_data):
status_code = res_data.status_code
- err_code, err_details = -1, 'failed'
+ err_code, err_details = -1, "failed"
if status_code == 200:
exec_res = res_data.json()
- err_code, err_details = \
- exec_res.get('error').get('code'), exec_res.get('error').get('description')
+ err_code, err_details = exec_res.get("error").get("code"), exec_res.get(
+ "error"
+ ).get("description")
return status_code, int(err_code), err_details
def update_cookies(self, res):
- res_body, set_cookie = res.json().get('data'), res.headers.get('Set-Cookie')
+ res_body, set_cookie = res.json().get("data"), res.headers.get("Set-Cookie")
- self.token, self.device_id = res_body.get('iBaseToken'), res_body.get('deviceid')
+ self.token, self.device_id = res_body.get("iBaseToken"), res_body.get(
+ "deviceid"
+ )
- match_res = re.findall(r'session=ismsession=\w+;', set_cookie)
+ match_res = re.findall(r"session=ismsession=\w+;", set_cookie)
if match_res:
self.ism_session = match_res[0][:-1]
- def make_header(self, content_type='application/json'):
- header = {'Content-type': content_type}
+ def make_header(self, content_type="application/json"):
+ header = {"Content-type": content_type}
if self.token:
- header['iBaseToken'] = self.token
+ header["iBaseToken"] = self.token
if self.ism_session:
- header['Cookie'] = self.ism_session
+ header["Cookie"] = self.ism_session
return header
def login(self, keep_session=False):
- url = '{}{}:{}{}'.format(RestElemConstant.HTTPS, self.ip_addr, RestElemConstant.PORT, RestElemConstant.LOGIN)
+ url = "{}{}:{}{}".format(
+ RestElemConstant.HTTPS,
+ self.ip_addr,
+ RestElemConstant.PORT,
+ RestElemConstant.LOGIN,
+ )
user_info = {
- 'username': self.user_name,
- 'password': self.passwd,
- 'scope': 0,
- 'loginMode': 3,
- 'timeConversion': 0,
- 'isEncrypt': 'false'
+ "username": self.user_name,
+ "password": self.passwd,
+ "scope": 0,
+ "loginMode": 3,
+ "timeConversion": 0,
+ "isEncrypt": "false",
}
login_header = {
- 'Content-type': 'application/json',
- 'Cookie': '__LANGUAGE_KEY__=zh-CN; __IBASE_LANGUAGE_KEY__=zh-CN'
+ "Content-type": "application/json",
+ "Cookie": "__LANGUAGE_KEY__=zh-CN; __IBASE_LANGUAGE_KEY__=zh-CN",
}
requests.packages.urllib3.disable_warnings()
with requests.session() as session:
- res = session.post(url, data=json.dumps(user_info), headers=login_header, verify=False)
+ res = session.post(
+ url, data=json.dumps(user_info), headers=login_header, verify=False
+ )
status_code, err_code, err_details = self.response_parse(res)
if err_code:
- err_msg = ('Login DM failed {}, status_code: {}, err_code: {}, '
- 'err_details: {}'.format(self.ip_addr, status_code, err_code, err_details))
+ err_msg = (
+ "Login DM failed {}, status_code: {}, err_code: {}, "
+ "err_details: {}".format(
+ self.ip_addr, status_code, err_code, err_details
+ )
+ )
raise Exception(err_msg)
self.update_cookies(res)
@@ -112,11 +130,11 @@ class RestClient:
else:
res.close()
- return NORMAL_STATE, 'success'
+ return NORMAL_STATE, "success"
def logout(self):
url = RestElemConstant.LOGOUT.format(deviceId=self.device_id)
- res = self.normal_request(url, 'delete')
+ res = self.normal_request(url, "delete")
result = ResponseParse(res)
status_code, error_code, error_des = result.get_res_code()
if status_code != 200 or error_code != 0:
@@ -137,27 +155,35 @@ class RestClient:
url = Constant.HTTPS + self.ip_addr + ":" + Constant.PORT + url
if keep_session:
req = self.session
- self.token = self.res_login.get('data').get('ibasetoken')
+ self.token = self.res_login.get("data").get("ibasetoken")
else:
req = requests.session()
headers = self.make_header()
with req as session:
- if method == 'put':
- res = session.put(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'post':
- res = session.post(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'get':
- res = session.get(url, data=data, headers=headers, verify=False, timeout=timeout)
- elif method == 'delete':
- res = session.delete(url, data=data, headers=headers, verify=False, timeout=timeout)
+ if method == "put":
+ res = session.put(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "post":
+ res = session.post(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "get":
+ res = session.get(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
+ elif method == "delete":
+ res = session.delete(
+ url, data=data, headers=headers, verify=False, timeout=timeout
+ )
res.close()
return res
def read_helper(file_path):
- with open(file_path, 'r', encoding='utf-8') as f_handler:
+ with open(file_path, "r", encoding="utf-8") as f_handler:
deploy_data = f_handler.read()
return deploy_data
@@ -166,8 +192,8 @@ def write_helper(file_path, data):
modes = stat.S_IWRITE | stat.S_IRUSR
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
if data:
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.write(json.dumps(data, indent=4))
else:
- with os.fdopen(os.open(file_path, flags, modes), 'w', encoding='utf-8') as file:
+ with os.fdopen(os.open(file_path, flags, modes), "w", encoding="utf-8") as file:
file.truncate()
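
update_cookies() above pulls the session fragment out of the Set-Cookie header
with a regex. Isolated, with the trailing-semicolon trim made explicit:

    import re

    def extract_ism_session(set_cookie):
        # Keep the "session=ismsession=<token>" fragment, dropping the ";"
        # that the regex deliberately anchors on.
        matches = re.findall(r"session=ismsession=\w+;", set_cookie or "")
        return matches[0][:-1] if matches else None
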
diff --git a/pkg/deploy/action/utils/client/ssh_client.py b/pkg/deploy/action/utils/client/ssh_client.py
index 0eee8e3e6f175021119696044c832b640af56db0..78400c9da8a0c68f515d05b2afbbd037e48cf14b 100644
--- a/pkg/deploy/action/utils/client/ssh_client.py
+++ b/pkg/deploy/action/utils/client/ssh_client.py
@@ -19,12 +19,20 @@ def convert(code):
:param code:
:return:
"""
- code = code.decode('utf-8', errors='ignore') if isinstance(code, bytes) else code
+ code = code.decode("utf-8", errors="ignore") if isinstance(code, bytes) else code
return code
class SshClient(object):
- def __init__(self, ip, username, passwd=None, root_passwd=None, port=22, private_key_file=None):
+ def __init__(
+ self,
+ ip,
+ username,
+ passwd=None,
+ root_passwd=None,
+ port=22,
+ private_key_file=None,
+ ):
self.username = username
self.passwd = passwd
self.root_pwd = root_passwd
@@ -39,8 +47,11 @@ class SshClient(object):
if trans:
trans.close()
except Exception as ex:
- logger.error("failed to close connection after creating SSH client failed: {}."
- .format(ex))
+ logger.error(
+ "failed to close connection after creating SSH client failed: {}.".format(
+ ex
+ )
+ )
def create_client(self, timeout=180, width=300):
trans = None
@@ -62,21 +73,35 @@ class SshClient(object):
time.sleep(interval)
t += interval
if session.closed:
- output += convert(session.recv(SSH_RECV_BUFFER).decode('utf-8')).\
- replace(' \r', '\r').replace('\r', '')
+ output += (
+ convert(session.recv(SSH_RECV_BUFFER))
+ .replace(" \r", "\r")
+ .replace("\r", "")
+ )
break
if not session.recv_ready():
continue
- output += convert(session.recv(SSH_RECV_BUFFER).decode('utf-8')).\
- replace(' \r', '\r').replace('\r', '')
+ output += (
+ convert(session.recv(SSH_RECV_BUFFER))
+ .replace(" \r", "\r")
+ .replace("\r", "")
+ )
return output.split(cmd)[-1]
def close_client(self):
try:
- if self.ssh_client and isinstance(self.ssh_client, dict) and 'client' in self.ssh_client:
- self.ssh_client['client'].close()
- if self.ssh_client and isinstance(self.ssh_client, dict) and 'sshClient' in self.ssh_client:
- self.ssh_client['sshClient'].close()
+ if (
+ self.ssh_client
+ and isinstance(self.ssh_client, dict)
+ and "client" in self.ssh_client
+ ):
+ self.ssh_client["client"].close()
+ if (
+ self.ssh_client
+ and isinstance(self.ssh_client, dict)
+ and "sshClient" in self.ssh_client
+ ):
+ self.ssh_client["sshClient"].close()
except Exception as err:
err_msg = "Close ssh client err, details:%s" % str(err)
logger.error(err_msg)
@@ -89,7 +114,9 @@ class SshClient(object):
if self.passwd:
trans.connect(username=self.username, password=self.passwd)
else:
- private_key = paramiko.RSAKey.from_private_key_file(self.private_key_file)
+ private_key = paramiko.RSAKey.from_private_key_file(
+ self.private_key_file
+ )
trans.connect(username=self.username, pkey=private_key)
except Exception as err:
err_mgs = "Create ssh client failed, details: %s" % str(err)
@@ -100,15 +127,15 @@ class SshClient(object):
channel.settimeout(timeout)
channel.get_pty(width=width)
channel.invoke_shell()
- stdout = channel.makefile('r', -1)
- self.ssh_client['ip'] = self.ip
- self.ssh_client['port'] = self.port
- self.ssh_client['username'] = self.username
- self.ssh_client['timeout'] = timeout
- self.ssh_client['width'] = width
- self.ssh_client['client'] = trans
- self.ssh_client['channel'] = channel
- self.ssh_client['stdout'] = stdout
+ stdout = channel.makefile("r", -1)
+ self.ssh_client["ip"] = self.ip
+ self.ssh_client["port"] = self.port
+ self.ssh_client["username"] = self.username
+ self.ssh_client["timeout"] = timeout
+ self.ssh_client["width"] = width
+ self.ssh_client["client"] = trans
+ self.ssh_client["channel"] = channel
+ self.ssh_client["stdout"] = stdout
logger.info("Create ssh client host[%s] success" % self.ip)
return trans
@@ -120,7 +147,7 @@ class SshClient(object):
"""
sftp = None
try:
- sftp = paramiko.SFTPClient.from_transport(self.ssh_client['client'])
+ sftp = paramiko.SFTPClient.from_transport(self.ssh_client["client"])
sftp.put(source, dest)
except Exception as err:
err_msg = f"Upload failed from {source} to {dest}, details: {err}"
@@ -144,7 +171,7 @@ class SshClient(object):
dest = os.path.join(dest, filename)
if os.path.exists(dest):
os.remove(dest)
- sftp = paramiko.SFTPClient.from_transport(self.ssh_client['client'])
+ sftp = paramiko.SFTPClient.from_transport(self.ssh_client["client"])
sftp.get(source, dest)
except Exception as err:
err_msg = f"download failed from {source} to {dest}, details: {err}"
@@ -153,4 +180,3 @@ class SshClient(object):
finally:
if sftp is not None:
sftp.close()
-
diff --git a/pkg/deploy/action/utils/config/rest_constant.py b/pkg/deploy/action/utils/config/rest_constant.py
index 47a08b34bff3e232671d266b7e05623e566214ed..2f0e097a0f702da313dba1626e650539590d7378 100644
--- a/pkg/deploy/action/utils/config/rest_constant.py
+++ b/pkg/deploy/action/utils/config/rest_constant.py
@@ -1,56 +1,84 @@
class Constant:
- PORT = '8088'
- HTTPS = 'https://'
- LOGIN = '/deviceManager/rest/xxxxx/login'
- LOGOUT = '/deviceManager/rest/{deviceId}/sessions'
- QUERY_POOL = '/deviceManager/rest/{deviceId}/storagepool'
- CREATE_FS = '/deviceManager/rest/{deviceId}/filesystem'
- QUERY_FILE_SYSTEM_NUM = '/deviceManager/rest/{deviceId}/filesystem/count'
- DELETE_FS = '/deviceManager/rest/{deviceId}/filesystem/{id}'
- NFS_SERVICE = '/deviceManager/rest/{deviceId}/nfsservice'
- NFS_SHARE_ADD = '/deviceManager/rest/{deviceId}/NFSSHARE'
- NFS_SHARE_ADD_CLIENT = '/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT'
- NFS_SHARE_DELETE = '/deviceManager/rest/{deviceId}/NFSSHARE/{id}'
- NFS_SHARE_DEL_CLIENT = '/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT/{id}'
- NFS_SHARE_QUERY = '/deviceManager/rest/{deviceId}/NFSSHARE'
- QUERY_VSTORE = '/deviceManager/rest/{deviceId}/vstore/count'
- CREATE_VSTORE = '/deviceManager/rest/{deviceId}/vstore'
- DELETE_VSTORE = '/deviceManager/rest/{deviceId}/vstore/{id}'
+ PORT = "8088"
+ HTTPS = "https://"
+ LOGIN = "/deviceManager/rest/xxxxx/login"
+ LOGOUT = "/deviceManager/rest/{deviceId}/sessions"
+ QUERY_POOL = "/deviceManager/rest/{deviceId}/storagepool"
+ CREATE_FS = "/deviceManager/rest/{deviceId}/filesystem"
+ QUERY_FILE_SYSTEM_NUM = "/deviceManager/rest/{deviceId}/filesystem/count"
+ DELETE_FS = "/deviceManager/rest/{deviceId}/filesystem/{id}"
+ NFS_SERVICE = "/deviceManager/rest/{deviceId}/nfsservice"
+ NFS_SHARE_ADD = "/deviceManager/rest/{deviceId}/NFSSHARE"
+ NFS_SHARE_ADD_CLIENT = "/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT"
+ NFS_SHARE_DELETE = "/deviceManager/rest/{deviceId}/NFSSHARE/{id}"
+ NFS_SHARE_DEL_CLIENT = "/deviceManager/rest/{deviceId}/NFS_SHARE_AUTH_CLIENT/{id}"
+ NFS_SHARE_QUERY = "/deviceManager/rest/{deviceId}/NFSSHARE"
+ QUERY_VSTORE = "/deviceManager/rest/{deviceId}/vstore/count"
+ CREATE_VSTORE = "/deviceManager/rest/{deviceId}/vstore"
+ DELETE_VSTORE = "/deviceManager/rest/{deviceId}/vstore/{id}"
CREATE_LIF = "/deviceManager/rest/{deviceId}/lif"
DELETE_LIF = "/deviceManager/rest/{deviceId}/lif?NAME={name}"
CREATE_CLONE_FS = "/deviceManager/rest/{deviceId}/filesystem"
SPLIT_CLONE_FS = "/deviceManager/rest/{deviceId}/clone_fs_split"
CREATE_FSSNAPSHOT = "/deviceManager/rest/{deviceId}/fssnapshot"
ROLLBACK_SNAPSHOT = "/deviceManager/rest/{deviceId}/fssnapshot/rollback_fssnapshot"
- QUERY_ROLLBACK_SNAPSHOT_PROCESS = "/deviceManager/rest/{deviceId}/FSSNAPSHOT/" \
- "query_fs_snapshot_rollback?PARENTNAME={fs_name}"
+ QUERY_ROLLBACK_SNAPSHOT_PROCESS = (
+ "/deviceManager/rest/{deviceId}/FSSNAPSHOT/"
+ "query_fs_snapshot_rollback?PARENTNAME={fs_name}"
+ )
QUERY_LOGIC_PORT_INFO = "/deviceManager/rest/{deviceId}/lif"
# 容灾查询操作
QUERY_SYSTEM_INFO = "/deviceManager/rest/{deviceId}/system/"
QUERY_REMOTE_DEVICE_INFO = "/deviceManager/rest/{deviceId}/remote_device"
QUERY_LICENSE_FEATURE = "/deviceManager/rest/{deviceId}/license/feature"
- QUERY_HYPER_METRO_FILE_SYSTEM_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair/associate"
- QUERY_HYPER_METRO_FILE_SYSTEM_COUNT = "/deviceManager/rest/{deviceId}/HyperMetroPair/count"
- QUERY_REPLICATION_FILE_SYSTEM_PAIR = "/deviceManager/rest/{deviceId}/replicationpair/associate"
- QUERY_FILESYSTEM_FOR_REPLICATION = "/deviceManager/rest/{deviceId}/filesystem_for_replication"
+ QUERY_HYPER_METRO_FILE_SYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/HyperMetroPair/associate"
+ )
+ QUERY_HYPER_METRO_FILE_SYSTEM_COUNT = (
+ "/deviceManager/rest/{deviceId}/HyperMetroPair/count"
+ )
+ QUERY_REPLICATION_FILE_SYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/replicationpair/associate"
+ )
+ QUERY_FILESYSTEM_FOR_REPLICATION = (
+ "/deviceManager/rest/{deviceId}/filesystem_for_replication"
+ )
# 容灾搭建操作
HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/FsHyperMetroDomain"
HYPER_METRO_VSTORE_PAIR = "/deviceManager/rest/{deviceId}/vstore_pair"
HYPER_METRO_FILESYSTEM_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair"
- SPLIT_REMOTE_REPLICATION_FILESYSTEM_PAIR = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/split"
- SYNC_REMOTE_REPLICATION_FILESYSTEM_PAIR = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/sync"
- REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/{id}"
+ SPLIT_REMOTE_REPLICATION_FILESYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/split"
+ )
+ SYNC_REMOTE_REPLICATION_FILESYSTEM_PAIR = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/sync"
+ )
+ REMOTE_REPLICATION_FILESYSTEM_PAIR_OPT = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/{id}"
+ )
DELETE_HYPER_METRO_PAIR = "/deviceManager/rest/{deviceId}/HyperMetroPair/{id}"
DELETE_HYPER_METRO_VSTORE_PAIR = "/deviceManager/rest/{deviceId}/vstore_pair/{id}"
- SPLIT_FILESYSTEM_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/SplitFsHyperMetroDomain"
- DELETE_FILESYSTEM_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/FsHyperMetroDomain/{id}"
- CANCEL_SECONDARY_WRITE_LOCK = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/CANCEL_SECONDARY_WRITE_LOCK"
- SET_SECONDARY_WRITE_LOCK = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/SET_SECONDARY_WRITE_LOCK"
- SWAP_ROLE_FS_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/SwapRoleFsHyperMetroDomain"
+ SPLIT_FILESYSTEM_HYPER_METRO_DOMAIN = (
+ "/deviceManager/rest/{deviceId}/SplitFsHyperMetroDomain"
+ )
+ DELETE_FILESYSTEM_HYPER_METRO_DOMAIN = (
+ "/deviceManager/rest/{deviceId}/FsHyperMetroDomain/{id}"
+ )
+ CANCEL_SECONDARY_WRITE_LOCK = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/CANCEL_SECONDARY_WRITE_LOCK"
+ )
+ SET_SECONDARY_WRITE_LOCK = (
+ "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/SET_SECONDARY_WRITE_LOCK"
+ )
+ SWAP_ROLE_FS_HYPER_METRO_DOMAIN = (
+ "/deviceManager/rest/{deviceId}/SwapRoleFsHyperMetroDomain"
+ )
SWAP_ROLE_REPLICATION_PAIR = "/deviceManager/rest/{deviceId}/REPLICATIONPAIR/switch"
- CHANGE_FS_HYPER_METRO_DOMAIN_SECOND_ACCESS = "/deviceManager/rest/{deviceId}/ChangeFsHyperMetroDomainSecondAccess"
+ CHANGE_FS_HYPER_METRO_DOMAIN_SECOND_ACCESS = (
+ "/deviceManager/rest/{deviceId}/ChangeFsHyperMetroDomainSecondAccess"
+ )
JOIN_FS_HYPER_METRO_DOMAIN = "/deviceManager/rest/{deviceId}/JoinFsHyperMetroDomain"
# omtask rest api
@@ -59,24 +87,21 @@ class Constant:
QUERY_TASK_PROCESS = "/api/v2/task/{id}"
CREATE_REMOTE_REPLICATION_FILESYSTEM_PAIR = "/api/v2/task/protection/nas"
# cdp操作
- DELETE_FS_CDP_SCHEDULE = '/deviceManager/rest/{deviceId}/filesystem/remove_associate'
+ DELETE_FS_CDP_SCHEDULE = (
+ "/deviceManager/rest/{deviceId}/filesystem/remove_associate"
+ )
FULL_SYNC_MAX_TIME = 1500
CANTIAN_DOMAIN_PREFIX = "CantianDomain_%s%s"
-SPEED = {
- "low": 1,
- "medium": 2,
- "high": 3,
- "highest": 4
-}
+SPEED = {"low": 1, "medium": 2, "high": 3, "highest": 4}
class HealthStatus:
- Normal = "1" # 正常
- Faulty = "2" # 故障
- Invalid = "14" # 失效
+ Normal = "1" # 正常
+ Faulty = "2" # 故障
+ Invalid = "14" # 失效
class SystemRunningStatus:
@@ -88,21 +113,21 @@ class SystemRunningStatus:
class RemoteDeviceStatus:
- LinkUp = "10" # 已连接
- LinkDown = "11" # 未连接
- Disabled = "31" # 已禁用
- Connecting = "101" # 正在连接
- AirGapLinkDown = "118" # Air Gap断开
+ LinkUp = "10" # 已连接
+ LinkDown = "11" # 未连接
+ Disabled = "31" # 已禁用
+ Connecting = "101" # 正在连接
+ AirGapLinkDown = "118" # Air Gap断开
class ReplicationRunningStatus:
- Normal = "1" # 正常
- Synchronizing = "23" # 正在同步
- TobeRecovered = "33" # 待恢复
- Interrupted = "34" # 异常断开
- Split = "26" # 已分裂
- Invalid = "35" # 失效
- Standby = "110" # 备用
+ Normal = "1" # 正常
+ Synchronizing = "23" # 正在同步
+ TobeRecovered = "33" # 待恢复
+ Interrupted = "34" # 异常断开
+ Split = "26" # 已分裂
+ Invalid = "35" # 失效
+ Standby = "110" # 备用
class FilesystemRunningStatus:
@@ -113,68 +138,68 @@ class FilesystemRunningStatus:
class MetroDomainRunningStatus:
- Normal = "0" # 正常
- Recovering = "1" # 恢复中
- Faulty = "2" # 故障
- Split = "3" # 分裂
- ForceStarted = "4" # 强制拉起
- Invalid = "5" # 失效
+ Normal = "0" # 正常
+ Recovering = "1" # 恢复中
+ Faulty = "2" # 故障
+ Split = "3" # 分裂
+ ForceStarted = "4" # 强制拉起
+ Invalid = "5" # 失效
class VstorePairRunningStatus:
- Normal = "1" # 正常
- Unsynchronized = "25" # 未同步
- Split = "26" # 分裂
- Invalid = "35" # 失效
- ForceStarted = "93" # 强制启动
+ Normal = "1" # 正常
+ Unsynchronized = "25" # 未同步
+ Split = "26" # 分裂
+ Invalid = "35" # 失效
+ ForceStarted = "93" # 强制启动
class VstorePairConfigStatus:
- Normal = "0" # 正常
- Synchronizing = "1" # 同步中
- Unsynchronizing = "2" # 待同步
+ Normal = "0" # 正常
+ Synchronizing = "1" # 同步中
+ Unsynchronizing = "2" # 待同步
class FilesystemPairRunningStatus:
- Normal = "1" # 正常
- Synchronizing = "23" # 同步中
- Invalid = "35" # 失效
- Paused = "41" # 暂停
- ForceStarted = "93" # 强制启动
+ Normal = "1" # 正常
+ Synchronizing = "23" # 同步中
+ Invalid = "35" # 失效
+ Paused = "41" # 暂停
+ ForceStarted = "93" # 强制启动
ToBeSynchronized = "100" # 待同步
- Creating = "119" # 创建中
+ Creating = "119" # 创建中
class SecresAccess:
- AccessDenied = "1" # 禁止访问
- ReadOnly = "2" # 只读
- ReadAndWrite = "3" # 读写
+ AccessDenied = "1" # 禁止访问
+ ReadOnly = "2" # 只读
+ ReadAndWrite = "3" # 读写
class PoolStatus:
- PreCopy = "14" # 预拷贝
- Rebuilt = "16" # 重构
- Online = "27" # 在线
- Offline = "28" # 离线
- Balancing = "32" # 正在均衡
- Initializing = "53" # 初始化中
- Deleting = "106" # 删除中
+ PreCopy = "14" # 预拷贝
+ Rebuilt = "16" # 重构
+ Online = "27" # 在线
+ Offline = "28" # 离线
+ Balancing = "32" # 正在均衡
+ Initializing = "53" # 初始化中
+ Deleting = "106" # 删除中
class PoolHealth:
- Normal = "1" # 正常
- Faulty = "2" # 故障
- Degraded = "5" # 降级
+ Normal = "1" # 正常
+ Faulty = "2" # 故障
+ Degraded = "5" # 降级
class DomainAccess:
ReadAndWrite = "3" # 读写
- ReadOnly = "1" # 只读
+ ReadOnly = "1" # 只读
class ConfigRole:
- Secondary = "0" # 从端
- Primary = "1" # 主端
+ Secondary = "0" # 从端
+ Primary = "1" # 主端
class DataIntegrityStatus:
@@ -185,4 +210,3 @@ class DataIntegrityStatus:
class RepFileSystemNameRule:
NamePrefix = "ct_"
NameSuffix = "_rep"
-
diff --git a/pkg/deploy/action/write_config.py b/pkg/deploy/action/write_config.py
index a4bd0fc66d54d9e799a95aa8232e166bb8a0a891..b1269ce00fe6d6075e1398a80d49a0cff4e5b964 100644
--- a/pkg/deploy/action/write_config.py
+++ b/pkg/deploy/action/write_config.py
@@ -19,7 +19,7 @@ JVViGOl8yXdK5nFdBq+Oj4KDE5FMensDnZm0HZIyDxm9e2foOmHBYAKZN5novclj"""
def read_install_file():
- with open(INSTALL_FILE, 'r', encoding='utf8') as file_path:
+ with open(INSTALL_FILE, "r", encoding="utf8") as file_path:
_tmp = file_path.read()
info = json.loads(_tmp)
return info
@@ -28,12 +28,12 @@ def read_install_file():
def write_install_file(write_data):
modes = stat.S_IRWXU | stat.S_IROTH | stat.S_IRGRP
flag = os.O_RDWR | os.O_CREAT | os.O_TRUNC
- with os.fdopen(os.open(INSTALL_FILE, flag, modes), 'w') as file_path:
+ with os.fdopen(os.open(INSTALL_FILE, flag, modes), "w") as file_path:
config_params = json.dumps(write_data, indent=4)
file_path.write(config_params)
-if __name__ == '__main__':
+if __name__ == "__main__":
config_key = sys.argv[1]
config_value = sys.argv[2]
install_file_data = read_install_file()
diff --git a/pkg/install/Common.py b/pkg/install/Common.py
index 093739f5d16350f87c4bf5358d166afaf3adc22f..2d8169773c5256fa21b2cdc497704cec8d0643b7 100644
--- a/pkg/install/Common.py
+++ b/pkg/install/Common.py
@@ -10,6 +10,7 @@
import sys
+
sys.dont_write_bytecode = True
try:
@@ -25,8 +26,10 @@ class DefaultValue(object):
"""
Default value of some variables
"""
+
def __init__(self):
pass
+
# file mode
MAX_FILE_MODE = 640
MIN_FILE_MODE = 400
@@ -34,7 +37,7 @@ class DefaultValue(object):
MID_FILE_MODE = 500
KEY_DIRECTORY_MODE = 700
MAX_DIRECTORY_MODE = 750
- KEY_DIRECTORY_MODE_STR = '0700'
+ KEY_DIRECTORY_MODE_STR = "0700"
MIN_FILE_PERMISSION = 0o400
MID_FILE_PERMISSION = 0o500
KEY_FILE_PERMISSION = 0o600
@@ -43,7 +46,7 @@ class DefaultValue(object):
DOCKER_SHARE_DIR = "/home/regress/cantian_data"
DOCKER_DATA_DIR = "{}/data".format(DOCKER_SHARE_DIR)
DOCKER_GCC_DIR = "{}/gcc_home".format(DOCKER_SHARE_DIR)
-
+
# get os version and python version
CURRENT_OS = platform.system()
PY_VERSION = platform.python_version()
@@ -63,7 +66,7 @@ class DefaultValue(object):
while True:
# find the top path to be created
(tmpDir, topDirName) = os.path.split(tmpDir)
- if (os.path.exists(tmpDir) or topDirName == ""):
+ if os.path.exists(tmpDir) or topDirName == "":
tmpDir = os.path.join(tmpDir, topDirName)
break
return tmpDir
@@ -110,8 +113,13 @@ class DefaultValue(object):
bash_cmd = ["bash"]
if not stdin_list:
stdin_list = []
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
# in python 3, the stand output and stand error is
# unicode, we must decode it before return
diff --git a/pkg/install/funclib.py b/pkg/install/funclib.py
index 6569eade8b77adf3071313b4885650dbba53ee3b..57ee75850e8849746dd4870bcf3306373b65c469 100644
--- a/pkg/install/funclib.py
+++ b/pkg/install/funclib.py
@@ -4,6 +4,7 @@
# Copyright © Huawei Technologies Co., Ltd. 2010-2018. All rights reserved.
import sys
+
sys.dont_write_bytecode = True
try:
import os
@@ -28,15 +29,17 @@ except ImportError as import_err:
py_verion = platform.python_version()
-SYS_PATH = os.environ["PATH"].split(':')
+SYS_PATH = os.environ["PATH"].split(":")
class CommonValue(object):
"""
common value for some variables
"""
+
def __init__(self):
pass
+
# file mode
MAX_FILE_MODE = 640
MIN_FILE_MODE = 400
@@ -46,7 +49,7 @@ class CommonValue(object):
KEY_DIRECTORY_MODE = 700
MAX_DIRECTORY_MODE = 750
- KEY_DIRECTORY_MODE_STR = '0700'
+ KEY_DIRECTORY_MODE_STR = "0700"
MIN_FILE_PERMISSION = 0o400
MID_FILE_PERMISSION = 0o500
@@ -63,6 +66,7 @@ class DefaultConfigValue(object):
"""
default value for cantiand, cms, gss config
"""
+
def __init__(self):
pass
@@ -124,7 +128,7 @@ class DefaultConfigValue(object):
"_UNDO_ACTIVE_SEGMENTS": 64,
"USE_LARGE_PAGES": "FALSE",
"CTSTORE_MAX_OPEN_FILES": 40960,
- "REPLAY_PRELOAD_PROCESSES":0,
+ "REPLAY_PRELOAD_PROCESSES": 0,
"LOG_REPLAY_PROCESSES": 64,
"_LOG_MAX_FILE_SIZE": "4G",
"_LOG_BACKUP_FILE_COUNT": 128,
@@ -144,19 +148,19 @@ class DefaultConfigValue(object):
"REPL_SCRAM_AUTH": "TRUE",
"ENABLE_ACCESS_DC": "FALSE",
"REPLACE_PASSWORD_VERIFY": "TRUE",
- "LOG_HOME": "", #generate by installer
- "_SYS_PASSWORD": "", #input by user in command line parameter or from shell command interactively
- "INTERCONNECT_ADDR": "", #input by user in command line parameter
- "LSNR_ADDR": "", #input by user in command line parameter
+ "LOG_HOME": "", # generate by installer
+ "_SYS_PASSWORD": "", # input by user in command line parameter or from shell command interactively
+ "INTERCONNECT_ADDR": "", # input by user in command line parameter
+ "LSNR_ADDR": "", # input by user in command line parameter
"SHARED_PATH": "+vg1",
"ENABLE_IDX_KEY_LEN_CHECK": "FALSE",
"EMPTY_STRING_AS_NULL": "FALSE",
"MYSQL_METADATA_IN_CANTIAN": "TRUE",
"CT_CLUSTER_STRICT_CHECK": "TRUE",
"MYSQL_DEPLOY_GROUP_ID": "5000",
- "SQL_STATISTIC_TIME_LIMIT": 1000000
+ "SQL_STATISTIC_TIME_LIMIT": 1000000,
}
-
+
GSS_CONFIG = {
"INTERCONNECT_PORT": "1621",
"MAX_SESSION_NUMS": 4096,
@@ -164,38 +168,38 @@ class DefaultConfigValue(object):
"_LOG_LEVEL": 4294967295,
"STORAGE_MODE": "CLUSTER_RAID",
"INST_ID": 0,
- "LOG_HOME": "", #generate by installer
- "INTERCONNECT_ADDR": "", #input by user in command line parameter, same as CANTIAND_CONFIG#INTERCONNECT_ADDR
+ "LOG_HOME": "", # generate by installer
+ "INTERCONNECT_ADDR": "", # input by user in command line parameter, same as CANTIAND_CONFIG#INTERCONNECT_ADDR
}
-
+
CMS_CONFIG = {
"NODE_ID": 0,
- "GCC_HOME": "", #generate by installer
- "GCC_TYPE": "", #generate by installer
+ "GCC_HOME": "", # generate by installer
+ "GCC_TYPE": "", # generate by installer
"CMS_LOG": "/opt/cantian/log/cms",
"_PORT": 14587,
- "_IP": "", #input by user in command line parameter, same as CANTIAND_CONFIG#LSNR_ADDR
+ "_IP": "", # input by user in command line parameter, same as CANTIAND_CONFIG#LSNR_ADDR
"_LOG_LEVEL": 7,
"_SPLIT_BRAIN": "TRUE",
"_LOG_MAX_FILE_SIZE": "1G",
"_DETECT_DISK_TIMEOUT": 6000,
"_DISK_DETECT_FILE": "gcc_file,",
- "_STOP_RERUN_CMS_SCRIPT" : "/opt/cantian/common/script/cms_reg.sh",
+ "_STOP_RERUN_CMS_SCRIPT": "/opt/cantian/common/script/cms_reg.sh",
"_EXIT_NUM_COUNT_FILE": "/home/cantiandba/data/exit_num.txt",
- "_CMS_MES_THREAD_NUM" : "5",
- "_CMS_MES_MAX_SESSION_NUM" : "40",
- "_CMS_MES_MESSAGE_POOL_COUNT" : "1",
- "_CMS_MES_MESSAGE_QUEUE_COUNT" : "1",
- "_CMS_MES_MESSAGE_BUFF_COUNT" : "4096",
- "_CMS_MES_MESSAGE_CHANNEL_NUM" : "1",
- "_CMS_NODE_FAULT_THRESHOLD" : "5",
- "_USE_DBSTOR" : "FALSE",
- "_DBSTOR_NAMESPACE" : "",
- "_CMS_MES_PIPE_TYPE" : "TCP",
- "_CMS_MES_CRC_CHECK_SWITCH" : "TRUE",
+ "_CMS_MES_THREAD_NUM": "5",
+ "_CMS_MES_MAX_SESSION_NUM": "40",
+ "_CMS_MES_MESSAGE_POOL_COUNT": "1",
+ "_CMS_MES_MESSAGE_QUEUE_COUNT": "1",
+ "_CMS_MES_MESSAGE_BUFF_COUNT": "4096",
+ "_CMS_MES_MESSAGE_CHANNEL_NUM": "1",
+ "_CMS_NODE_FAULT_THRESHOLD": "5",
+ "_USE_DBSTOR": "FALSE",
+ "_DBSTOR_NAMESPACE": "",
+ "_CMS_MES_PIPE_TYPE": "TCP",
+ "_CMS_MES_CRC_CHECK_SWITCH": "TRUE",
"SHARED_PATH": "/home/cantiandba/data/data",
}
-
+
CANTIAND_DBG_CONFIG = {
"DBWR_PROCESSES": 8,
"SESSIONS": 8192,
@@ -229,7 +233,7 @@ class DefaultConfigValue(object):
"CR_POOL_SIZE": "2G",
"CR_POOL_COUNT": 4,
"VARIANT_MEMORY_AREA_SIZE": "1G",
- "REPLAY_PRELOAD_PROCESSES":0,
+ "REPLAY_PRELOAD_PROCESSES": 0,
"LOG_REPLAY_PROCESSES": 64,
"_LOG_MAX_FILE_SIZE": "1G",
"RECYCLEBIN": "FALSE",
@@ -251,43 +255,63 @@ class DefaultConfigValue(object):
"REPLACE_PASSWORD_VERIFY": "TRUE",
"INTERCONNECT_ADDR": "127.0.0.1",
"LSNR_ADDR": "127.0.0.1",
- "SHARED_PATH": "", #generate by installer
- "LOG_HOME": "", #generate by installer
- "_SYS_PASSWORD": "", #input by user in command line parameter or from shell command interactively
+ "SHARED_PATH": "", # generate by installer
+ "LOG_HOME": "", # generate by installer
+ "_SYS_PASSWORD": "", # input by user in command line parameter or from shell command interactively
"ENABLE_IDX_KEY_LEN_CHECK": "FALSE",
"EMPTY_STRING_AS_NULL": "FALSE",
"MYSQL_METADATA_IN_CANTIAN": "TRUE",
"CT_CLUSTER_STRICT_CHECK": "TRUE",
- "MYSQL_DEPLOY_GROUP_ID": "5000"
+ "MYSQL_DEPLOY_GROUP_ID": "5000",
}
class SingleNodeConfig(object):
@staticmethod
- def get_config(in_container = False):
- cantiand_cfg = DefaultConfigValue.CANTIAND_CONFIG if not in_container else DefaultConfigValue.CANTIAND_DBG_CONFIG
- return cantiand_cfg, DefaultConfigValue.CMS_CONFIG, DefaultConfigValue.GSS_CONFIG
+ def get_config(in_container=False):
+ cantiand_cfg = (
+ DefaultConfigValue.CANTIAND_CONFIG
+ if not in_container
+ else DefaultConfigValue.CANTIAND_DBG_CONFIG
+ )
+ return (
+ cantiand_cfg,
+ DefaultConfigValue.CMS_CONFIG,
+ DefaultConfigValue.GSS_CONFIG,
+ )
class ClusterNode0Config(object):
@staticmethod
- def get_config(in_container = False):
- cantiand_cfg = DefaultConfigValue.CANTIAND_CONFIG if not in_container else DefaultConfigValue.CANTIAND_DBG_CONFIG
+ def get_config(in_container=False):
+ cantiand_cfg = (
+ DefaultConfigValue.CANTIAND_CONFIG
+ if not in_container
+ else DefaultConfigValue.CANTIAND_DBG_CONFIG
+ )
if in_container:
cantiand_cfg["LSNR_ADDR"] = "192.168.86.1"
cantiand_cfg["INTERCONNECT_ADDR"] = "192.168.86.1;192.168.86.2"
cantiand_cfg["INTERCONNECT_PORT"] = "1601,1602"
DefaultConfigValue.GSS_CONFIG["INTERCONNECT_PORT"] = "1621,1621"
- return cantiand_cfg, DefaultConfigValue.CMS_CONFIG, DefaultConfigValue.GSS_CONFIG
+ return (
+ cantiand_cfg,
+ DefaultConfigValue.CMS_CONFIG,
+ DefaultConfigValue.GSS_CONFIG,
+ )
class ClusterNode1Config(object):
@staticmethod
- def get_config(in_container = False):
- cantiand_cfg = DefaultConfigValue.CANTIAND_CONFIG if not in_container else DefaultConfigValue.CANTIAND_DBG_CONFIG
+ def get_config(in_container=False):
+ cantiand_cfg = (
+ DefaultConfigValue.CANTIAND_CONFIG
+ if not in_container
+ else DefaultConfigValue.CANTIAND_DBG_CONFIG
+ )
if in_container:
cantiand_cfg["LSNR_ADDR"] = "192.168.86.2"
cantiand_cfg["INTERCONNECT_ADDR"] = "192.168.86.1;192.168.86.2"
@@ -296,7 +320,11 @@ class ClusterNode1Config(object):
DefaultConfigValue.GSS_CONFIG["INST_ID"] = 1
DefaultConfigValue.GSS_CONFIG["INTERCONNECT_PORT"] = "1621,1621"
DefaultConfigValue.CMS_CONFIG["NODE_ID"] = 1
- return cantiand_cfg, DefaultConfigValue.CMS_CONFIG, DefaultConfigValue.GSS_CONFIG
+ return (
+ cantiand_cfg,
+ DefaultConfigValue.CMS_CONFIG,
+ DefaultConfigValue.GSS_CONFIG,
+ )
class SshToolException(Exception):
@@ -312,8 +340,13 @@ def exec_popen(cmd):
:return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if py_verion[0] == "3":
stdout, stderr = pobj.communicate(cmd.encode())
@@ -358,43 +391,47 @@ def get_abs_path(_file):
def check_path(path_type_in):
path_len = len(path_type_in)
i = 0
- a_ascii = ord('a')
- z_ascii = ord('z')
- A_ascii = ord('A')
- Z_ascii = ord('Z')
- num0_ascii = ord('0')
- num9_ascii = ord('9')
- blank_ascii = ord(' ')
+ a_ascii = ord("a")
+ z_ascii = ord("z")
+ A_ascii = ord("A")
+ Z_ascii = ord("Z")
+ num0_ascii = ord("0")
+ num9_ascii = ord("9")
+ blank_ascii = ord(" ")
sep1_ascii = ord(os.sep)
- sep2_ascii = ord('_')
- sep3_ascii = ord(':')
- sep4_ascii = ord('-')
- sep5_ascii = ord('.')
+ sep2_ascii = ord("_")
+ sep3_ascii = ord(":")
+ sep4_ascii = ord("-")
+ sep5_ascii = ord(".")
CURRENT_OS = platform.system()
if CURRENT_OS == "Linux":
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if(not (a_ascii <= char_check <= z_ascii
- or A_ascii <= char_check <= Z_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check == blank_ascii
- or char_check == sep1_ascii
- or char_check == sep2_ascii
- or char_check == sep4_ascii
- or char_check == sep5_ascii)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or A_ascii <= char_check <= Z_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check == blank_ascii
+ or char_check == sep1_ascii
+ or char_check == sep2_ascii
+ or char_check == sep4_ascii
+ or char_check == sep5_ascii
+ ):
return False
elif CURRENT_OS == "Windows":
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if(not (a_ascii <= char_check <= z_ascii
- or A_ascii <= char_check <= Z_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check == blank_ascii
- or char_check == sep1_ascii
- or char_check == sep2_ascii
- or char_check == sep3_ascii
- or char_check == sep4_ascii)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or A_ascii <= char_check <= Z_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check == blank_ascii
+ or char_check == sep1_ascii
+ or char_check == sep2_ascii
+ or char_check == sep3_ascii
+ or char_check == sep4_ascii
+ ):
return False
else:
print("Error: Can not support this platform.")
@@ -403,10 +440,10 @@ def check_path(path_type_in):
def check_ssh_connection(ips):
- '''
+ """
check ssh connection without password, if success to
connect the node user trust to the node has be created
- '''
+ """
failed_ip = []
success_ip = []
ssh = get_abs_path("ssh")
@@ -418,30 +455,39 @@ def check_ssh_connection(ips):
cmd += "-o ServerAliveInterval=100 -o ServerAliveCountMax=36 "
cmd += "-n 'echo Last login'"
process = Execution(cmd)
- idx =\
- process.expect(['Permission denied',
- 'Last login',
- 'Are you sure you want to continue connecting',
- 'Password', 'ssh:', TimeoutException,
- EOFException], 60)
+ idx = process.expect(
+ [
+ "Permission denied",
+ "Last login",
+ "Are you sure you want to continue connecting",
+ "Password",
+ "ssh:",
+ TimeoutException,
+ EOFException,
+ ],
+ 60,
+ )
if idx == 0:
failed_ip.append(ip)
elif idx == 1:
success_ip.append(ip)
process.sendLine("exit")
elif idx == 2:
- process.sendLine('yes')
- idx = process.expect(['Permission denied', 'Last login',
- 'Password', 'ssh:'], 60)
+ process.sendLine("yes")
+ idx = process.expect(
+ ["Permission denied", "Last login", "Password", "ssh:"], 60
+ )
if idx == 0:
failed_ip.append(ip)
elif idx == 1:
success_ip.append(ip)
process.sendLine("exit")
elif idx == 2:
- raise Exception("Check ssh connection"
- " failed,check your ssh"
- " configure file please.")
+ raise Exception(
+ "Check ssh connection"
+ " failed,check your ssh"
+ " configure file please."
+ )
elif idx == 3:
raise Exception(str(process.context_buffer))
@@ -455,10 +501,12 @@ def check_ssh_connection(ips):
# password authentication method,
# so we must expect Password key word
# to avoid to wait to timeout
- raise Exception("Check ssh"
- " connection failed,"
- " check your ssh"
- " configure file please.")
+ raise Exception(
+ "Check ssh"
+ " connection failed,"
+ " check your ssh"
+ " configure file please."
+ )
elif idx == 4:
raise Exception(str(process.context_buffer))
@@ -472,6 +520,7 @@ class CommandTool(object):
"""
class for CommandTool
"""
+
def __init__(self, log):
self.log = log
@@ -484,16 +533,16 @@ class CommandTool(object):
raise SshToolException("Can't find bash command.")
def __execute(self, arg):
- '''
+ """
execute shell command by ssh to login remote host
arg - list for argument, ip address and shell command
- '''
+ """
ip = arg[0]
cmd = arg[1]
ssh_options = " -o ServerAliveInterval=100 "
ssh_options += " -o ServerAliveCountMax=36 "
cmd = "export TMOUT=0; %s" % cmd
- ssh_cmd = "ssh %s %s \"%s\"" % (ssh_options, ip, cmd)
+ ssh_cmd = 'ssh %s %s "%s"' % (ssh_options, ip, cmd)
return [ip, exec_popen(ssh_cmd)]
def __scp(self, arg):
@@ -511,14 +560,14 @@ class CommandTool(object):
def __interactive_input(self, process, ip, pw1, pw2):
- pw_str = 'Please enter password'
+ pw_str = "Please enter password"
self.log("Expect(%s) on: [%s]" % (ip, pw_str))
- process.expect(['Please enter password'])
+ process.expect(["Please enter password"])
self.log("Send(%s) password." % ip)
process.sendLine(pw1)
if pw2:
self.log("Expect(%s) on: [%s]" % (ip, pw_str))
- process.expect(['Please enter password'])
+ process.expect(["Please enter password"])
self.log("Send(%s) password." % ip)
process.sendLine(pw2)
@@ -544,9 +593,7 @@ class CommandTool(object):
if ip:
process = Execution("%s %s %s" % (self.ssh, ssh_options, ip))
pdict = user[1]
- self.log("ssh session info:\n%s %s %s" % (self.ssh,
- ssh_options,
- ip))
+ self.log("ssh session info:\n%s %s %s" % (self.ssh, ssh_options, ip))
else:
process = Execution("%s" % (self.bash))
self.log("bash session")
@@ -562,26 +609,26 @@ class CommandTool(object):
if user:
if instlist:
for inst in instlist:
- p0 = pdict[str(ip)+"_"+inst][0]
- p1 = pdict[str(ip)+"_"+inst][1]
+ p0 = pdict[str(ip) + "_" + inst][0]
+ p1 = pdict[str(ip) + "_" + inst][1]
self.__interactive_input(process, ip, p0, p1)
else:
self.__interactive_input(process, ip, user[1], user[2])
self.log("Expect(%s) on: [Done, Upgrade Failed]" % ip)
- idx = process.expect(['Done', 'Upgrade Failed'], timeout=51200)
+ idx = process.expect(["Done", "Upgrade Failed"], timeout=51200)
if idx == 0:
self.log("Expect(%s) received Done." % ip)
- process.sendLine('exit')
- return [ip, ('0', str(process.context_before))]
+ process.sendLine("exit")
+ return [ip, ("0", str(process.context_before))]
self.log("Expect(%s) received Upgrade Failed." % ip)
- process.sendLine('exit')
- return [ip, ('1', str(process.context_buffer))]
+ process.sendLine("exit")
+ return [ip, ("1", str(process.context_buffer))]
except (TimeoutException, EOFException) as err:
self.log("Expect(%s) timeout." % ip)
if process:
- process.sendLine('exit')
- return [ip, ('1', str(err)+'\n'+str(process.context_buffer))]
+ process.sendLine("exit")
+ return [ip, ("1", str(err) + "\n" + str(process.context_buffer))]
def execute_local(self, cmd):
ret_code, output, errput = exec_popen(cmd)
@@ -589,9 +636,9 @@ class CommandTool(object):
return ret_code, output
def expect_execute(self, ip_cmd_map):
- '''
+ """
execute shell command with expect
- '''
+ """
try:
pool = Pool(len(ip_cmd_map))
result = pool.map(self.__expect_execute, ip_cmd_map)
@@ -600,15 +647,13 @@ class CommandTool(object):
raise
def execute_in_node(self, ip_cmd_map):
- '''
- '''
+ """ """
pool = Pool(len(ip_cmd_map))
result = pool.map(self.__execute, ip_cmd_map)
return self.__parse(result)
def scp_in_node(self, ip_dest_map):
- '''
- '''
+ """ """
pool = Pool(len(ip_dest_map))
result = pool.map(self.__scp, ip_dest_map)
return self.__parse(result)
@@ -623,23 +668,22 @@ class CommandTool(object):
success_node = []
failed_node = []
for rs in result:
- if str(rs[1][0]) != '0':
+ if str(rs[1][0]) != "0":
ret_code = 1
failed_node.append(rs)
success_node.append(rs)
return ret_code, success_node, failed_node
def expect_ctsql(self, ip_cmd_map):
- '''
+ """
expect execute ctsql and sql command
- '''
+ """
pool = Pool(len(ip_cmd_map))
result = pool.map(self.__expect_ctsql, ip_cmd_map)
return self.__parse(result)
def __expect_ctsql(self, arg):
- '''
- '''
+ """ """
ip = arg[0]
ctsql = arg[1]
sql = arg[2]
@@ -655,21 +699,22 @@ class CommandTool(object):
process.sendLine(ctsql)
if passwd:
- process.expect(['Please enter password'])
+ process.expect(["Please enter password"])
process.sendLine(passwd)
- process.expect(['SQL>'])
+ process.expect(["SQL>"])
process.sendLine(sql)
- idx = process.expect(['rows fetched', 'Succeed', 'CT-', 'SQL>'],
- timeout=600)
+ idx = process.expect(
+ ["rows fetched", "Succeed", "CT-", "SQL>"], timeout=600
+ )
if idx == 0 or idx == 1:
- process.sendLine('exit')
- return [ip, ('0', str(process.context_before))]
- process.sendLine('exit')
- return [ip, '1', str(process.context_buffer)]
+ process.sendLine("exit")
+ return [ip, ("0", str(process.context_before))]
+ process.sendLine("exit")
+ return [ip, "1", str(process.context_buffer)]
except (TimeoutException, EOFException):
if process:
- process.sendLine('exit')
- return [ip, ('1', str(process.context_buffer))]
+ process.sendLine("exit")
+ return [ip, ("1", str(process.context_buffer))]
class ExpectException(Exception):
@@ -694,13 +739,16 @@ class Execution(object):
if py_verion[0] == "3":
ALLOWED_STRING_TYPES = (str,)
else:
- ALLOWED_STRING_TYPES = (type(b''), type(''), type(u''),)
+ ALLOWED_STRING_TYPES = (
+ type(b""),
+ type(""),
+ type(""),
+ )
LINE_SEPERATOR = os.linesep
- CTRLF = '\r\n'
+ CTRLF = "\r\n"
- def __init__(self, command, timeout=1800, maxReadSize=4096,
- delimiter=None):
+ def __init__(self, command, timeout=1800, maxReadSize=4096, delimiter=None):
self.matcher = None
self.context_before = None
@@ -724,11 +772,13 @@ class Execution(object):
try:
from termios import CEOF
from termios import CINTR
+
(self._INTR, self._EOF) = (CINTR, CEOF)
except ImportError:
try:
from termios import VEOF
from termios import VINTR
+
fp = sys.__stdin__.fileno()
self._INTR = ord(termios.tcgetattr(fp)[6][VINTR])
self._EOF = ord(termios.tcgetattr(fp)[6][VEOF])
@@ -739,13 +789,13 @@ class Execution(object):
@staticmethod
def _ascii(content):
if not isinstance(content, bytes):
- return content.encode('ascii')
+ return content.encode("ascii")
return content
@staticmethod
def _utf8(content):
if not isinstance(content, bytes):
- return content.encode('utf-8')
+ return content.encode("utf-8")
return content
def __del__(self):
@@ -757,29 +807,29 @@ class Execution(object):
def __str__(self):
s = list()
- s.append('%r' % self)
- s.append('after: %r' % self.context_after)
- s.append('pid: %s' % str(self.childPid))
- s.append('child_fd: %s' % str(self.childFd))
- s.append('closed: %s' % str(self.is_closed))
- s.append('timeout: %s' % str(self.timeout))
- s.append('delimiter: %s' % str(self.delimiter))
- s.append('maxReadSize: %s' % str(self.maxReadSize))
- return '\n'.join(s)
+ s.append("%r" % self)
+ s.append("after: %r" % self.context_after)
+ s.append("pid: %s" % str(self.childPid))
+ s.append("child_fd: %s" % str(self.childFd))
+ s.append("closed: %s" % str(self.is_closed))
+ s.append("timeout: %s" % str(self.timeout))
+ s.append("delimiter: %s" % str(self.delimiter))
+ s.append("maxReadSize: %s" % str(self.maxReadSize))
+ return "\n".join(s)
def _excute(self, command):
self.args = shlex.split(command)
if self.childPid is not None:
- raise ExpectException('The pid member must be None.')
+ raise ExpectException("The pid member must be None.")
if self.command is None:
- raise ExpectException('The command member must not be None.')
+ raise ExpectException("The command member must not be None.")
try:
self.childPid, self.childFd = pty.fork()
except OSError as err: # pragma: no cover
- raise ExpectException('pty.fork() failed: ' + str(err))
+ raise ExpectException("pty.fork() failed: " + str(err))
if self.childPid == pty.CHILD:
# child
@@ -819,19 +869,18 @@ class Execution(object):
# give kernel time to update process status.
time.sleep(self.closeDelay)
if self.isAlive() and not self.terminate():
- raise ExpectException('Could not terminate the child.')
+ raise ExpectException("Could not terminate the child.")
self.childFd = -1
self.is_closed = True
def setEcho(self, state):
- err_msg = ('method setEcho() may not be available on'
- ' this operating system.')
+ err_msg = "method setEcho() may not be available on" " this operating system."
try:
child_attr = termios.tcgetattr(self.childFd)
except termios.error as e:
if e.args[0] == errno.EINVAL:
- raise IOError(e.args[0], '%s: %s.' % (e.args[1], err_msg))
+ raise IOError(e.args[0], "%s: %s." % (e.args[1], err_msg))
raise
if state:
@@ -843,12 +892,12 @@ class Execution(object):
termios.tcsetattr(self.childFd, termios.TCSANOW, child_attr)
except IOError as e:
if e.args[0] == errno.EINVAL:
- raise IOError(e.args[0], '%s: %s.' % (e.args[1], err_msg))
+ raise IOError(e.args[0], "%s: %s." % (e.args[1], err_msg))
raise
def readNonBlock(self, size=1, timeout=-1):
if self.is_closed:
- raise ValueError('I/O operation on closed file.')
+ raise ValueError("I/O operation on closed file.")
if timeout == -1:
timeout = self.timeout
@@ -858,16 +907,16 @@ class Execution(object):
rfds, _, _ = self.__select([self.childFd], [], [], 0)
if not rfds:
self.eofFlag = True
- raise EOFException('End Of File (EOF). Braindead platform.')
+ raise EOFException("End Of File (EOF). Braindead platform.")
rfds, _, _ = self.__select([self.childFd], [], [], timeout)
if not rfds:
if not self.isAlive():
self.eofFlag = True
- raise EOFException('Reach end of File (EOF).')
+ raise EOFException("Reach end of File (EOF).")
else:
- raise TimeoutException('Timeout exceeded.')
+ raise TimeoutException("Timeout exceeded.")
if self.childFd in rfds:
try:
@@ -875,17 +924,19 @@ class Execution(object):
except OSError as e:
if e.args[0] == errno.EIO:
self.eofFlag = True
- raise EOFException('Reach End Of File (EOF). '
- 'Exception style platform.')
+ raise EOFException(
+ "Reach End Of File (EOF). " "Exception style platform."
+ )
raise
- if child_data == b'':
+ if child_data == b"":
self.eofFlag = True
- raise EOFException('Reach end Of File (EOF).'
- ' Empty string style platform.')
+ raise EOFException(
+ "Reach end Of File (EOF)." " Empty string style platform."
+ )
return child_data
- raise ExpectException('Reached an unexpected state.')
+ raise ExpectException("Reached an unexpected state.")
# pragma: no cover
def read(self, size=-1, timeout=-1):
@@ -902,7 +953,7 @@ class Execution(object):
def _send(self, content):
return os.write(self.childFd, content)
- def sendLine(self, content=''):
+ def sendLine(self, content=""):
sendCount = self.send(content)
sendCount = sendCount + self.send(self.LINE_SEPERATOR)
return sendCount
@@ -944,18 +995,17 @@ class Execution(object):
except OSError as e:
# No child processes
if e.errno == errno.ECHILD:
- raise ExpectException('process already not exist.')
+ raise ExpectException("process already not exist.")
else:
raise e
if childPid == 0:
try:
- childPid, childStatus = os.waitpid(self.childPid,
- waitpidOptions)
+ childPid, childStatus = os.waitpid(self.childPid, waitpidOptions)
except OSError as err:
# pragma: no cover
if err.errno == errno.ECHILD:
- raise ExpectException('process already not exist.')
+ raise ExpectException("process already not exist.")
else:
raise
@@ -968,7 +1018,7 @@ class Execution(object):
if os.WIFEXITED(childStatus) or os.WIFSIGNALED(childStatus):
self.is_terminated = True
elif os.WIFSTOPPED(childStatus):
- raise ExpectException('process already been stopped.')
+ raise ExpectException("process already been stopped.")
return False
@@ -985,10 +1035,13 @@ class Execution(object):
def raisePatternTypeError(self, pattern):
raise TypeError(
- 'got %s as pattern, must be one'
- ' of: %s, pexpect.EOFException, pexpect.TIMEOUTException'
- % (type(pattern), ', '.join([str(ast) for ast in
- self.ALLOWED_STRING_TYPES])))
+ "got %s as pattern, must be one"
+ " of: %s, pexpect.EOFException, pexpect.TIMEOUTException"
+ % (
+ type(pattern),
+ ", ".join([str(ast) for ast in self.ALLOWED_STRING_TYPES]),
+ )
+ )
def compilePatternList(self, pattern_list):
if not pattern_list:
@@ -1005,7 +1058,7 @@ class Execution(object):
patternList.append(EOFException)
elif pattern is TimeoutException:
patternList.append(TimeoutException)
- elif isinstance(pattern, type(re.compile(''))):
+ elif isinstance(pattern, type(re.compile(""))):
patternList.append(pattern)
else:
self.raisePatternTypeError(pattern)
@@ -1030,16 +1083,17 @@ class Execution(object):
while True:
matchIndex = re_searcher.search(context_buffer)
if matchIndex > -1:
- self.context_buffer = context_buffer[re_searcher.end:]
+ self.context_buffer = context_buffer[re_searcher.end :]
self.context_before = context_buffer[: re_searcher.start]
- self.context_after = context_buffer[re_searcher.start:
- re_searcher.end]
+ self.context_after = context_buffer[
+ re_searcher.start : re_searcher.end
+ ]
self.context_match = re_searcher.context_match
self.matchIndex = matchIndex
return self.matchIndex
# no match at this point
if (timeout is not None) and (timeout < 0):
- raise TimeoutException('Timeout exceeded in loopExpect().')
+ raise TimeoutException("Timeout exceeded in loopExpect().")
# not timed out, continue read
more_context = self.readNonBlock(self.maxReadSize, timeout)
time.sleep(0.0001)
@@ -1080,8 +1134,8 @@ class Execution(object):
raise
def setWinSize(self, rows, cols):
- win_size = getattr(termios, 'TIOCSWINSZ', -2146929561)
- s_size = struct.pack('HHHH', rows, cols, 0, 0)
+ win_size = getattr(termios, "TIOCSWINSZ", -2146929561)
+ s_size = struct.pack("HHHH", rows, cols, 0, 0)
fcntl.ioctl(self.fileno(), win_size, s_size)
def __select(self, inputs, outputs, errputs, timeout=None):
@@ -1095,7 +1149,7 @@ class Execution(object):
if timeout is not None:
timeout = endTime - time.time()
if timeout < 0:
- return([], [], [])
+ return ([], [], [])
else:
raise
@@ -1108,8 +1162,7 @@ class RESearcher(object):
self.start = None
self.context_match = None
self.end = None
- for index, pattern_item in zip(list(range(len(pattern_list))),
- pattern_list):
+ for index, pattern_item in zip(list(range(len(pattern_list))), pattern_list):
if pattern_item is EOFException:
self.eof_index = index
continue
@@ -1122,21 +1175,23 @@ class RESearcher(object):
result_list = list()
for index, pattern_item in self._searches:
try:
- result_list.append((index, ' %d: re.compile("%s")' %
- (index, pattern_item.pattern)))
+ result_list.append(
+ (index, ' %d: re.compile("%s")' % (index, pattern_item.pattern))
+ )
except UnicodeEncodeError:
- result_list.append((index, ' %d: re.compile(%r)' %
- (index, pattern_item.pattern)))
- result_list.append((-1, 'RESearcher:'))
+ result_list.append(
+ (index, " %d: re.compile(%r)" % (index, pattern_item.pattern))
+ )
+ result_list.append((-1, "RESearcher:"))
if self.eof_index >= 0:
- result_list.append((self.eof_index, ' %d: EOF' %
- self.eof_index))
+ result_list.append((self.eof_index, " %d: EOF" % self.eof_index))
if self.timeout_index >= 0:
- result_list.append((self.timeout_index, ' %d: TIMEOUT' %
- self.timeout_index))
+ result_list.append(
+ (self.timeout_index, " %d: TIMEOUT" % self.timeout_index)
+ )
result_list.sort()
s_result_list = list(zip(*result_list))[1]
- return '\n'.join(s_result_list)
+ return "\n".join(s_result_list)
def search(self, content):
first_match_index = None
diff --git a/pkg/install/install.py b/pkg/install/install.py
index c3ed8e3d34d22e0403e4c49aaa87c5f9c718bac3..c9b50d71cc976b7172733efaedf21146c399a7f3 100644
--- a/pkg/install/install.py
+++ b/pkg/install/install.py
@@ -5,6 +5,7 @@
import sys
+
sys.dont_write_bytecode = True
try:
@@ -26,7 +27,13 @@ try:
import json
import threading
import signal
- from funclib import CommonValue, SingleNodeConfig, ClusterNode0Config, ClusterNode1Config, DefaultConfigValue
+ from funclib import (
+ CommonValue,
+ SingleNodeConfig,
+ ClusterNode0Config,
+ ClusterNode1Config,
+ DefaultConfigValue,
+ )
import argparse
PYTHON242 = "2.4.2"
@@ -38,8 +45,7 @@ try:
elif gPyVersion >= PYTHON25:
import hashlib
else:
- print("This install script can not support python version: %s"
- % gPyVersion)
+ print("This install script can not support python version: %s" % gPyVersion)
sys.exit(1)
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
@@ -52,18 +58,31 @@ CURRENT_OS = platform.system()
CANTIAND = "cantiand"
CANTIAND_WITH_MYSQL = "cantiand_with_mysql"
-CANTIAND_WITH_MYSQL_ST = "cantiand_with_mysql_st" #single process mode with mysql llt
+CANTIAND_WITH_MYSQL_ST = "cantiand_with_mysql_st" # single process mode with mysql llt
CANTIAND_IN_CLUSTER = "cantiand_in_cluster"
CANTIAND_WITH_MYSQL_IN_CLUSTER = "cantiand_with_mysql_in_cluster"
MYSQLD = "mysqld"
-INSTALL_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "installdb.sh")
+INSTALL_SCRIPT = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "installdb.sh"
+)
-VALID_RUNNING_MODE = {CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST, CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER, MYSQLD}
+VALID_RUNNING_MODE = {
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ MYSQLD,
+}
-VALID_SINGLE_MYSQL_RUNNING_MODE = {CANTIAND_WITH_MYSQL_IN_CLUSTER, CANTIAND_WITH_MYSQL_ST, CANTIAND_WITH_MYSQL}
+VALID_SINGLE_MYSQL_RUNNING_MODE = {
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL,
+}
-CLUSTER_SIZE = 2 # default to 2, 4 node cluster mode need add parameter to specify this
+CLUSTER_SIZE = 2 # default to 2, 4 node cluster mode need add parameter to specify this
INSTALL_SCPRIT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_DIR = os.path.abspath(os.path.join(INSTALL_SCPRIT_DIR, "../.."))
@@ -81,13 +100,17 @@ MYSQL_BIN_DIR = "/usr/local/mysql"
CTC_LIB_DIR = "/opt/cantian/mysql/server/"
MYSQL_LOG_FILE = os.path.join(MYSQL_DATA_DIR, "mysql.log")
-MYSQL_LIB_OUTPUT_DIR = os.path.join(PKG_DIR, "cantian-connector-mysql/mysql-source/bld_debug/plugin_output_directory")
+MYSQL_LIB_OUTPUT_DIR = os.path.join(
+ PKG_DIR, "cantian-connector-mysql/mysql-source/bld_debug/plugin_output_directory"
+)
CTC_PLUGIN = "ha_ctc.so"
+
class Options(object):
"""
command line options
"""
+
def __init__(self):
self.log_file = ""
self.install_user_privilege = ""
@@ -114,10 +137,10 @@ class Options(object):
# flag indicate user using gss storage
self.use_gss = False
-
+
# flag indicate user using dbstor storage
self.use_dbstor = False
-
+
self.link_type = "TCP"
self.link_type_from_para = False
@@ -138,23 +161,31 @@ class Options(object):
# In slave cluster:
self.slave_cluster = False
-
+
# is re-launch mysql server
self.is_relaunch = False
+
g_opts = Options()
+
def check_directories():
global MYSQL_CODE_DIR
global MYSQL_VERSION
- global MYSQL_DATA_DIR
+ global MYSQL_DATA_DIR
global MYSQL_BIN_DIR
global CONFIG_FILE
global MYSQL_LOG_FILE
if os.path.exists("/.dockerenv") and (g_opts.cantian_in_container != "1"):
MYSQL_CODE_DIR = os.path.join(PKG_DIR, "cantian-connector-mysql/mysql-source")
- is_mysql_metadata_in_cantian = (DefaultConfigValue.CANTIAND_CONFIG["MYSQL_METADATA_IN_CANTIAN"] == "TRUE")
- MYSQL_VERSION = VERSION_DOCKER_META if is_mysql_metadata_in_cantian else VERSION_DOCKER_NOMETA
+ is_mysql_metadata_in_cantian = (
+ DefaultConfigValue.CANTIAND_CONFIG["MYSQL_METADATA_IN_CANTIAN"] == "TRUE"
+ )
+ MYSQL_VERSION = (
+ VERSION_DOCKER_META
+ if is_mysql_metadata_in_cantian
+ else VERSION_DOCKER_NOMETA
+ )
MYSQL_DATA_DIR = "/data/data"
MYSQL_BIN_DIR = "/usr/local/mysql"
CTC_LIB_DIR = "/home/regress/cantian-connector-mysql/mysql-source/"
@@ -165,13 +196,17 @@ def check_directories():
_tmp = conf.read()
info = json.loads(_tmp)
is_mysql_metadata_in_cantian = info.get("mysql_metadata_in_cantian")
- MYSQL_VERSION = VERSION_ENV_META if is_mysql_metadata_in_cantian else VERSION_ENV_NOMETA
- MYSQL_DATA_DIR = "/mnt/dbdata/remote/metadata_{metadata}/node{node}"\
- .format(metadata=info.get("storage_metadata_fs", ""), node=info["node_id"])
+ MYSQL_VERSION = (
+ VERSION_ENV_META if is_mysql_metadata_in_cantian else VERSION_ENV_NOMETA
+ )
+ MYSQL_DATA_DIR = "/mnt/dbdata/remote/metadata_{metadata}/node{node}".format(
+ metadata=info.get("storage_metadata_fs", ""), node=info["node_id"]
+ )
MYSQL_BIN_DIR = "/opt/cantian/mysql/install/mysql"
MYSQL_LOG_FILE = os.path.join(MYSQL_DATA_DIR, "mysql.log")
+
def check_kernel_parameter(para):
"""Is kernel parameter invalid?"""
pattern = re.compile("^[A-Z_][A-Z0-9_]+$")
@@ -186,10 +221,10 @@ def check_invalid_symbol(para):
:param para: parameter's value
:return: NA
"""
- symbols = ["|", "&", "$", ">", "<", "\"", "'", "`"]
+ symbols = ["|", "&", "$", ">", "<", '"', "'", "`"]
for symbol in symbols:
if para.find(symbol) > -1:
- print("There is invalid symbol \"%s\" in %s" % (symbol, para))
+ print('There is invalid symbol "%s" in %s' % (symbol, para))
sys.exit(1)
@@ -201,7 +236,7 @@ def all_zero_addr_after_ping(nodeIp):
"""
if not nodeIp:
return False
- allowed_chars = set('0:.')
+ allowed_chars = set("0:.")
if set(nodeIp).issubset(allowed_chars):
return True
else:
@@ -215,64 +250,92 @@ def checkPath(path_type_in):
:return: weather validity
"""
path_len = len(path_type_in)
- a_ascii = ord('a')
- z_ascii = ord('z')
- A_ascii = ord('A')
- Z_ascii = ord('Z')
- num0_ascii = ord('0')
- num9_ascii = ord('9')
- blank_ascii = ord(' ')
+ a_ascii = ord("a")
+ z_ascii = ord("z")
+ A_ascii = ord("A")
+ Z_ascii = ord("Z")
+ num0_ascii = ord("0")
+ num9_ascii = ord("9")
+ blank_ascii = ord(" ")
sep1_ascii = ord(os.sep)
- sep2_ascii = ord('_')
- sep3_ascii = ord(':')
- sep4_ascii = ord('-')
- sep5_ascii = ord('.')
- char_check_list1 = [blank_ascii,
- sep1_ascii,
- sep2_ascii,
- sep4_ascii,
- sep5_ascii
- ]
-
- char_check_list2 = [blank_ascii,
- sep1_ascii,
- sep2_ascii,
- sep3_ascii,
- sep4_ascii
- ]
+ sep2_ascii = ord("_")
+ sep3_ascii = ord(":")
+ sep4_ascii = ord("-")
+ sep5_ascii = ord(".")
+ char_check_list1 = [blank_ascii, sep1_ascii, sep2_ascii, sep4_ascii, sep5_ascii]
+
+ char_check_list2 = [blank_ascii, sep1_ascii, sep2_ascii, sep3_ascii, sep4_ascii]
if CURRENT_OS == "Linux":
- return checkPathLinux(path_len, path_type_in, a_ascii, z_ascii,
- A_ascii, Z_ascii, num0_ascii, num9_ascii,
- char_check_list1)
+ return checkPathLinux(
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ A_ascii,
+ Z_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list1,
+ )
elif CURRENT_OS == "Windows":
- return checkPathWindows(path_len, path_type_in, a_ascii, z_ascii,
- A_ascii, Z_ascii, num0_ascii, num9_ascii,
- char_check_list2)
+ return checkPathWindows(
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ A_ascii,
+ Z_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list2,
+ )
else:
print("Error: Can not support this platform.")
sys.exit(1)
-def checkPathLinux(path_len, path_type_in, a_ascii, z_ascii,
- A_ascii, Z_ascii, num0_ascii, num9_ascii, char_check_list):
+def checkPathLinux(
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ A_ascii,
+ Z_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list,
+):
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (a_ascii <= char_check <= z_ascii
- or A_ascii <= char_check <= Z_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in char_check_list)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or A_ascii <= char_check <= Z_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in char_check_list
+ ):
return False
return True
-def checkPathWindows(path_len, path_type_in, a_ascii, z_ascii, A_ascii,
- Z_ascii, num0_ascii, num9_ascii, char_check_list):
+def checkPathWindows(
+ path_len,
+ path_type_in,
+ a_ascii,
+ z_ascii,
+ A_ascii,
+ Z_ascii,
+ num0_ascii,
+ num9_ascii,
+ char_check_list,
+):
for i in range(0, path_len):
char_check = ord(path_type_in[i])
- if (not (a_ascii <= char_check <= z_ascii
- or A_ascii <= char_check <= Z_ascii
- or num0_ascii <= char_check <= num9_ascii
- or char_check in char_check_list)):
+ if not (
+ a_ascii <= char_check <= z_ascii
+ or A_ascii <= char_check <= Z_ascii
+ or num0_ascii <= char_check <= num9_ascii
+ or char_check in char_check_list
+ ):
return False
return True
@@ -286,8 +349,13 @@ def _exec_popen(cmd, values=None):
if not values:
values = []
bash_cmd = ["bash"]
- pobj = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ pobj = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
pobj.stdin.write(cmd.encode())
@@ -350,22 +418,29 @@ def check_runner():
if owner_uid == 0:
if runner_uid != 0:
runner = pwd.getpwuid(runner_uid).pw_name
- print("Error: The owner of install.py has root privilege,"
- " can't run it by user [%s]." % runner)
+ print(
+ "Error: The owner of install.py has root privilege,"
+ " can't run it by user [%s]." % runner
+ )
sys.exit(1)
else:
if runner_uid == 0:
owner = pwd.getpwuid(owner_uid).pw_name
- print("Error: The owner of install.py is [%s],"
- " can't run it by root." % owner)
+ print(
+ "Error: The owner of install.py is [%s],"
+ " can't run it by root." % owner
+ )
sys.exit(1)
elif runner_uid != owner_uid:
runner = pwd.getpwuid(runner_uid).pw_name
owner = pwd.getpwuid(owner_uid).pw_name
- print("Error: The owner of install.py [%s] is different"
- " with the executor [%s]." % (owner, runner))
+ print(
+ "Error: The owner of install.py [%s] is different"
+ " with the executor [%s]." % (owner, runner)
+ )
sys.exit(1)
+
def persist_environment_variable(var_name, var_value, config_file=None):
"""
将环境变量持久化到指定的配置文件中。
@@ -380,7 +455,7 @@ def persist_environment_variable(var_name, var_value, config_file=None):
# 读取现有的配置文件内容
try:
- with open(config_file, 'r') as file:
+ with open(config_file, "r") as file:
lines = file.readlines()
except FileNotFoundError:
lines = []
@@ -398,7 +473,7 @@ def persist_environment_variable(var_name, var_value, config_file=None):
lines.append(f"\nexport {var_name}={var_value}\n")
# 写回配置文件
- with open(config_file, 'w') as file:
+ with open(config_file, "w") as file:
file.writelines(lines)
# 使更改生效
@@ -406,14 +481,15 @@ def persist_environment_variable(var_name, var_value, config_file=None):
print(f"{var_name} set to {var_value} and made persistent.")
+
def get_random_num(lower_num, upper_num):
# range of values
differ_num = upper_num - lower_num + 1
# get the first ten rows of random numbers
cmd = "cat /dev/random | head -n 10 | cksum | awk -F ' ' '{print $1}'"
- p = subprocess.Popen(["bash", "-c", cmd], shell=False,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ ["bash", "-c", cmd], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
# format result to string
result_num = p.stdout.read().strip().decode()
# string to int
@@ -454,53 +530,53 @@ def generate_password():
def usage():
"""install.py is a utility to install cantiand server.
-Usage:
- python install.py --help
- python install.py -U user:group -R installpath -M cantiand -N 0 -D DATADIR [-O] [-c]
- [-C PARAMETER=VALUE] [-g withoutroot] [-f SQLFILE] [-d] [-p]
- python install.py -U user:group -R installpath -M cantiand_in_cluster -N 0 -D DATADIR [-O] [-c]
- [-C 'PARAMETER=VALUE'] [-g withoutroot] [-f SQLFILE] [-d] [-p]
- python install.py -U user:group -R installpath -M cantiand_with_mysql -N 0 -D DATADIR [-O] [-c]
- [-C 'PARAMETER=VALUE'] [-g withoutroot] [-f SQLFILE] [-d] [-p]
- python install.py -U user:group -R installpath -M cantiand_in_cluster -N 1 -D DATADIR [-O] [-c]
- [-C \"PARAMETER=VALUE\"] [-g withoutroot] [-f SQLFILE] [-d] [-p]
-
-Common options:
- -U the database program and cluster owner
- -R the database program path
- -M the database running mode, case insensitive, default value [cantiand]
- cantiand: running cantiand in single mode;
- cantiand_with_mysql: running cantiand with mysql connector in single mode;
- cantiand_with_mysql_st: running cantiand(mysql llt) with mysql connector in single mode;
- cantiand_in_cluster: running cantiand in cluster mode;
- cantiand_with_mysql_in_cluster: running cantiand with mysql connector in cluster mode.
- -N node id, value is [0, 1]
- -O only install database program, and omit other optional parameters
- -D location of the database cluster storage area
- -g run install script without root privilege,
- but you must have permission of installation folder
- note: use \"-g withoutroot\" exactly
- -Z configure the database cluster config, eg: -Z "LSNR_PORT=1611",
- for more detail information see documentation.
- -C configure the database cluster cms config, eg: -C "GCC_HOME=/dev/cms-disk1",
- for more detail information see documentation.
- -G configure the database cluster gss config, eg: -G "STORAGE_MODE=CLUSTER_RAID",
- for more detail information see documentation.
- -W configure the database ip white list, eg: -W "127.0.0.1".
- -c not use SSL-based secure connections
- -s using gss as storage, default using file system
- -l specify the cantiand install log file path and name,
- if not, use the default
- disable it(not recommended)
- -P Compatibility parameter, which does not take effect.
- -f specify a customized create database sql file.
- if not, use default create database sql file.
- -d install inside docker container
- -p if ignore pkg version check
- -m assign the config file of mysql
- --help show this help, then exit
-
-If all the optional parameters are not specified, -O option will be used.
+ Usage:
+ python install.py --help
+ python install.py -U user:group -R installpath -M cantiand -N 0 -D DATADIR [-O] [-c]
+ [-C PARAMETER=VALUE] [-g withoutroot] [-f SQLFILE] [-d] [-p]
+ python install.py -U user:group -R installpath -M cantiand_in_cluster -N 0 -D DATADIR [-O] [-c]
+ [-C 'PARAMETER=VALUE'] [-g withoutroot] [-f SQLFILE] [-d] [-p]
+ python install.py -U user:group -R installpath -M cantiand_with_mysql -N 0 -D DATADIR [-O] [-c]
+ [-C 'PARAMETER=VALUE'] [-g withoutroot] [-f SQLFILE] [-d] [-p]
+ python install.py -U user:group -R installpath -M cantiand_in_cluster -N 1 -D DATADIR [-O] [-c]
+ [-C \"PARAMETER=VALUE\"] [-g withoutroot] [-f SQLFILE] [-d] [-p]
+
+ Common options:
+ -U the database program and cluster owner
+ -R the database program path
+ -M the database running mode, case insensitive, default value [cantiand]
+ cantiand: running cantiand in single mode;
+ cantiand_with_mysql: running cantiand with mysql connector in single mode;
+ cantiand_with_mysql_st: running cantiand(mysql llt) with mysql connector in single mode;
+ cantiand_in_cluster: running cantiand in cluster mode;
+ cantiand_with_mysql_in_cluster: running cantiand with mysql connector in cluster mode.
+ -N node id, value is [0, 1]
+ -O only install database program, and omit other optional parameters
+ -D location of the database cluster storage area
+ -g run install script without root privilege,
+ but you must have permission on the installation folder
+ note: use \"-g withoutroot\" exactly
+ -Z configure the database cluster config, e.g. -Z "LSNR_PORT=1611",
+ see the documentation for more details.
+ -C configure the database cluster cms config, e.g. -C "GCC_HOME=/dev/cms-disk1",
+ see the documentation for more details.
+ -G configure the database cluster gss config, e.g. -G "STORAGE_MODE=CLUSTER_RAID",
+ see the documentation for more details.
+ -W configure the database ip white list, e.g. -W "127.0.0.1".
+ -c do not use SSL-based secure connections (not recommended)
+ -s use gss as storage, default is the file system
+ -l specify the cantiand install log file path and name,
+ if not specified, use the default
+ -P compatibility parameter, which does not take effect.
+ -f specify a customized create database sql file,
+ if not specified, use the default create database sql file.
+ -d install inside docker container
+ -p ignore pkg version check
+ -m assign the config file of mysql
+ --help show this help, then exit
+
+ If none of the optional parameters is specified, the -O option will be used.
"""
print(usage.__doc__)
@@ -523,19 +599,22 @@ def check_user(user, group):
print("Parameter input error: -U, the user does not match the group.")
sys.exit(1)
elif user == "root" or user_.pw_uid == 0:
- print("Parameter input error: -U, can not install program to"
- " root user.")
+ print("Parameter input error: -U, can not install program to" " root user.")
sys.exit(1)
elif group == "root" or user_.pw_gid == 0:
- print("Parameter input error: -U, can not install program to"
- " user with root privilege.")
+ print(
+ "Parameter input error: -U, can not install program to"
+ " user with root privilege."
+ )
sys.exit(1)
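+ # A non-root executor may only install for its own account: the
+ # effective uid must match the target user's uid checked below.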
runner_uid = os.getuid()
if runner_uid != 0 and runner_uid != user_.pw_uid:
runner = pwd.getpwuid(runner_uid).pw_name
- print("Parameter input error: -U, has to be the same as the"
- " executor [%s]" % runner)
+ print(
+ "Parameter input error: -U, has to be the same as the"
+ " executor [%s]" % runner
+ )
sys.exit(1)
@@ -564,8 +643,11 @@ def parse_parameter():
try:
# Parameters are passed into argv. After parsing, they are stored
# in opts as binary tuples. Unresolved parameters are stored in args.
- opts, args = getopt.getopt(sys.argv[1:],
- "U:R:M:N:OD:Z:C:G:W:cg:sdl:Ppf:m:S:r", ["help", "dbstor", "linktype="])
+ opts, args = getopt.getopt(
+ sys.argv[1:],
+ "U:R:M:N:OD:Z:C:G:W:cg:sdl:Ppf:m:S:r",
+ ["help", "dbstor", "linktype="],
+ )
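+ # In the getopt spec, letters followed by ":" (and long options
+ # ending in "=") take a value; bare letters such as O, c, s, d are
+ # boolean switches.
+ # e.g. python install.py -U user:group -R installpath -M cantiand -N 0 -D DATADIR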
if args:
print("Parameter input error: " + str(args[0]))
exit(1)
@@ -643,7 +725,7 @@ def is_mlnx():
log("exec ofed_info cmd failed")
return False
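+ # ofed_info prints the installed Mellanox OFED release string;
+ # only a 5.5 install is recognized by this check.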
- if 'MLNX_OFED_LINUX-5.5' in stdout:
+ if "MLNX_OFED_LINUX-5.5" in stdout:
log("Is mlnx 5.5")
return True
@@ -685,8 +767,10 @@ def check_parameter():
"""
if g_opts.install_user_privilege != "withoutroot":
if os.getuid():
- print("Error: User has no root privilege, do install,"
- " need specify parameter '-g withoutroot'.")
+ print(
+ "Error: User has no root privilege, do install,"
+ " need specify parameter '-g withoutroot'."
+ )
sys.exit(1)
# Check the log path.
check_log_path()
@@ -706,17 +790,30 @@ def check_parameter():
print(str(ex))
sys.exit(1)
# Check running mode
- if len(g_opts.running_mode) == 0 or g_opts.running_mode.lower() not in VALID_RUNNING_MODE:
+ if (
+ len(g_opts.running_mode) == 0
+ or g_opts.running_mode.lower() not in VALID_RUNNING_MODE
+ ):
print("Invalid running mode: " + g_opts.running_mode)
sys.exit(1)
- if g_opts.node_id not in [0 ,1]:
+ if g_opts.node_id not in [0, 1]:
print("Invalid node id: " + g_opts.node_id)
sys.exit(1)
- if g_opts.running_mode.lower() in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST] and g_opts.node_id == 1:
- print("Invalid node id: " + g_opts.node_id + ", this node id can only run in cluster mode")
+ if (
+ g_opts.running_mode.lower()
+ in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST]
+ and g_opts.node_id == 1
+ ):
+ print(
+ "Invalid node id: "
+ + str(g_opts.node_id)
+ + ", this node id can only run in cluster mode"
+ )
sys.exit(1)
# Check docker option
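+ # The XOR flags any mismatch between the -d option and the actual
+ # environment (marker file /.dockerenv), unless the cantian
+ # container override is set.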
- if (g_opts.in_container ^ os.path.exists("/.dockerenv")) and (g_opts.cantian_in_container != "1"):
+ if (g_opts.in_container ^ os.path.exists("/.dockerenv")) and (
+ g_opts.cantian_in_container != "1"
+ ):
print("Wrong docker container env option of -d")
sys.exit(1)
@@ -749,12 +846,16 @@ def check_log_path():
if not os.path.isdir(dir_path):
g_opts.log_file = ""
- print("Specified log path: \"%s\" does not exist, "
- "choose the default path instead." % dir_path)
+ print(
+ 'Specified log path: "%s" does not exist, '
+ "choose the default path instead." % dir_path
+ )
elif not base_name:
g_opts.log_file = ""
- print("Log file does not been specified, "
- "choose the default logfile instead.")
+ print(
+ "Log file does not been specified, "
+ "choose the default logfile instead."
+ )
def use_default_log_path():
@@ -770,8 +871,9 @@ def use_default_log_path():
if not os.path.exists(os.path.realpath(stdout)):
print("Cant get the home path of current user.")
sys.exit(1)
- g_opts.log_file = os.path.join(os.path.realpath(os.path.normpath(stdout)),
- "cantiandinstall.log")
+ g_opts.log_file = os.path.join(
+ os.path.realpath(os.path.normpath(stdout)), "cantiandinstall.log"
+ )
def log(msg, is_screen=False):
@@ -796,8 +898,10 @@ def logExit(msg):
"""
log("Error: " + msg)
print("Error: " + msg)
- print("Please refer to install log \"%s\" for more detailed information."
- % g_opts.log_file)
+ print(
+ 'Please refer to install log "%s" for more detailed information.'
+ % g_opts.log_file
+ )
sys.exit(1)
@@ -805,32 +909,46 @@ class Platform(object):
"""
get dist name/version/id from /etc/*release
"""
+
def __init__(self):
pass
- SUPPORTED_DISTS = ('suse', 'debian', 'fedora', 'redhat', 'centos',
- 'mandrake', 'mandriva', 'rocks', 'slackware',
- 'yellowdog', 'gentoo', 'unitedlinux',
- 'turbolinux', 'arch', 'mageia', 'openeuler',
- 'neokylin', 'euleros', 'kylin')
- UNIXCONFDIR = '/etc'
+ SUPPORTED_DISTS = (
+ "suse",
+ "debian",
+ "fedora",
+ "redhat",
+ "centos",
+ "mandrake",
+ "mandriva",
+ "rocks",
+ "slackware",
+ "yellowdog",
+ "gentoo",
+ "unitedlinux",
+ "turbolinux",
+ "arch",
+ "mageia",
+ "openeuler",
+ "neokylin",
+ "euleros",
+ "kylin",
+ )
+ UNIXCONFDIR = "/etc"
@staticmethod
- def _parse_release_file(firstline, version='', dst_id=''):
+ def _parse_release_file(firstline, version="", dst_id=""):
"""
function: parse first line of /etc/*release
input: string
output: tuple(string, string, string)
"""
- lsb_release_version_re = r'(.+) release ([\d.]+)[^(]*(?:\((.+)\))?'
- release_version_re = (r'([^0-9]+)(?: release )?([\d.]+)[^(]*'
- r'(?:\((.+)\))?')
+ lsb_release_version_re = r"(.+) release ([\d.]+)[^(]*(?:\((.+)\))?"
+ release_version_re = r"([^0-9]+)(?: release )?([\d.]+)[^(]*" r"(?:\((.+)\))?"
try:
- lsb_release_version = re.compile(lsb_release_version_re,
- re.ASCII)
- release_version = re.compile(release_version_re,
- re.ASCII)
+ lsb_release_version = re.compile(lsb_release_version_re, re.ASCII)
+ release_version = re.compile(release_version_re, re.ASCII)
except AttributeError:
lsb_release_version = re.compile(lsb_release_version_re)
release_version = re.compile(release_version_re)
@@ -853,7 +971,7 @@ class Platform(object):
if len(line_list) > 1:
dst_id = line_list[1]
- return '', version, dst_id
+ return "", version, dst_id
@staticmethod
def dist():
@@ -872,9 +990,9 @@ class Platform(object):
return "", "", ""
try:
- release_re = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
+ release_re = re.compile(r"(\w+)[-_](release|version)", re.ASCII)
except AttributeError:
- release_re = re.compile(r'(\w+)[-_](release|version)')
+ release_re = re.compile(r"(\w+)[-_](release|version)")
for etc_file in etc:
m = release_re.match(etc_file)
@@ -886,14 +1004,16 @@ class Platform(object):
# read the first line
try:
etc_file_name = os.path.join(Platform.UNIXCONFDIR, etc_file)
- with open(etc_file_name, 'r') as f:
+ with open(etc_file_name, "r") as f:
firstline = f.readline()
except Exception:
continue
# when euler, has centos-release
- if (_distname.lower() == "centos" and
- _distname.lower() not in firstline.lower()):
+ if (
+ _distname.lower() == "centos"
+ and _distname.lower() not in firstline.lower()
+ ):
continue
if _distname.lower() in Platform.SUPPORTED_DISTS:
@@ -934,8 +1054,7 @@ class ParameterContainer(object):
index = self.ifiles.index(value)
para_index = 0
for _ in range(index + 1):
- para_index = self.parameters.index(self.IFILE,
- para_index + 1)
+ para_index = self.parameters.index(self.IFILE, para_index + 1)
self.parameters.pop(para_index)
self.ifiles.pop(index)
self.parameters.append(key)
@@ -1027,17 +1146,21 @@ class SslCertConstructor(object):
status, stdout, stderr = _exec_popen(cmd, values)
except Exception as error:
err_msg = str(error).replace(self.passwd, "*")
- raise OSError("Failed to generate {}."
- " Error: {}".format(cert_name, err_msg))
+ raise OSError(
+ "Failed to generate {}." " Error: {}".format(cert_name, err_msg)
+ )
output = stdout + stderr
output = output.replace(self.passwd, "*")
if status:
- raise OSError("Failed to generate {}."
- " Error: {}".format(cert_name, output))
+ raise OSError(
+ "Failed to generate {}." " Error: {}".format(cert_name, output)
+ )
if not self.check_certificate_files_exist([cert_name]):
- raise OSError("Failed to generate {}. The file does not exist now."
- " Error: {}".format(cert_name, output))
+ raise OSError(
+ "Failed to generate {}. The file does not exist now."
+ " Error: {}".format(cert_name, output)
+ )
# Verify key files.
if cert_name.endswith(".key") or cert_name == "cakey.pem":
@@ -1045,8 +1168,9 @@ class SslCertConstructor(object):
try:
self.verify_ssl_key(path)
except (OSError, ValueError) as error:
- raise OSError("Failed to verify {}."
- " Error: {}".format(cert_name, error))
+ raise OSError(
+ "Failed to verify {}." " Error: {}".format(cert_name, error)
+ )
def __generate_cert_file(self, cmd, cert_name):
"""Generate cert file."""
@@ -1062,8 +1186,7 @@ class SslCertConstructor(object):
def check_certificate_files_exist(self, cert_names):
"""Check whether the certificate file is generated."""
- log("Check whether the certificate files %s are generated."
- % cert_names)
+ log("Check whether the certificate files %s are generated." % cert_names)
for cert_name in cert_names:
cert_path = os.path.join(self.keys_path, cert_name)
if not os.path.exists(cert_path):
@@ -1097,15 +1220,19 @@ class SslCertConstructor(object):
log("Generate ca keys.")
# cakey.pem
- cmd = ('openssl genrsa -aes256 -f4 -passout stdin'
- ' -out {0}/cakey.pem 2048'.format(self.keys_path))
+ cmd = (
+ "openssl genrsa -aes256 -f4 -passout stdin"
+ " -out {0}/cakey.pem 2048".format(self.keys_path)
+ )
self.__generate_cert_file(cmd, "cakey.pem")
# cacert.pem
- cmd = ('openssl req -new -x509 -passin stdin -days 10950'
- ' -key {0}/cakey.pem -out {0}/cacert.pem'
- ' -subj "/C=CN/ST=NULL/L=NULL/O=NULL/OU=NULL/'
- 'CN=CA"'.format(self.keys_path))
+ cmd = (
+ "openssl req -new -x509 -passin stdin -days 10950"
+ " -key {0}/cakey.pem -out {0}/cacert.pem"
+ ' -subj "/C=CN/ST=NULL/L=NULL/O=NULL/OU=NULL/'
+ 'CN=CA"'.format(self.keys_path)
+ )
self.__generate_cert_file(cmd, "cacert.pem")
def _generate_cert(self, role):
@@ -1117,23 +1244,28 @@ class SslCertConstructor(object):
log("Generate %s keys." % role)
# key
- cmd = ("openssl genrsa -aes256 -passout stdin -out {0}/{1}.key"
- " 2048".format(self.keys_path, role))
+ cmd = "openssl genrsa -aes256 -passout stdin -out {0}/{1}.key" " 2048".format(
+ self.keys_path, role
+ )
cert_name = "{}.key".format(role)
self.__generate_cert_file(cmd, cert_name)
# csr
- cmd = ('openssl req -new -key {0}/{1}.key -passin stdin -out '
- '{0}/{1}.csr -subj "/C=CN/ST=NULL/L=NULL/O=NULL/OU=NULL/'
- 'CN={1}"'.format(self.keys_path, role))
+ cmd = (
+ "openssl req -new -key {0}/{1}.key -passin stdin -out "
+ '{0}/{1}.csr -subj "/C=CN/ST=NULL/L=NULL/O=NULL/OU=NULL/'
+ 'CN={1}"'.format(self.keys_path, role)
+ )
cert_name = "{}.csr".format(role)
self.__generate_cert_file(cmd, cert_name)
# crt
- cmd = ('openssl x509 -req -days 10950 -in {0}/{1}.csr'
- ' -CA {0}/cacert.pem -CAkey {0}/cakey.pem -passin stdin'
- ' -CAserial {0}/cacert.srl -CAcreateserial -out {0}/{1}.crt'
- ' -extfile {0}/openssl.cnf'.format(self.keys_path, role))
+ cmd = (
+ "openssl x509 -req -days 10950 -in {0}/{1}.csr"
+ " -CA {0}/cacert.pem -CAkey {0}/cakey.pem -passin stdin"
+ " -CAserial {0}/cacert.srl -CAcreateserial -out {0}/{1}.crt"
+ " -extfile {0}/openssl.cnf".format(self.keys_path, role)
+ )
cert_name = "{}.crt".format(role)
self.__generate_cert_file(cmd, cert_name)
@@ -1143,22 +1275,19 @@ class SslCertConstructor(object):
def _check_all_keys_exists(self):
"""All keys must be exists."""
- keys = ["cacert.pem", "server.crt", "server.key",
- "client.crt", "client.key"]
+ keys = ["cacert.pem", "server.crt", "server.key", "client.crt", "client.key"]
keys_ = set(keys)
files = set([_ for _ in os.listdir(self.keys_path)])
lack_keys = keys_ - files
if lack_keys:
- raise Exception("Failed to generate keys: %s"
- % " ".join(lack_keys))
+ raise Exception("Failed to generate keys: %s" % " ".join(lack_keys))
def _clean_useless_path(self):
"""
Clean useless dirs and files, chmod the target files
:return: NA
"""
- keys = ["cacert.pem", "server.crt", "server.key",
- "client.crt", "client.key"]
+ keys = ["cacert.pem", "server.crt", "server.key", "client.crt", "client.key"]
for filename in os.listdir(self.keys_path):
if filename in [os.curdir, os.pardir]:
continue
@@ -1207,13 +1336,15 @@ class SslCertConstructor(object):
except Exception as ssl_err:
self.clean_all()
err_msg = str(ssl_err).replace(self.passwd, "*")
- raise Exception("Failed to generate ssl certificate. Error: %s"
- % err_msg)
+ raise Exception("Failed to generate ssl certificate. Error: %s" % err_msg)
log("Complete to generate ssl certificate.")
def skip_execute_in_node_1():
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and g_opts.node_id == 1:
+ if (
+ g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and g_opts.node_id == 1
+ ):
return True
return False
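+ # True on node 1 of a cluster, so callers can skip steps that only
+ # node 0 performs.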
@@ -1223,6 +1354,7 @@ def skip_execute_in_slave_cluster():
return True
return False
+
def create_dir_if_needed(condition, dir):
if condition:
return
@@ -1231,14 +1363,16 @@ def create_dir_if_needed(condition, dir):
def check_command(cmd):
try:
- output = subprocess.check_output(['/usr/bin/which', cmd], stderr=subprocess.STDOUT)
+ output = subprocess.check_output(
+ ["/usr/bin/which", cmd], stderr=subprocess.STDOUT
+ )
return True
except subprocess.CalledProcessError as e:
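+ # "which" exits non-zero when the binary is missing; treat that
+ # as "command unavailable".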
return False
class Installer:
- """ This is Cantiand installer. """
+ """This is Cantiand installer."""
# Defining a constant identifies which step the installer failed to take.
# For roll back.
@@ -1277,7 +1411,7 @@ class Installer:
IPV_TYPE = "ipv4"
def __init__(self, user, group):
- """ Constructor for the Installer class. """
+ """Constructor for the Installer class."""
log("Begin init...")
log("Installer runs on python version : " + gPyVersion)
@@ -1343,7 +1477,7 @@ class Installer:
self.repl_scram_auth = True
# ENABLE_ACCESS_DC
self.enable_access_dc = False
-
+
# replace_password_verify
self.replace_password_verify = True
@@ -1357,11 +1491,11 @@ class Installer:
self.factor_key = ""
self.os_type = platform.machine()
- self.have_numactl = check_command('numactl')
+ self.have_numactl = check_command("numactl")
self.numactl_str = ""
- if self.os_type == 'aarch64' and self.have_numactl == True:
+ if self.os_type == "aarch64" and self.have_numactl == True:
last_cpu_core = os.cpu_count() - 1
- self.numactl_str = "numactl -C 0-1,6-11,16-" + str(last_cpu_core) + " "
+ self.numactl_str = "numactl -C 0-1,6-11,16-" + str(last_cpu_core) + " "
log("End init")
@@ -1374,24 +1508,24 @@ class Installer:
find_files.append(file_name)
if not find_files:
- return ''
+ return ""
if len(find_files) > 1:
- raise Exception("More than one target found in %s: %s\n"
- "Please remove the unused files."
- % (path, ' ;'.join(find_files)))
+ raise Exception(
+ "More than one target found in %s: %s\n"
+ "Please remove the unused files." % (path, " ;".join(find_files))
+ )
# file exists, return absolute file name
file_name = os.path.realpath(os.path.join(path, find_files[0]))
if not os.path.isfile(file_name):
- raise Exception("%s is not file, please check your package."
- % file_name)
+ raise Exception("%s is not file, please check your package." % file_name)
return file_name
def get_decompress_tarname(self, tar_file):
- '''
+ """
decompress a.tar.gz, then get file name
:return:
- '''
+ """
# get real directory name in tar file
tars = tarfile.open(tar_file)
basename = tars.getnames()[0]
@@ -1409,19 +1543,21 @@ class Installer:
self.dirName = os.path.dirname(installFile)
# get run.tar.gz package
- run_pattern = ("^(%s|%s)-[A-Z0-9]+-64bit.tar.gz$"
- % (self.RUN_VERSION_A, self.RUN_VERSION_B))
+ run_pattern = "^(%s|%s)-[A-Z0-9]+-64bit.tar.gz$" % (
+ self.RUN_VERSION_A,
+ self.RUN_VERSION_B,
+ )
self.runFile = self.find_file(self.dirName, run_pattern)
if not self.runFile:
- raise Exception("Can not get correct run package in path %s"
- % self.dirName)
+ raise Exception("Can not get correct run package in path %s" % self.dirName)
# get run.sha256 file
- sha256_pattern = ("^(%s|%s)-[A-Z0-9]+-64bit.sha256$"
- % (self.RUN_VERSION_A, self.RUN_VERSION_B))
+ sha256_pattern = "^(%s|%s)-[A-Z0-9]+-64bit.sha256$" % (
+ self.RUN_VERSION_A,
+ self.RUN_VERSION_B,
+ )
self.runSha256File = self.find_file(self.dirName, sha256_pattern)
if not self.runSha256File:
- raise Exception("Can not get correct sha256 file in path %s"
- % self.dirName)
+ raise Exception("Can not get correct sha256 file in path %s" % self.dirName)
# get run file name without suffix
# compress package name is run.tar.gz,
# decompress is run, remove .tar.gz
@@ -1430,20 +1566,20 @@ class Installer:
log("Using run file as : %s" % self.runFile)
def is_readable(self, file_name, user):
- '''
+ """
:param path:
:param user:
:return:
- '''
+ """
user_info = pwd.getpwnam(user)
uid = user_info.pw_uid
gid = user_info.pw_gid
s = os.stat(file_name)
mode = s[stat.ST_MODE]
return (
- ((s[stat.ST_UID] == uid) and (mode & stat.S_IRUSR > 0)) or
- ((s[stat.ST_GID] == gid) and (mode & stat.S_IRGRP > 0)) or
- (mode & stat.S_IROTH > 0)
+ ((s[stat.ST_UID] == uid) and (mode & stat.S_IRUSR > 0))
+ or ((s[stat.ST_GID] == gid) and (mode & stat.S_IRGRP > 0))
+ or (mode & stat.S_IROTH > 0)
)
def clean_dir(self, dir_path):
@@ -1464,35 +1600,37 @@ class Installer:
pass
def checkCreatedbFile(self):
- '''
+ """
check it is a file; user has read permission,
:return:
- '''
+ """
# check -f parameter
if self.option != self.INS_ALL:
- raise Exception("Error: -f parameter should be used without"
- " -O parameter ")
+ raise Exception("Error: -f parameter should be used without -O parameter.")
# check it is a file
if not os.path.isfile(self.create_db_file):
- raise Exception("Error: %s does not exists or is not a file"
- " or permission is not right."
- % self.create_db_file)
+ raise Exception(
+ "Error: %s does not exists or is not a file"
+ " or permission is not right." % self.create_db_file
+ )
if not checkPath(self.create_db_file):
- raise Exception("Error: %s file path invalid: "
- % self.create_db_file)
+ raise Exception("Error: %s file path invalid: " % self.create_db_file)
# if execute user is root, check common user has read permission
file_path = os.path.dirname(self.create_db_file)
# check path of create db sql file that user can cd
permission_ok, _ = self.checkPermission(file_path, True)
if not permission_ok:
- raise Exception("Error: %s can not access %s"
- % (self.user, file_path))
+ raise Exception("Error: %s can not access %s" % (self.user, file_path))
# check create db file is readable for user
if not self.is_readable(self.create_db_file, self.user):
- raise Exception("Error: %s is not readable for user %s"
- % (self.create_db_file, self.user))
+ raise Exception(
+ "Error: %s is not readable for user %s"
+ % (self.create_db_file, self.user)
+ )
# change file to a realpath file
self.create_db_file = os.path.realpath(self.create_db_file)
@@ -1501,17 +1639,35 @@ class Installer:
Parse cantiand, cms, gss default config
:return: cantiand config, cms config, gss config
"""
- if g_opts.running_mode in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST]:
- self.cantiandConfigs, self.cmsConfigs, self.gssConfigs = SingleNodeConfig.get_config(g_opts.in_container)
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and g_opts.node_id == 0:
- self.cantiandConfigs, self.cmsConfigs, self.gssConfigs = ClusterNode0Config.get_config(g_opts.in_container)
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER] and g_opts.node_id == 1:
- self.cantiandConfigs, self.cmsConfigs, self.gssConfigs = ClusterNode1Config.get_config(g_opts.in_container)
+ if g_opts.running_mode in [
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ ]:
+ self.cantiandConfigs, self.cmsConfigs, self.gssConfigs = (
+ SingleNodeConfig.get_config(g_opts.in_container)
+ )
+ if (
+ g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and g_opts.node_id == 0
+ ):
+ self.cantiandConfigs, self.cmsConfigs, self.gssConfigs = (
+ ClusterNode0Config.get_config(g_opts.in_container)
+ )
+ if (
+ g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]
+ and g_opts.node_id == 1
+ ):
+ self.cantiandConfigs, self.cmsConfigs, self.gssConfigs = (
+ ClusterNode1Config.get_config(g_opts.in_container)
+ )
def addConfigForGss(self):
- self.cantiandConfigs["CONTROL_FILES"] = "{0}, {1}, {2}".format(os.path.join(self.data, "data/ctrl1"),
- os.path.join(self.data, "data/ctrl2"),
- os.path.join(self.data, "data/ctrl3"))
+ self.cantiandConfigs["CONTROL_FILES"] = "{0}, {1}, {2}".format(
+ os.path.join(self.data, "data/ctrl1"),
+ os.path.join(self.data, "data/ctrl2"),
+ os.path.join(self.data, "data/ctrl3"),
+ )
self.gssConfigs["INTERCONNECT_ADDR"] = self.cantiandConfigs["INTERCONNECT_ADDR"]
self.gssConfigs["LOG_HOME"] = self.cantiandConfigs["LOG_HOME"]
del self.gssConfigs["STORAGE_MODE"]
@@ -1519,9 +1675,11 @@ class Installer:
self.cmsConfigs["GCC_HOME"] = os.path.join(self.data, "gcc_home")
self.cmsConfigs["GCC_TYPE"] = "FILE"
self.cmsConfigs["CMS_LOG"] = self.cantiandConfigs["LOG_HOME"]
-
+
if g_opts.use_gss:
- self.cantiandConfigs["CONTROL_FILES"] = "(+vg1/ctrl1, +vg1/ctrl2, +vg1/ctrl3)"
+ self.cantiandConfigs["CONTROL_FILES"] = (
+ "(+vg1/ctrl1, +vg1/ctrl2, +vg1/ctrl3)"
+ )
self.gssConfigs["STORAGE_MODE"] = "CLUSTER_RAID"
self.cmsConfigs["GCC_HOME"] = "/dev/gcc-disk"
self.cmsConfigs["GCC_TYPE"] = "SD"
@@ -1556,7 +1714,7 @@ class Installer:
# Check user
if not self.user:
logExit("Parameter input error, need -U parameter.")
- os.environ['cantiand_user'] = str(self.user)
+ os.environ["cantiand_user"] = str(self.user)
# User must be exist.
strCmd = "id -u ${cantiand_user}"
ret_code, _, _ = _exec_popen(strCmd)
@@ -1584,10 +1742,14 @@ class Installer:
self.cantiandConfigs["LOG_HOME"] = os.path.join(self.data, "log")
if len(self.cantiandConfigs["SHARED_PATH"]) == 0:
self.cantiandConfigs["SHARED_PATH"] = os.path.join(self.data, "data")
- self.cantiandConfigs["CT_CLUSTER_STRICT_CHECK"] = self.cantiandConfigs["CT_CLUSTER_STRICT_CHECK"].upper()
+ self.cantiandConfigs["CT_CLUSTER_STRICT_CHECK"] = self.cantiandConfigs[
+ "CT_CLUSTER_STRICT_CHECK"
+ ].upper()
if self.cantiandConfigs["CT_CLUSTER_STRICT_CHECK"] not in ["TRUE", "FALSE"]:
self.cantiandConfigs["CT_CLUSTER_STRICT_CHECK"] = "TRUE"
- self.cantiandConfigs["MYSQL_DEPLOY_GROUP_ID"] = grp.getgrnam(g_opts.os_group).gr_gid
+ self.cantiandConfigs["MYSQL_DEPLOY_GROUP_ID"] = grp.getgrnam(
+ g_opts.os_group
+ ).gr_gid
self.addConfigForGss()
self.gcc_home = self.cmsConfigs["GCC_HOME"]
self.ssl_path = os.path.join(self.installPath, "sslkeys")
@@ -1633,30 +1795,56 @@ class Installer:
self.option = self.INS_PROGRAM
# Get the kernel parameter
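+ # -Z/-C/-G each carry one KEY=VALUE pair for cantiand/cms/gss
+ # respectively; keys are upcased, and malformed pairs are logged
+ # as warnings and skipped rather than treated as fatal.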
elif key == "-Z":
- _value = value.strip().split('=')
+ _value = value.strip().split("=")
if len(_value) != 2:
- log("Warning: kernel parameter will not take effect reason is invalid parameter: " + value, True)
+ log(
+ "Warning: kernel parameter will not take effect reason is invalid parameter: "
+ + value,
+ True,
+ )
continue
self.cantiandConfigs[_value[0].strip().upper()] = _value[1].strip()
elif key == "-C":
- _value = value.strip().split('=')
+ _value = value.strip().split("=")
if len(_value) != 2:
- log("Warning: cms parameter will not take effect reason is invalid parameter: " + value, True)
+ log(
+ "Warning: cms parameter will not take effect reason is invalid parameter: "
+ + value,
+ True,
+ )
continue
self.cmsConfigs[_value[0].strip().upper()] = _value[1].strip()
elif key == "-G":
- _value = value.strip().split('=')
+ _value = value.strip().split("=")
if len(_value) != 2:
- log("Warning: gss parameter will not take effect reason is invalid parameter: " + value, True)
+ log(
+ "Warning: gss parameter will not take effect reason is invalid parameter: "
+ + value,
+ True,
+ )
continue
self.gssConfigs[_value[0].strip().upper()] = _value[1].strip()
elif key == "-P":
pass # Compatibility parameter
- elif key in ["-g", "-l", "-U", "-M", "-W", "-s", "-N", "-d", "-p", "-m", "-r", "--dbstor", "--linktype"]:
+ elif key in [
+ "-g",
+ "-l",
+ "-U",
+ "-M",
+ "-W",
+ "-s",
+ "-N",
+ "-d",
+ "-p",
+ "-m",
+ "-r",
+ "--dbstor",
+ "--linktype",
+ ]:
pass
elif key == "-f":
self.create_db_file = value.strip()
- elif key == '-c':
+ elif key == "-c":
self.close_ssl = True
else:
logExit("Parameter input error: %s." % value)
@@ -1671,9 +1859,8 @@ class Installer:
log("Checking runner.", True)
gid = os.getgid()
uid = os.getuid()
- log("Check runner user id and group id is : %s, %s"
- % (str(uid), str(gid)))
- if(gid != 0 and uid != 0):
+ log("Check runner user id and group id is : %s, %s" % (str(uid), str(gid)))
+ if gid != 0 and uid != 0:
logExit("Only user with root privilege can run this script")
log("End check runner is root")
@@ -1690,19 +1877,32 @@ class Installer:
os.chown(g_opts.log_file, uid, gid)
except Exception as ex:
logExit("Can not change log file's owner. Output:%s" % str(ex))
-
+
def chownDataDir(self):
"""
chown data and gcc dirs
:return:
"""
- cmd = "chown %s:%s -R \"%s\" \"%s\";" % (self.user, self.group, self.data, self.gcc_home)
+ cmd = 'chown %s:%s -R "%s" "%s";' % (
+ self.user,
+ self.group,
+ self.data,
+ self.gcc_home,
+ )
if g_opts.in_container:
- cmd += "chown %s:%s -R \"%s\" \"%s\";" % (self.user, self.group, CommonValue.DOCKER_DATA_DIR, CommonValue.DOCKER_GCC_DIR)
+ cmd += 'chown %s:%s -R "%s" "%s";' % (
+ self.user,
+ self.group,
+ CommonValue.DOCKER_DATA_DIR,
+ CommonValue.DOCKER_GCC_DIR,
+ )
log("Change owner cmd: %s" % cmd)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chown to %s:%s return: %s%s%s" % (self.user, self.group, str(ret_code), os.linesep, stderr))
+ raise Exception(
+ "chown to %s:%s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
###########################################################################
# Is there a database installed by the user? If right, raise error
@@ -1718,7 +1918,7 @@ class Installer:
log("Checking old install.", True)
# Check $CTDB_HOME.
- if(g_opts.install_user_privilege == "withoutroot"):
+ if g_opts.install_user_privilege == "withoutroot":
strCmd = "echo ~"
else:
strCmd = "su - '%s' -c \"echo ~\"" % self.user
@@ -1727,7 +1927,7 @@ class Installer:
logExit("Can not get user home.")
# Get the profile of user.
output = os.path.realpath(os.path.normpath(stdout))
- if (not checkPath(output)):
+ if not checkPath(output):
logExit("The user home directory is invalid.")
self.userProfile = os.path.join(output, ".bashrc")
self.userHomePath = output
@@ -1750,15 +1950,18 @@ class Installer:
def dealwithCTDB(self, isFind, _file):
while True:
strLine = _file.readline()
- if (not strLine):
+ if not strLine:
break
strLine = strLine.strip()
- if (strLine.startswith("#")):
+ if strLine.startswith("#"):
continue
user_info = strLine.split()
self.dealwithCTDB_DATA(user_info, strLine)
- if (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith("CTDB_HOME=") > 0):
+ if (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith("CTDB_HOME=") > 0
+ ):
isFind = True
break
else:
@@ -1767,9 +1970,12 @@ class Installer:
def dealwithCTDB_DATA(self, user_info, strLine):
# deal with the CTDB_DATA with """
- if (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith('CTDB_DATA="') > 0):
- self.oldDataPath = strLine[strLine.find("=") + 2:-1]
+ if (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith('CTDB_DATA="') > 0
+ ):
+ self.oldDataPath = strLine[strLine.find("=") + 2 : -1]
self.oldDataPath = os.path.normpath(self.oldDataPath)
realPath = os.path.realpath(self.oldDataPath)
if not checkPath(realPath):
@@ -1778,12 +1984,15 @@ class Installer:
if self.option == self.INS_ALL and self.oldDataPath != self.data:
logExit("User CTDB_DATA is different from -D parameter value")
# deal with the CTDB_DATA path without """
- elif (len(user_info) >= 2 and user_info[0] == "export"
- and user_info[1].startswith("CTDB_DATA=") > 0):
- self.oldDataPath = strLine[strLine.find("=") + 1:]
+ elif (
+ len(user_info) >= 2
+ and user_info[0] == "export"
+ and user_info[1].startswith("CTDB_DATA=") > 0
+ ):
+ self.oldDataPath = strLine[strLine.find("=") + 1 :]
self.oldDataPath = os.path.normpath(self.oldDataPath)
realPath = os.path.realpath(self.oldDataPath)
- if (not checkPath(realPath)):
+ if not checkPath(realPath):
logExit("The Path specified by CTDB_DATA is invalid.")
log("Old data path: " + self.oldDataPath)
if self.option == self.INS_ALL and self.oldDataPath != self.data:
@@ -1815,10 +2024,10 @@ class Installer:
"""
log("Preparing path [%s]." % onePath)
ownerPath = onePath
- if(os.path.exists(onePath)):
- if(checkEmpty):
+ if os.path.exists(onePath):
+ if checkEmpty:
fileList = os.listdir(onePath)
- if(len(fileList) != 0):
+ if len(fileList) != 0:
logExit("Database path %s should be empty." % onePath)
else:
while True:
@@ -1840,8 +2049,7 @@ class Installer:
# will cause an error
if ownerPath != onePath:
cmd = "chown -R %s:%s %s; " % (self.user, self.group, ownerPath)
- cmd += "chmod -R %s %s" % (CommonValue.MAX_DIRECTORY_MODE,
- ownerPath)
+ cmd += "chmod -R %s %s" % (CommonValue.MAX_DIRECTORY_MODE, ownerPath)
else:
cmd = "chown %s:%s %s; " % (self.user, self.group, ownerPath)
cmd += "chmod %s %s" % (CommonValue.MAX_DIRECTORY_MODE, ownerPath)
@@ -1855,13 +2063,18 @@ class Installer:
log("check [%s] user permission" % onePath)
permission_ok, stderr = self.checkPermission(onePath)
if not permission_ok:
- logExit("Failed to check user [%s] path [%s] permission. Error: %s"
- % (self.user, onePath, stderr))
+ logExit(
+ "Failed to check user [%s] path [%s] permission. Error: %s"
+ % (self.user, onePath, stderr)
+ )
def checkMysqlDir(self, onePath):
log("Checking Mysql path [%s]." % onePath)
if not os.path.exists(onePath):
- logExit("Database path %s does not exist or can not accessed. Please check it." % onePath)
+ logExit(
+ "Database path %s does not exist or can not accessed. Please check it."
+ % onePath
+ )
def checkPermission(self, originalPath, check_enter_only=False):
"""
@@ -1892,11 +2105,18 @@ class Installer:
# check the user has write permission or not
testFile = os.path.join(originalPath, "touch.tst")
if g_opts.install_user_privilege == "withoutroot":
- cmd = ("touch %s && chmod %s %s "
- % (testFile, CommonValue.KEY_FILE_MODE, testFile))
+ cmd = "touch %s && chmod %s %s " % (
+ testFile,
+ CommonValue.KEY_FILE_MODE,
+ testFile,
+ )
else:
- cmd = ("su - '%s' -c 'touch %s && chmod %s %s' " %
- (self.user, testFile, CommonValue.KEY_FILE_MODE, testFile))
+ cmd = "su - '%s' -c 'touch %s && chmod %s %s' " % (
+ self.user,
+ testFile,
+ CommonValue.KEY_FILE_MODE,
+ testFile,
+ )
status, _, stderr = _exec_popen(cmd)
if status != 0:
@@ -1941,7 +2161,7 @@ class Installer:
log("Checking directory.", True)
# check if data or app path is too long(over 100 chars)
- if(len(self.data) >= 110 or len(self.installPath) >= 110):
+ if len(self.data) >= 110 or len(self.installPath) >= 110:
logExit("Install path or Data path is over 110 characters, exit.")
# check install path is empty or not.
self.prepareGivenPath(self.installPath)
@@ -1949,59 +2169,62 @@ class Installer:
if g_opts.slave_cluster:
# Allow the data dir not empty in slave_cluster.
self.prepareGivenPath(self.data, False)
- else:
+ else:
self.prepareGivenPath(self.data)
# check install path
vfs = os.statvfs(self.installPath)
- availableSize = vfs.f_bavail * vfs.f_bsize / (1024*1024)
- log("Database program install path available size: %sM"
- % str(availableSize))
+ availableSize = vfs.f_bavail * vfs.f_bsize / (1024 * 1024)
+ log("Database program install path available size: %sM" % str(availableSize))
if availableSize < 100:
- logExit("Database program install path available size smaller"
- " than 100M, current size is: %sM"
- % str(availableSize))
+ logExit(
+ "Database program install path available size smaller"
+ " than 100M, current size is: %sM" % str(availableSize)
+ )
# check data dir.
if self.option == self.INS_ALL:
# check partition of install path
- strCmd1 = "df -h \"%s\"" % self.installPath
- strCmd2 = "df -h \"%s\" | head -2" % self.installPath
- strCmd3 = "df -h \"%s\" | head -2 |tail -1" % self.installPath
- strCmd4 = ("df -h \"%s\" | head -2 |tail -1 | "
- "awk -F\" \" '{print $1}'" % self.installPath)
+ strCmd1 = 'df -h "%s"' % self.installPath
+ strCmd2 = 'df -h "%s" | head -2' % self.installPath
+ strCmd3 = 'df -h "%s" | head -2 |tail -1' % self.installPath
+ strCmd4 = (
+ 'df -h "%s" | head -2 |tail -1 | '
+ "awk -F\" \" '{print $1}'" % self.installPath
+ )
cmds = [strCmd1, strCmd2, strCmd3, strCmd4]
stdout = ""
stdout_list = []
for cmd in cmds:
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- logExit("Can not get the partition of path: %s "
- "%scommand: %s. %sError: %s"
- % (self.installPath, os.linesep,
- cmd, os.linesep, stderr))
+ logExit(
+ "Can not get the partition of path: %s "
+ "%scommand: %s. %sError: %s"
+ % (self.installPath, os.linesep, cmd, os.linesep, stderr)
+ )
stdout_list.append(stdout)
- log("The partition of path \"%s\": %s"
- % (self.installPath, stdout))
+ log('The partition of path "%s": %s' % (self.installPath, stdout))
self.checkPartitionOfDataDir(strCmd1, stdout_list)
log("End check dir.")
def checkPartitionOfDataDir(self, strCmd1, stdout_list):
- strCmd5 = "df -h \"%s\"" % self.data
- strCmd6 = "df -h \"%s\" | head -2" % self.data
- strCmd7 = "df -h \"%s\" | head -2 |tail -1" % self.data
- strCmd8 = ("df -h \"%s\" | head -2 |tail -1 "
- "| awk -F\" \" '{print $1}'" % self.data)
+ strCmd5 = 'df -h "%s"' % self.data
+ strCmd6 = 'df -h "%s" | head -2' % self.data
+ strCmd7 = 'df -h "%s" | head -2 |tail -1' % self.data
+ strCmd8 = (
+ 'df -h "%s" | head -2 |tail -1 ' "| awk -F\" \" '{print $1}'" % self.data
+ )
cmds = [strCmd5, strCmd6, strCmd7, strCmd8]
stdout = ""
for cmd in cmds:
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- logExit("Can not get the partition of path: %s "
- "%scommand: %s. %sError: %s"
- % (self.data, os.linesep,
- cmd, os.linesep, stderr))
- log("The partition of path \"%s\": %s"
- % (self.data, stdout))
+ logExit(
+ "Can not get the partition of path: %s "
+ "%scommand: %s. %sError: %s"
+ % (self.data, os.linesep, cmd, os.linesep, stderr)
+ )
+ log('The partition of path "%s": %s' % (self.data, stdout))
vfs = os.statvfs(self.data)
availableSize = vfs.f_bavail * vfs.f_bsize / (1024 * 1024)
@@ -2009,15 +2232,19 @@ class Installer:
# check install path and data dir are in the same path or not
if stdout_list[0] == stdout_list[1]:
- if (availableSize < 20580):
- logExit("The sum of database program and data directories"
- " available size smaller than 20580M, "
- "current size is: %sM" % str(availableSize))
+ if availableSize < 20580:
+ logExit(
+ "The sum of database program and data directories"
+ " available size smaller than 20580M, "
+ "current size is: %sM" % str(availableSize)
+ )
else:
- if (availableSize < 20480):
- logExit("Database data directory available size smaller"
- " than 20480M, current size is: "
- "%sM" % str(availableSize))
+ if availableSize < 20480:
+ logExit(
+ "Database data directory available size smaller"
+ " than 20480M, current size is: "
+ "%sM" % str(availableSize)
+ )
########################################################################
# Check if the port is used in the installation parameters, and exit
@@ -2054,21 +2281,31 @@ class Installer:
# 98: Address already in use
# 95: Operation not supported
# 13: Permission denied
- if (int(err.errno) == 98 or int(err.errno) == 95
- or int(err.errno) == 13):
- log("Error: port %s has been used,the detail"
- " information is as follows:" % value, True)
+ if (
+ int(err.errno) == 98
+ or int(err.errno) == 95
+ or int(err.errno) == 13
+ ):
+ log(
+ "Error: port %s has been used,the detail"
+ " information is as follows:" % value,
+ True,
+ )
strCmd = "netstat -unltp | grep %s" % value
ret_code, stdout, _ = _exec_popen(strCmd)
if ret_code:
- logExit("can not get detail information of the"
- " port, command: " + strCmd)
+ logExit(
+ "can not get detail information of the"
+ " port, command: " + strCmd
+ )
logExit(str(stdout))
except ValueError as ex:
logExit("check port failed: " + str(ex))
else:
- logExit("This install script can not support python version"
- " : " + gPyVersion)
+ logExit(
+ "This install script does not support python version: " + gPyVersion
+ )
def checkInnerPort(self, value):
TIME_OUT = 2
@@ -2105,7 +2342,9 @@ class Installer:
socket.inet_pton(socket.AF_INET6, nodeIp)
self.IPV_TYPE = "ipv6"
except socket.error:
- logExit("The invalid IP address : %s is not ipv4 or ipv6 format." % nodeIp)
+ logExit(
+ "The invalid IP address : %s is not ipv4 or ipv6 format." % nodeIp
+ )
if self.IPV_TYPE == "ipv6":
ping_cmd = "ping6"
@@ -2116,9 +2355,11 @@ class Installer:
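+ # Send three echo requests; "grep ttl | wc -l" prints 3 only when
+ # every reply arrived, i.e. the address is reachable.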
cmd = "%s %s -i 1 -c 3 | grep ttl | wc -l" % (ping_cmd, nodeIp)
ret_code, stdout, _ = _exec_popen(cmd)
- if ret_code or stdout != '3':
- logExit("The invalid IP address is %s. "
- "ret_code : %s, stdout : %s" % (nodeIp, ret_code, stdout))
+ if ret_code or stdout != "3":
+ logExit(
+ "The invalid IP address is %s. "
+ "ret_code : %s, stdout : %s" % (nodeIp, ret_code, stdout)
+ )
if all_zero_addr_after_ping(nodeIp):
ip_is_found = 1
@@ -2127,11 +2368,12 @@ class Installer:
ret_code, ip_is_found, _ = _exec_popen(ip_cmd)
else:
ip_is_found = 0
-
- if ret_code or not int(ip_is_found):
- logExit("The invalid IP address is %s. "
- "ret_code : %s, ip_is_found : %s" % (nodeIp, ret_code, ip_is_found))
+ if ret_code or not int(ip_is_found):
+ logExit(
+ "The invalid IP address is %s. "
+ "ret_code : %s, ip_is_found : %s" % (nodeIp, ret_code, ip_is_found)
+ )
log("checked the node IP address : %s" % nodeIp)
@@ -2140,39 +2382,45 @@ class Installer:
# not meet the requirements of the database.
#########################################################################
def set_numa_config(self):
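+ # Derive the CPU-group bindings for shared memory: without NUMA,
+ # fall back to the full "On-line CPU(s) list"; otherwise collect
+ # each NUMA node's CPU range reported by lscpu.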
- if not os.path.exists('/usr/bin/lscpu'):
+ if not os.path.exists("/usr/bin/lscpu"):
log("Warning: lscpu path get error")
return
-
- _, result, _ = _exec_popen('/usr/bin/lscpu')
+
+ _, result, _ = _exec_popen("/usr/bin/lscpu")
if "NUMA node(s)" not in result:
- err_code, ans, err_msg = _exec_popen('/usr/bin/lscpu | grep -i "On-line CPU(s) list"')
- _ans = ans.strip().split(':')
+ err_code, ans, err_msg = _exec_popen(
+ '/usr/bin/lscpu | grep -i "On-line CPU(s) list"'
+ )
+ _ans = ans.strip().split(":")
if len(_ans) != 2:
log("Warning: CPU(s) list get error, ans:%s" % ans)
return
self.cantiandConfigs["SHM_MYSQL_CPU_GROUP_INFO"] = _ans[1].strip() + " "
self.cantiandConfigs["SHM_CPU_GROUP_INFO"] = _ans[1].strip() + " "
return
-
- ret_code, result, stderr = _exec_popen('/usr/bin/lscpu | grep -i "NUMA node(s)"')
+
+ ret_code, result, stderr = _exec_popen(
+ '/usr/bin/lscpu | grep -i "NUMA node(s)"'
+ )
if ret_code:
logExit("can not get numa node parameters, err: %s" % stderr)
- _result = result.strip().split(':')
-
+ _result = result.strip().split(":")
+
if len(_result) != 2:
log("Warning: numa get error, result:%s" % result)
return
numa_num = 0
numa_info = ""
- #判断_result[1].strip()
+ # validate that _result[1].strip() is a digit
if not _result[1].strip().isdigit():
log("Warning: numa(s) size get error, result:%s" % result)
return
while numa_num < int(_result[1].strip()):
- err_code, ans, err_msg = _exec_popen('/usr/bin/lscpu | grep -i "NUMA node%s"' % numa_num)
- _ans = ans.strip().split(':')
+ err_code, ans, err_msg = _exec_popen(
+ '/usr/bin/lscpu | grep -i "NUMA node%s"' % numa_num
+ )
+ _ans = ans.strip().split(":")
if len(_ans) != 2:
log("Warning: numa node get error, ans:%s" % ans)
return
@@ -2190,16 +2438,17 @@ class Installer:
"""
log("Checking kernel parameters.", True)
# GB MB kB
- GB = 1024*1024*1024
- MB = 1024*1024
+ GB = 1024 * 1024 * 1024
+ MB = 1024 * 1024
KB = 1024
# The size of database
- log_buffer_size = 4*MB
- shared_pool_size = 128*MB
- data_buffer_size = 128*MB
- temp_buffer_size = 32*MB
- sga_buff_size = (log_buffer_size + shared_pool_size + data_buffer_size
- + temp_buffer_size)
+ log_buffer_size = 4 * MB
+ shared_pool_size = 128 * MB
+ data_buffer_size = 128 * MB
+ temp_buffer_size = 32 * MB
+ sga_buff_size = (
+ log_buffer_size + shared_pool_size + data_buffer_size + temp_buffer_size
+ )
# getNuma
self.set_numa_config()
@@ -2213,12 +2462,18 @@ class Installer:
check_kernel_parameter(key)
check_invalid_symbol(value)
# Unit conversion
- sga_buff_size = self.doUnitConversion(GB, MB, KB, key, value,
- sga_buff_size,
- temp_buffer_size,
- data_buffer_size,
- shared_pool_size,
- log_buffer_size)
+ sga_buff_size = self.doUnitConversion(
+ GB,
+ MB,
+ KB,
+ key,
+ value,
+ sga_buff_size,
+ temp_buffer_size,
+ data_buffer_size,
+ shared_pool_size,
+ log_buffer_size,
+ )
except ValueError as ex:
logExit("check kernel parameter failed: " + str(ex))
@@ -2227,8 +2482,9 @@ class Installer:
# Check the ip address
for item in _list:
if len(_list) != 1 and all_zero_addr_after_ping(item):
- logExit("lsnr_addr contains all-zero ip,"
- " can not specify other ip.")
+ logExit("lsnr_addr contains all-zero ip, can not specify other ip.")
self.checkIPisVaild(item)
else:
# If this parameter is empty, the IPv4 is used by default.
@@ -2253,18 +2509,23 @@ class Installer:
if ret_code:
logExit("can not get shmmax parameters, command: %s" % strCmd)
- cmd = ("cat /proc/meminfo |grep -wE 'MemFree:|Buffers:|Cached:"
- "|SwapCached' |awk '{sum += $2};END {print sum}'")
+ cmd = (
+ "cat /proc/meminfo |grep -wE 'MemFree:|Buffers:|Cached:"
+ "|SwapCached' |awk '{sum += $2};END {print sum}'"
+ )
ret_code, cur_avi_memory, strerr = _exec_popen(cmd)
if ret_code:
- logExit("can not get shmmax parameters, command: %s, err: %s" % (cmd, stderr))
+ logExit(
+ "can not get shmmax parameters, command: %s, err: %s" % (cmd, stderr)
+ )
if sga_buff_size < 114 * MB:
- logExit("sga_buff_size should bigger than or equal to 114*MB,"
- " please check it!")
+ logExit(
+ "sga_buff_size should bigger than or equal to 114*MB,"
+ " please check it!"
+ )
try:
if sga_buff_size > int(cur_avi_memory) * KB:
- logExit("sga_buff_size should smaller than shmmax,"
- " please check it!")
+ logExit("sga_buff_size should smaller than shmmax," " please check it!")
except ValueError as ex:
logExit("check kernel parameter failed: " + str(ex))
@@ -2290,28 +2551,37 @@ class Installer:
elif key == "LSNR_PORT":
self.lsnr_port = value
elif key == "ENABLE_SYSDBA_LOGIN":
- self.enableSysdbaLogin = Installer.check_pare_bool_value(
- key, value)
+ self.enableSysdbaLogin = Installer.check_pare_bool_value(key, value)
elif key == "REPL_AUTH":
- self.repl_auth = Installer.check_pare_bool_value(
- key, value)
+ self.repl_auth = Installer.check_pare_bool_value(key, value)
elif key == "REPL_SCRAM_AUTH":
- self.repl_scram_auth = Installer.check_pare_bool_value(
- key, value)
+ self.repl_scram_auth = Installer.check_pare_bool_value(key, value)
elif key == "ENABLE_ACCESS_DC":
- self.enable_access_dc = Installer.check_pare_bool_value(
- key, value)
+ self.enable_access_dc = Installer.check_pare_bool_value(key, value)
elif key == "REPLACE_VERIFY_PASSWORD":
- self.replace_password_verify = Installer.check_pare_bool_value(
- key, value)
+ self.replace_password_verify = Installer.check_pare_bool_value(key, value)
else:
return
- def doUnitConversion(self, GB, MB, KB, key, value, sga_buff_size,
- temp_buffer_size, data_buffer_size, shared_pool_size,
- log_buffer_size):
- if key in ["TEMP_BUFFER_SIZE", "DATA_BUFFER_SIZE",
- "SHARED_POOL_SIZE", "LOG_BUFFER_SIZE"]:
+ def doUnitConversion(
+ self,
+ GB,
+ MB,
+ KB,
+ key,
+ value,
+ sga_buff_size,
+ temp_buffer_size,
+ data_buffer_size,
+ shared_pool_size,
+ log_buffer_size,
+ ):
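+ # Normalize G/M/K-suffixed values to bytes so the four buffer
+ # sizes can be summed into sga_buff_size.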
+ if key in [
+ "TEMP_BUFFER_SIZE",
+ "DATA_BUFFER_SIZE",
+ "SHARED_POOL_SIZE",
+ "LOG_BUFFER_SIZE",
+ ]:
if value[0:-1].isdigit() and value[-1:] in ["G", "M", "K"]:
unit_map = {
"G": GB,
@@ -2350,24 +2620,26 @@ class Installer:
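+ # Integrity check: stream the run package through SHA-256 in
+ # 8096-byte chunks and compare against the shipped .sha256 file.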
with open(self.runFile, "rb") as _file:
# Use sha256 when python version is lower higher 2.4 and
# lower than 2.5.
- if(gPyVersion >= PYTHON242 and gPyVersion < PYTHON25):
+ if gPyVersion >= PYTHON242 and gPyVersion < PYTHON25:
sha256Obj = sha256.new()
# Use hash when python version is higher than 2.5
- elif(gPyVersion >= PYTHON25):
+ elif gPyVersion >= PYTHON25:
sha256Obj = hashlib.sha256()
- if(sha256Obj is None):
- logExit("check integrality of bin file failed, can not get"
- " verification Obj.")
+ if sha256Obj is None:
+ logExit(
+ "check integrality of bin file failed, can not get"
+ " verification Obj."
+ )
while True:
strRead = _file.read(8096)
- if(not strRead):
+ if not strRead:
break
sha256Obj.update(strRead)
strSHA256 = sha256Obj.hexdigest()
with open(self.runSha256File, "r") as fileSHA256:
strRead = fileSHA256.readline()
oldSHA256 = strRead.strip()
- if(strSHA256 == oldSHA256):
+ if strSHA256 == oldSHA256:
isSameSHA256 = True
else:
isSameSHA256 = False
@@ -2381,9 +2653,9 @@ class Installer:
log("End check integrality of bin file")
def execLogExit(self):
- if (self.runFile == ""):
+ if self.runFile == "":
logExit("Can not find run file.")
- if (self.runSha256File == ""):
+ if self.runSha256File == "":
logExit("Can not find verification file.")
def changeAppPermission(self):
@@ -2393,32 +2665,45 @@ class Installer:
output: NA
"""
# change install path privilege to 700
- strCmd = "chmod %s %s -R" % (CommonValue.KEY_DIRECTORY_MODE,
- self.installPath)
+ strCmd = "chmod %s %s -R" % (CommonValue.KEY_DIRECTORY_MODE, self.installPath)
# chmod add-ons/ file 500
- strCmd += ("&& find '%s'/add-ons -type f | xargs chmod %s "
- % (self.installPath, CommonValue.MID_FILE_MODE))
+ strCmd += "&& find '%s'/add-ons -type f | xargs chmod %s " % (
+ self.installPath,
+ CommonValue.MID_FILE_MODE,
+ )
# chmod admin/ file 600
- strCmd += ("&& find '%s'/admin -type f | xargs chmod %s "
- % (self.installPath, CommonValue.KEY_FILE_MODE))
+ strCmd += "&& find '%s'/admin -type f | xargs chmod %s " % (
+ self.installPath,
+ CommonValue.KEY_FILE_MODE,
+ )
# chmod lib/ file 500
- strCmd += ("&& find '%s'/lib -type f | xargs chmod %s"
- % (self.installPath, CommonValue.MID_FILE_MODE))
+ strCmd += "&& find '%s'/lib -type f | xargs chmod %s" % (
+ self.installPath,
+ CommonValue.MID_FILE_MODE,
+ )
# chmod bin/ file 500
- strCmd += ("&& find '%s'/bin -type f | xargs chmod %s "
- % (self.installPath, CommonValue.MID_FILE_MODE))
+ strCmd += "&& find '%s'/bin -type f | xargs chmod %s " % (
+ self.installPath,
+ CommonValue.MID_FILE_MODE,
+ )
package_xml = os.path.join(self.installPath, "package.xml")
if os.path.exists(package_xml):
- strCmd += ("&& chmod %s '%s'/package.xml"
- % (CommonValue.MIN_FILE_MODE, self.installPath))
+ strCmd += "&& chmod %s '%s'/package.xml" % (
+ CommonValue.MIN_FILE_MODE,
+ self.installPath,
+ )
log("Change app permission cmd: %s" % strCmd)
ret_code, _, stderr = _exec_popen(strCmd)
if ret_code:
self.FAILED_POS = self.DECOMPRESS_BIN_FAILED
self.rollBack()
- logExit("chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE
- + str(ret_code) + os.linesep + stderr)
+ logExit(
+ "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
if not self.close_ssl:
for file_name in os.listdir(self.ssl_path):
@@ -2432,15 +2717,15 @@ class Installer:
"""
# eg 'length in [8-64]'
if len(passwd) < shortest_len or len(passwd) > 64:
- raise ValueError("The length of password must be %s to 64."
- % shortest_len)
+ raise ValueError("The length of password must be %s to 64." % shortest_len)
# Can't save with user name
if passwd == self.user:
raise ValueError("Error: Password can't be the same as username.")
elif passwd == self.user[::-1]:
- raise ValueError("Error: Password cannot be the same as username "
- "in reverse order")
+ raise ValueError(
+ "Error: Password cannot be the same as username " "in reverse order"
+ )
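+ # Complexity rule: at least three of the four character classes
+ # (upper, lower, digit, special) and no characters outside them.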
upper_cases = set("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
lower_cases = set("abcdefghijklmnopqrstuvwxyz")
@@ -2454,16 +2739,19 @@ class Installer:
if passwd_set & cases:
types += 1
if types < 3:
- raise ValueError("Error: Password must contains at least three"
- " different types of characters.")
+ raise ValueError(
+ "Error: Password must contains at least three"
+ " different types of characters."
+ )
# Only can contains enumerated cases
all_cases = upper_cases | lower_cases | digits | special_cases
un_cases = passwd_set - all_cases
if un_cases:
- raise ValueError("Error: There are characters that are not"
- " allowed in the password: '%s'"
- % "".join(un_cases))
+ raise ValueError(
+ "Error: There are characters that are not"
+ " allowed in the password: '%s'" % "".join(un_cases)
+ )
def get_new_passwd(self, pw_prompt, user_prompt, shortest_len):
"""Get new passwd"""
@@ -2498,9 +2786,11 @@ class Installer:
# 0. "_SYS_PASSWORD" can't be set when ENABLE_SYSDBA_LOGIN is False
sys_password = self.cantiandConfigs["_SYS_PASSWORD"]
if not self.enableSysdbaLogin and len(sys_password) != 0:
- raise ValueError("Can't use _SYS_PASSWORD to set the password of "
- "user [SYS] in the installation script when "
- "ENABLE_SYSDBA_LOGIN is False.")
+ raise ValueError(
+ "Can't use _SYS_PASSWORD to set the password of "
+ "user [SYS] in the installation script when "
+ "ENABLE_SYSDBA_LOGIN is False."
+ )
# 1. Get passed from parameter -C
# Set passwd of SYS in cantiand.ini by parameter -C
@@ -2511,9 +2801,8 @@ class Installer:
if sys.stdin.isatty():
# If not pipe content, get passwd by interactive input
g_opts.db_passwd = self.get_new_passwd(
- pw_prompt="database password",
- user_prompt="user [SYS]",
- shortest_len=8)
+ pw_prompt="database password", user_prompt="user [SYS]", shortest_len=8
+ )
else:
try:
# Get passwd from pipe
@@ -2526,22 +2815,40 @@ class Installer:
def copy_dbstor_path(self):
str_cmd = ""
if g_opts.use_dbstor:
- os.makedirs("%s/dbstor/conf/dbs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/conf/infra/config" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/data/logs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/dbstor/data/ftds" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
+ os.makedirs(
+ "%s/dbstor/conf/dbs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
+ os.makedirs(
+ "%s/dbstor/conf/infra/config" % self.data,
+ CommonValue.KEY_DIRECTORY_PERMISSION,
+ )
+ os.makedirs(
+ "%s/dbstor/data/logs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
+ os.makedirs(
+ "%s/dbstor/data/ftds" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
if is_rdma_startup():
- str_cmd += " && cp %s/cfg/node_config_rdma.xml %s/dbstor/conf/infra/config/node_config.xml" % (
- self.installPath, self.data)
+ str_cmd += (
+ " && cp %s/cfg/node_config_rdma.xml %s/dbstor/conf/infra/config/node_config.xml"
+ % (self.installPath, self.data)
+ )
else:
- str_cmd += " && cp %s/cfg/node_config_tcp.xml %s/dbstor/conf/infra/config/node_config.xml" % (
- self.installPath, self.data)
-
- str_cmd += " && cp %s/cfg/osd.cfg %s/dbstor/conf/infra/config/osd.cfg" % (self.installPath, self.data)
- str_cmd += " && cp /home/conf/dbstor_config.ini %s/dbstor/conf/dbs/" % (self.data)
-
+ str_cmd += (
+ " && cp %s/cfg/node_config_tcp.xml %s/dbstor/conf/infra/config/node_config.xml"
+ % (self.installPath, self.data)
+ )
+
+ str_cmd += " && cp %s/cfg/osd.cfg %s/dbstor/conf/infra/config/osd.cfg" % (
+ self.installPath,
+ self.data,
+ )
+ str_cmd += " && cp /home/conf/dbstor_config.ini %s/dbstor/conf/dbs/" % (
+ self.data
+ )
+
return str_cmd
-
+
#########################################################################
# Unzip the installation files to the installation directory.
#########################################################################
@@ -2554,24 +2861,34 @@ class Installer:
log("Decompressing run file.", True)
# let bin executable
- str_cmd = "chmod %s \"%s\"" % (CommonValue.KEY_DIRECTORY_MODE,
- self.runFile)
+ str_cmd = 'chmod %s "%s"' % (CommonValue.KEY_DIRECTORY_MODE, self.runFile)
log("decompress bin file executable cmd: %s" % str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
self.FAILED_POS = self.DECOMPRESS_BIN_FAILED
self.rollBack()
- logExit("decompress bin file executable return: %s%s%s"
- % (str(ret_code), os.linesep, stderr))
+ logExit(
+ "decompress bin file executable return: %s%s%s"
+ % (str(ret_code), os.linesep, stderr)
+ )
# decompress bin file.
cantian_pkg_file = "%s/%s" % (self.installPath, self.run_pkg_name)
- str_cmd = "tar -xvf \"%s\" -C \"%s\"" % (self.runFile, self.installPath)
- str_cmd = ("%s && cp -rf %s/add-ons %s/admin %s/bin %s/cfg %s/lib "
- "%s/package.xml %s"
- % (str_cmd, cantian_pkg_file, cantian_pkg_file, cantian_pkg_file,
- cantian_pkg_file, cantian_pkg_file, cantian_pkg_file,
- self.installPath))
-
+ str_cmd = 'tar -xvf "%s" -C "%s"' % (self.runFile, self.installPath)
+ str_cmd = (
+ "%s && cp -rf %s/add-ons %s/admin %s/bin %s/cfg %s/lib "
+ "%s/package.xml %s"
+ % (
+ str_cmd,
+ cantian_pkg_file,
+ cantian_pkg_file,
+ cantian_pkg_file,
+ cantian_pkg_file,
+ cantian_pkg_file,
+ cantian_pkg_file,
+ self.installPath,
+ )
+ )
+
str_cmd += self.copy_dbstor_path()
str_cmd += " && rm -rf %s" % cantian_pkg_file
log("Decompress cmd: " + str_cmd)
@@ -2579,24 +2896,23 @@ class Installer:
if ret_code:
self.FAILED_POS = self.DECOMPRESS_BIN_FAILED
self.rollBack()
- logExit("Decompress bin return: " + str(ret_code)
- + os.linesep + stderr)
+ logExit("Decompress bin return: " + str(ret_code) + os.linesep + stderr)
# change app permission
self.changeAppPermission()
# change owner to user:group
- str_cmd = "chown %s:%s -R %s " % (self.user, self.group,
- self.installPath)
+ str_cmd = "chown %s:%s -R %s " % (self.user, self.group, self.installPath)
# Change the owner
log("Change owner cmd: %s" % str_cmd)
ret_code, _, stderr = _exec_popen(str_cmd)
if ret_code:
self.FAILED_POS = self.DECOMPRESS_BIN_FAILED
self.rollBack()
- logExit("chown to %s: %s return: %s%s%s"
- % (self.user, self.group, str(ret_code),
- os.linesep, stderr))
+ logExit(
+ "chown to %s: %s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
log("End decompress bin file.")
@@ -2617,22 +2933,30 @@ class Installer:
def exportUserEnv(self):
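+ # Append CTDB_HOME/PATH/LD_LIBRARY_PATH exports to the user's
+ # .bashrc; CTDB_DATA and CMS_HOME are written only when no
+ # previous CTDB_DATA was found.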
try:
with open(self.userProfile, "a") as _file:
- _file.write("export CTDB_HOME=\"%s\"" % self.installPath)
+ _file.write('export CTDB_HOME="%s"' % self.installPath)
_file.write(os.linesep)
- _file.write("export PATH=\"%s\":\"%s\":$PATH"
- % (os.path.join(self.installPath, "bin"),
- os.path.join(MYSQL_BIN_DIR, "bin")))
+ _file.write(
+ 'export PATH="%s":"%s":$PATH'
+ % (
+ os.path.join(self.installPath, "bin"),
+ os.path.join(MYSQL_BIN_DIR, "bin"),
+ )
+ )
_file.write(os.linesep)
- _file.write("export LD_LIBRARY_PATH=\"%s\":\"%s\""
- ":$LD_LIBRARY_PATH"
- % (os.path.join(self.installPath, "lib"),
- os.path.join(self.installPath, "add-ons")))
+ _file.write(
+ 'export LD_LIBRARY_PATH="%s":"%s"'
+ ":$LD_LIBRARY_PATH"
+ % (
+ os.path.join(self.installPath, "lib"),
+ os.path.join(self.installPath, "add-ons"),
+ )
+ )
_file.write(os.linesep)
if self.oldDataPath == "":
# set CTDB_DATA
- _file.write("export CTDB_DATA=\"%s\"" % self.data)
+ _file.write('export CTDB_DATA="%s"' % self.data)
_file.write(os.linesep)
- _file.write("export CMS_HOME=\"%s\"" % self.data)
+ _file.write('export CMS_HOME="%s"' % self.data)
_file.write(os.linesep)
_file.flush()
except IOError as ex:
@@ -2658,19 +2982,25 @@ class Installer:
# Avoid create database failed by the value of CTSQL_SSL_KEY_PASSWD
self.clean_ssl_env()
- os.environ['PATH'] = (os.path.join(self.installPath, "bin")
- + ":" + os.environ['PATH'])
+ os.environ["PATH"] = (
+ os.path.join(self.installPath, "bin") + ":" + os.environ["PATH"]
+ )
# on some systems LD_LIBRARY_PATH is not set,
# so we must check it, or an exception will be raised
- if 'LD_LIBRARY_PATH' in os.environ:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s:%s" % (
- os.path.join(self.installPath, "lib"), os.path.join(
- self.installPath, "add-ons",),
- os.environ['LD_LIBRARY_PATH']))
+ if "LD_LIBRARY_PATH" in os.environ:
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s:%s" % (
+ os.path.join(self.installPath, "lib"),
+ os.path.join(
+ self.installPath,
+ "add-ons",
+ ),
+ os.environ["LD_LIBRARY_PATH"],
+ )
else:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s" % (
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s" % (
os.path.join(self.installPath, "lib"),
- os.path.join(self.installPath, "add-ons"),))
+ os.path.join(self.installPath, "add-ons"),
+ )
os.environ["CTDB_HOME"] = self.installPath
os.environ["CTDB_DATA"] = self.data
os.environ["CMS_HOME"] = self.data
@@ -2700,8 +3030,10 @@ class Installer:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (conf_file, cmd, stderr)
+ )
def clean_old_conf(self, param_list, conf_file):
"""
@@ -2712,28 +3044,38 @@ class Installer:
cmd = ""
# make the command of delete the parameter
for parameter in param_list:
- cmd += "sed -i '/^%s/d' %s;" % (parameter.replace('[', '\[').replace(']', '\]'), conf_file)
+ cmd += "sed -i '/^%s/d' %s;" % (
+ parameter.replace("[", "\[").replace("]", "\]"),
+ conf_file,
+ )
if cmd:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s"
- % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (conf_file, cmd, stderr)
+ )
def generate_nomount_passwd(self, plain_passwd=""):
if g_opts.install_user_privilege == "withoutroot":
cmd = "%s/bin/ctencrypt -e PBKDF2" % self.installPath
else:
- cmd = (""" su - '%s' -c "%s/bin/ctencrypt -e PBKDF2" """
- % (self.user, self.installPath))
- g_opts.db_passwd = g_opts.db_passwd if len(plain_passwd.strip()) == 0 else plain_passwd.strip()
+ cmd = """ su - '%s' -c "%s/bin/ctencrypt -e PBKDF2" """ % (
+ self.user,
+ self.installPath,
+ )
+ g_opts.db_passwd = (
+ g_opts.db_passwd if len(plain_passwd.strip()) == 0 else plain_passwd.strip()
+ )
values = [g_opts.db_passwd, g_opts.db_passwd]
ret_code, stdout, stderr = _exec_popen(cmd, values)
if ret_code:
- raise OSError("Failed to encrypt password of user [sys]."
- " Error: %s" % (stderr+os.linesep+stderr))
+ raise OSError(
+ "Failed to encrypt password of user [sys]."
+ " Error: %s" % (stderr + os.linesep + stderr)
+ )
# Example of output:
# Please enter password to encrypt:
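
generate_nomount_passwd pipes the password to ctencrypt twice via `_exec_popen(cmd, values)` because the tool prompts for the password and its confirmation on stdin. A rough sketch of how such a helper can answer prompts through stdin, assuming the tool reads one answer per line (the ctencrypt path is a placeholder):

```python
import subprocess

def run_with_stdin(cmd, values):
    """Run a shell command, answering its stdin prompts with the given values."""
    proc = subprocess.Popen(cmd, shell=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            text=True)
    stdout, stderr = proc.communicate("\n".join(values) + "\n")
    return proc.returncode, stdout, stderr

# Hypothetical usage mirroring generate_nomount_passwd:
# code, out, err = run_with_stdin("/opt/cantian/bin/ctencrypt -e PBKDF2",
#                                 ["secret", "secret"])
```
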
@@ -2758,17 +3100,25 @@ class Installer:
cmd = "echo >> %s" % conf_file
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (file, cmd, stderr)
+ )
# Generate new kernel parameters
common_parameters = copy.deepcopy(config)
# Set the password for NOMOUNT mode before creating the database.
if encrypt_passwd:
- common_parameters["_SYS_PASSWORD"] = self.generate_nomount_passwd(common_parameters["_SYS_PASSWORD"])
- if "GCC_TYPE" in common_parameters and\
- (common_parameters["GCC_TYPE"] == "FILE" or common_parameters["GCC_TYPE"] == "NFS"):
- common_parameters["GCC_HOME"] = os.path.join(common_parameters["GCC_HOME"], "gcc_file")
+ common_parameters["_SYS_PASSWORD"] = self.generate_nomount_passwd(
+ common_parameters["_SYS_PASSWORD"]
+ )
+ if "GCC_TYPE" in common_parameters and (
+ common_parameters["GCC_TYPE"] == "FILE"
+ or common_parameters["GCC_TYPE"] == "NFS"
+ ):
+ common_parameters["GCC_HOME"] = os.path.join(
+ common_parameters["GCC_HOME"], "gcc_file"
+ )
# 1.clean old conf
self.clean_old_conf(list(common_parameters.keys()), conf_file)
@@ -2785,25 +3135,34 @@ class Installer:
cmd = "echo >> %s" % conf_file
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (conf_file, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (conf_file, cmd, stderr)
+ )
size = CLUSTER_SIZE
- if g_opts.running_mode in [CANTIAND, CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST]:
+ if g_opts.running_mode in [
+ CANTIAND,
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ ]:
size = 1
node_ip = re.split(r"[;,]", self.cantiandConfigs["INTERCONNECT_ADDR"])
if len(node_ip) == 1:
node_ip.append("127.0.0.1")
-
+
gcc_home = self.cmsConfigs["GCC_HOME"]
- if self.cmsConfigs["GCC_TYPE"] == "FILE" or self.cmsConfigs["GCC_TYPE"] == "NFS":
+ if (
+ self.cmsConfigs["GCC_TYPE"] == "FILE"
+ or self.cmsConfigs["GCC_TYPE"] == "NFS"
+ ):
gcc_home = os.path.join(gcc_home, "gcc_file")
-
+
# Generate new kernel parameters
common_parameters = {
"GCC_HOME": gcc_home,
"REPORT_FILE": g_opts.log_file,
"STATUS_LOG": os.path.join(self.data, "log", "cantianstatus.log"),
- "LD_LIBRARY_PATH": os.environ['LD_LIBRARY_PATH'],
+ "LD_LIBRARY_PATH": os.environ["LD_LIBRARY_PATH"],
"USER_HOME": self.userHomePath,
"USE_GSS": g_opts.use_gss,
"USE_DBSTOR": g_opts.use_dbstor,
@@ -2844,7 +3203,7 @@ class Installer:
addr_list = [_.strip() for _ in g_opts.white_list.split(",")]
for item in re.split(r"[;,]", self.cantiandConfigs["INTERCONNECT_ADDR"]):
if item not in addr_list:
- addr_list.append(item)
+ addr_list.append(item)
if "127.0.0.1" in addr_list:
addr_list.remove("127.0.0.1")
if "::1" in addr_list:
@@ -2861,8 +3220,11 @@ class Installer:
cmd = cmd.strip(";")
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not write the %s, command: %s,"
- " output: %s" % (self.CANTIAND_HBA_FILE, cmd, stderr))
+ raise Exception(
+ "Can not write the %s, command: %s,"
+ " output: %s" % (self.CANTIAND_HBA_FILE, cmd, stderr)
+ )
+
def update_ssl_conf(self):
"""Update ssl config."""
if self.close_ssl:
@@ -2883,8 +3245,9 @@ class Installer:
key_, work_key = self.get_ctencrypt_keys_and_file()
# Get the value of SSL_KEY_PASSWORD from ini and
# CTSQL_SSL_KEY_PASSWD from env and set SSL_KEY_PASSWORD into ini
- cipher = self.encrypt_ssl_key_passwd(key_, work_key,
- ssl_constructor.passwd, skip_execute_sql)
+ cipher = self.encrypt_ssl_key_passwd(
+ key_, work_key, ssl_constructor.passwd, skip_execute_sql
+ )
# 3. Modify cantiand.ini by write
# Set the ssl config in cantiand.ini for server
self.set_ssl_conf(cipher, key_, work_key)
@@ -2926,9 +3289,9 @@ class Installer:
log("Get password of REPL_AUTH keys.")
if sys.stdin.isatty():
# If not pipe content, get passwd by interactive input
- passwd = self.get_new_passwd(pw_prompt="password",
- user_prompt="REPL_AUTH keys",
- shortest_len=16)
+ passwd = self.get_new_passwd(
+ pw_prompt="password", user_prompt="REPL_AUTH keys", shortest_len=16
+ )
else:
try:
# Get passwd from pipe
@@ -2942,15 +3305,19 @@ class Installer:
log("Generate REPL_AUTH keys.", True)
if g_opts.install_user_privilege == "withoutroot":
- cmd = "%s/bin/ctencrypt -r -d '%s'" % (self.installPath,
- self.data)
+ cmd = "%s/bin/ctencrypt -r -d '%s'" % (self.installPath, self.data)
else:
- cmd = (""" su - '%s' -c "%s/bin/ctencrypt -r -d '%s'" """
- % (self.user, self.installPath, self.data))
+ cmd = """ su - '%s' -c "%s/bin/ctencrypt -r -d '%s'" """ % (
+ self.user,
+ self.installPath,
+ self.data,
+ )
ret_code, stdout, stderr = _exec_popen(cmd, [passwd, passwd])
if ret_code:
- raise OSError("Failed to generate REPL_AUTH keys."
- " Error: %s" % (stdout + os.linesep + stderr))
+ raise OSError(
+ "Failed to generate REPL_AUTH keys."
+ " Error: %s" % (stdout + os.linesep + stderr)
+ )
log("Successfully generate REPL_AUTH keys")
except Exception as err:
self.rollBack()
@@ -2970,7 +3337,7 @@ class Installer:
insList = insStr.split(os.sep)
regString = ""
for i in insList:
- if(i == ""):
+ if i == "":
continue
else:
regString += r"\/" + i
@@ -2984,8 +3351,7 @@ class Installer:
log("Clean environment variables cmd: %s" % cmd)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- log("Failed to clean environment variables."
- " Error: %s" % stderr)
+ log("Failed to clean environment variables." " Error: %s" % stderr)
logExit("Failed to clean environment variables.")
def clean_ssl_env(self):
@@ -3016,13 +3382,15 @@ class Installer:
# Clear environment variable CTDB_DATA
data_cmd = r"/^\s*export\s*CTDB_DATA=.*$/d"
# Clear environment variable PATH about database
- path_cmd = (r"/^\s*export\s*PATH=.*%s\/bin.*:\$PATH$/d"
- % self.genregstring(self.installPath))
+ path_cmd = r"/^\s*export\s*PATH=.*%s\/bin.*:\$PATH$/d" % self.genregstring(
+ self.installPath
+ )
# Clear environment variable LD_LIBRARY_PATH about database
- lib_cmd = (r"/^\s*export\s*LD_LIBRARY_PATH=.*%s\/lib.*"
- r":.*%s\/add-ons.*:\$LD_LIBRARY_PATH$/d"
- % (self.genregstring(self.installPath),
- self.genregstring(self.installPath)))
+ lib_cmd = (
+ r"/^\s*export\s*LD_LIBRARY_PATH=.*%s\/lib.*"
+ r":.*%s\/add-ons.*:\$LD_LIBRARY_PATH$/d"
+ % (self.genregstring(self.installPath), self.genregstring(self.installPath))
+ )
# Clear environment variable CTDB_HOME
home_cmd = r"/^\s*export\s*CTDB_HOME=.*$/d"
# Clear environment variable CMS_HOME
@@ -3035,8 +3403,17 @@ class Installer:
mode_cmd = r"/^\s*export\s*CTSQL_SSL_MODE=.*$/d"
cipher_cmd = r"/^\s*export\s*CTSQL_SSL_KEY_PASSWD=.*$/d"
- cmds = [path_cmd, lib_cmd, home_cmd, cms_cmd,
- ca_cmd, cert_cmd, key_cmd, mode_cmd, cipher_cmd]
+ cmds = [
+ path_cmd,
+ lib_cmd,
+ home_cmd,
+ cms_cmd,
+ ca_cmd,
+ cert_cmd,
+ key_cmd,
+ mode_cmd,
+ cipher_cmd,
+ ]
if self.option == self.INS_ALL:
cmds.insert(0, data_cmd)
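
These sed expressions delete the matching `export` lines from the user profile. An equivalent pure-Python sketch, shown only to illustrate what the `/regex/d` commands do (the sample profile text is made up):

```python
import re

def strip_exports(profile_text, patterns):
    """Drop lines matching any of the given regexes (as sed '/re/d' would)."""
    regexes = [re.compile(p) for p in patterns]
    kept = [line for line in profile_text.splitlines()
            if not any(r.search(line) for r in regexes)]
    return "\n".join(kept) + "\n"

text = 'export CTDB_HOME="/opt/cantian"\nexport KEEP_ME=1\n'
print(strip_exports(text, [r"^\s*export\s*CTDB_HOME=.*$"]))
# export KEEP_ME=1
```
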
@@ -3063,9 +3440,11 @@ class Installer:
# Delete program.
self.rollbackFromDecompress()
# rollback from set user env
- elif(self.FAILED_POS == self.SET_ENV_FAILED
- or self.FAILED_POS == self.PRE_DATA_DIR_FAILED
- or self.FAILED_POS == self.INIT_DB_FAILED):
+ elif (
+ self.FAILED_POS == self.SET_ENV_FAILED
+ or self.FAILED_POS == self.PRE_DATA_DIR_FAILED
+ or self.FAILED_POS == self.INIT_DB_FAILED
+ ):
self.rollbackFromSetUserEnv()
# rollback from create database
@@ -3077,7 +3456,9 @@ class Installer:
log("Roll back: init ")
else:
# should not be here.
- logExit("Roll back can not recognize this operation: " + str(self.FAILED_POS))
+ logExit(
+ "Roll back can not recognize this operation: " + str(self.FAILED_POS)
+ )
log("End roll back")
def rollbackDataDirs(self):
@@ -3117,19 +3498,27 @@ class Installer:
def __kill_process(self, process_name):
# root do install, need su - user kill process
- cmd = ("proc_pid_list=\`ps ux | grep %s | grep -v grep "
- "| awk '{print \$2}'\`" % process_name)
- cmd += (" && (if [ X\\\"\$proc_pid_list\\\" != X\\\"\\\" ]; "
- "then echo \\\"\$proc_pid_list\\\" | xargs kill -9 ; "
- "exit 0; fi)")
+ cmd = (
+ "proc_pid_list=\`ps ux | grep %s | grep -v grep "
+ "| awk '{print \$2}'\`" % process_name
+ )
+ cmd += (
+ ' && (if [ X\\"\$proc_pid_list\\" != X\\"\\" ]; '
+ 'then echo \\"\$proc_pid_list\\" | xargs kill -9 ; '
+ "exit 0; fi)"
+ )
kill_cmd = "su - '%s' -c \"%s\" " % (self.user, cmd)
if g_opts.install_user_privilege == "withoutroot":
# user do install, kill process
- kill_cmd = (r"proc_pid_list=`ps ux | grep %s | grep -v grep"
- r"|awk '{print $2}'` && " % process_name)
- kill_cmd += (r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
- r"$proc_pid_list | xargs kill -9; exit 0; fi)")
+ kill_cmd = (
+ r"proc_pid_list=`ps ux | grep %s | grep -v grep"
+ r"|awk '{print $2}'` && " % process_name
+ )
+ kill_cmd += (
+ r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
+ r"$proc_pid_list | xargs kill -9; exit 0; fi)"
+ )
log("kill process cmd: %s" % kill_cmd)
ret_code, _, _ = _exec_popen(kill_cmd)
if ret_code:
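
__kill_process shells out to a ps/awk/xargs pipeline so it can run under `su - user`. When a subprocess round-trip is not required, the same effect can be had natively; a sketch (SIGKILL mirrors the script's `kill -9`):

```python
import os
import signal
import subprocess

def kill_by_name(process_name):
    """Kill all processes whose command line matches process_name (like pkill -9 -f)."""
    ret = subprocess.run(["pgrep", "-f", process_name],
                         capture_output=True, text=True)
    for pid in ret.stdout.split():
        try:
            os.kill(int(pid), signal.SIGKILL)
        except ProcessLookupError:
            pass  # process already gone
```
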
@@ -3143,7 +3532,9 @@ class Installer:
log("Begin to backup log cmd: " + strCmd)
ret_code, _, stderr = _exec_popen(strCmd)
if ret_code:
- logExit("Can not backup cantiand log command: %s, output: %s" % (strCmd, stderr))
+ logExit(
+ "Can not backup cantiand log command: %s, output: %s" % (strCmd, stderr)
+ )
log("Error:The detail log for CREATE_DB_FAILED: %s" % self.backup_log_dir)
# backup cantiand cfg before rm data
@@ -3151,7 +3542,9 @@ class Installer:
log("Begin to backup cfg cmd: " + strCmd)
ret_code, _, stderr = _exec_popen(strCmd)
if ret_code:
- logExit("Can not backup cantiand cfg command: %s, output: %s" % (strCmd, stderr))
+ logExit(
+ "Can not backup cantiand cfg command: %s, output: %s" % (strCmd, stderr)
+ )
log("Error:The detail log for CREATE_DB_FAILED: %s" % self.backup_log_dir)
# kill database process
@@ -3194,31 +3587,47 @@ class Installer:
log("Change privilege cmd: %s" % strCmd)
ret_code, _, stderr = _exec_popen(strCmd)
if ret_code:
- raise Exception("chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE + str(ret_code) + os.linesep + stderr)
+ raise Exception(
+ "chmod %s return: " % CommonValue.KEY_DIRECTORY_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
# create data, cfg, log dir, trc
data_dir = "%s/data" % self.data
if g_opts.in_container:
# Do not create the data dir in slave cluster.
if not g_opts.slave_cluster:
- create_dir_if_needed(skip_execute_in_node_1(), CommonValue.DOCKER_DATA_DIR)
+ create_dir_if_needed(
+ skip_execute_in_node_1(), CommonValue.DOCKER_DATA_DIR
+ )
cmd = "ln -s %s %s;" % (CommonValue.DOCKER_DATA_DIR, data_dir)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not link data dir, command: %s, output: %s" % (cmd, stderr))
+ raise Exception(
+ "Can not link data dir, command: %s, output: %s" % (cmd, stderr)
+ )
else:
os.makedirs(data_dir, CommonValue.KEY_DIRECTORY_PERMISSION)
os.makedirs("%s/log" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
- os.makedirs("%s/archive_log" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
+ os.makedirs(
+ "%s/archive_log" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION
+ )
os.makedirs("%s/trc" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
os.makedirs("%s/tmp" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION)
if not g_opts.use_gss and not g_opts.use_dbstor:
if g_opts.in_container:
- create_dir_if_needed(skip_execute_in_node_1(), CommonValue.DOCKER_GCC_DIR)
+ create_dir_if_needed(
+ skip_execute_in_node_1(), CommonValue.DOCKER_GCC_DIR
+ )
cmd = "ln -s %s %s" % (CommonValue.DOCKER_GCC_DIR, self.gcc_home)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not link gcc home dir, command: %s, output: %s" % (cmd, stderr))
+ raise Exception(
+ "Can not link gcc home dir, command: %s, output: %s"
+ % (cmd, stderr)
+ )
else:
os.makedirs(self.gcc_home, CommonValue.KEY_DIRECTORY_PERMISSION)
@@ -3229,15 +3638,27 @@ class Installer:
cmd = "mv -i %s/cfg %s" % (self.installPath, self.data)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("Can not create prepare data dir, command: %s, output: %s" % (cmd, stderr))
+ raise Exception(
+ "Can not create prepare data dir, command: %s, output: %s"
+ % (cmd, stderr)
+ )
# Change the mode of config files to 600
cmd = "chmod {0} {1}/cfg/{2} {1}/cfg/{3} {1}/cfg/{4}".format(
- CommonValue.KEY_FILE_MODE, self.data, self.CANTIAND_CONF_FILE,
- self.CMS_CONF_FILE, self.CANTIAND_HBA_FILE)
+ CommonValue.KEY_FILE_MODE,
+ self.data,
+ self.CANTIAND_CONF_FILE,
+ self.CMS_CONF_FILE,
+ self.CANTIAND_HBA_FILE,
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chmod %s return: " % CommonValue.KEY_FILE_MODE + str(ret_code) + os.linesep + stderr)
+ raise Exception(
+ "chmod %s return: " % CommonValue.KEY_FILE_MODE
+ + str(ret_code)
+ + os.linesep
+ + stderr
+ )
# Change the owner of config files
self.chownDataDir()
@@ -3253,14 +3674,14 @@ class Installer:
"""
if not g_opts.use_gss and not g_opts.use_dbstor:
return
-
+
cmd = "sh %s -P setcap >> %s 2>&1" % (INSTALL_SCRIPT, g_opts.log_file)
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
output = stdout + stderr
raise Exception("Failed to setcap.\ncmd: %s.\nOutput: %s" % (cmd, output))
log("setcap sucessed", True)
-
+
def prepareMysqlForSingle(self):
if g_opts.running_mode.lower() not in VALID_SINGLE_MYSQL_RUNNING_MODE:
return
@@ -3269,7 +3690,9 @@ class Installer:
self.prepare_mysql_data_dir()
self.prepare_mysql_bin_dir()
self.set_mysql_env()
+
pass
+
def start_gss(self):
if not g_opts.use_gss:
return
@@ -3280,7 +3703,9 @@ class Installer:
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
output = stdout + stderr
- raise Exception("Can not start gss.\nStart cmd: %s.\nOutput: %s" % (cmd, output))
+ raise Exception(
+ "Can not start gss.\nStart cmd: %s.\nOutput: %s" % (cmd, output)
+ )
log("gss has started", True)
def start_cms(self):
@@ -3291,9 +3716,11 @@ class Installer:
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
output = stdout + stderr
- raise Exception("Can not start cms.\nStart cmd: %s.\nOutput: %s" % (cmd, output))
+ raise Exception(
+ "Can not start cms.\nStart cmd: %s.\nOutput: %s" % (cmd, output)
+ )
log("cms has started", True)
-
+
##################################################################
# start cantian instance
##################################################################
@@ -3313,23 +3740,33 @@ class Installer:
start_mode = self.OPEN_MODE
if g_opts.node_id == 1:
start_mode = self.OPEN_MODE
-
+
# Start the instance; depending on the running mode this points to cantiand or cantiand with mysql
cmd = "sh %s -P cantiand -M %s -T %s -C %s >> %s 2>&1" % (
- INSTALL_SCRIPT, start_mode, g_opts.running_mode.lower(), g_opts.mysql_config_file_path, g_opts.log_file)
+ INSTALL_SCRIPT,
+ start_mode,
+ g_opts.running_mode.lower(),
+ g_opts.mysql_config_file_path,
+ g_opts.log_file,
+ )
if os.getuid() == 0:
cmd = "su - %s -c '" % self.user + cmd + "'"
- log("start cantiand with cmd:%s"%cmd)
+ log("start cantiand with cmd:%s" % cmd)
status, stdout, stderr = _exec_popen(cmd)
if status != 0:
output = stdout + stderr
- raise Exception("Can not start instance %s.\nStart cmd: %s.\nOutput: %s" % (self.data, cmd, output))
+ raise Exception(
+ "Can not start instance %s.\nStart cmd: %s.\nOutput: %s"
+ % (self.data, cmd, output)
+ )
# In some conditions cantian takes time to start, so wait for it
# by checking the process cyclically after the start command
# returns. If the cantiand process can't be found within the
# expected time, the startup is considered failed.
- tem_log_info, status_success = self.initSomeCondition(status_success, self.status_log)
+ tem_log_info, status_success = self.initSomeCondition(
+ status_success, self.status_log
+ )
# the log file's permission is 600, change it
if os.path.exists(self.status_log):
@@ -3339,8 +3776,10 @@ class Installer:
os.chmod(self.status_log, CommonValue.KEY_FILE_PERMISSION)
if not status_success:
- raise Exception("Can not get instance '%s' process pid,"
- "The detailed information: '%s' " % (self.data, tem_log_info))
+ raise Exception(
+ "Can not get instance '%s' process pid,"
+ "The detailed information: '%s' " % (self.data, tem_log_info)
+ )
log("cantiand has started", True)
def get_invalid_parameter(self):
@@ -3350,7 +3789,10 @@ class Installer:
ret_code, stdout, stderr = _exec_popen(cmd)
output = stdout + stderr
if ret_code:
- log("Failed to get the error message from '%s'. Output: %s" % (run_log, output))
+ log(
+ "Failed to get the error message from '%s'. Output: %s"
+ % (run_log, output)
+ )
return ""
else:
return output
@@ -3361,25 +3803,38 @@ class Installer:
for i in range(0, start_time):
time.sleep(3)
if g_opts.install_user_privilege == "withoutroot":
- cmd = ("ps ux | grep -v grep | grep %s | grep $ "
- "|awk '{print $2}'" % (self.data))
+ cmd = "ps ux | grep -v grep | grep %s | grep $ " "|awk '{print $2}'" % (
+ self.data
+ )
else:
- cmd = ("su - '%s' -c \"ps ux | grep -v grep | grep cantiand | "
- "grep %s$ |awk '{print \$2}'\" "
- % (self.user, self.data))
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST, CANTIAND_WITH_MYSQL_IN_CLUSTER]:
- cmd = ("su - '%s' -c \"ps -ef | grep -v grep | grep mysqld\""%(self.user))
+ cmd = (
+ "su - '%s' -c \"ps ux | grep -v grep | grep cantiand | "
+ "grep %s$ |awk '{print \$2}'\" " % (self.user, self.data)
+ )
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
+ cmd = "su - '%s' -c \"ps -ef | grep -v grep | grep mysqld\"" % (
+ self.user
+ )
status_log = MYSQL_LOG_FILE
pass
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
status_success = False
- tem_log_info = ("Failed to execute cmd: %s.output:%s"
- % (str(cmd), str(stderr)))
+ tem_log_info = "Failed to execute cmd: %s.output:%s" % (
+ str(cmd),
+ str(stderr),
+ )
break
else:
all_the_text = open(status_log).read()
- log("Instance start log output:%s.cmd:%s" %(str(all_the_text),str(cmd)))
+ log(
+ "Instance start log output:%s.cmd:%s"
+ % (str(all_the_text), str(cmd))
+ )
if all_the_text.find("instance started") > 0:
if stdout:
status_success = True
@@ -3395,7 +3850,10 @@ class Installer:
run_log_info = self.get_invalid_parameter()
if run_log_info:
tem_log_info += os.linesep
- tem_log_info += ("The run log error: %s%s" % (os.linesep, run_log_info))
+ tem_log_info += "The run log error: %s%s" % (
+ os.linesep,
+ run_log_info,
+ )
break
if (i + 1) == start_time:
status_success = False
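
initSomeCondition polls every 3 seconds, looking both for the process pid and for "instance started" in the status log, and surfaces the run-log tail on failure. The control flow reduced to its skeleton (function names and markers here are illustrative):

```python
import time

def wait_for_instance(check_pid, read_status_log, attempts=40, interval=3):
    """Poll until the status log reports startup and the pid is visible."""
    for _ in range(attempts):
        time.sleep(interval)
        text = read_status_log()
        if "instance started" in text and check_pid():
            return True, text
        if "startup failed" in text:
            return False, text
    return False, "timed out waiting for instance"

# ok, info = wait_for_instance(lambda: True, lambda: "instance started")
```
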
@@ -3427,53 +3885,66 @@ class Installer:
return
if g_opts.install_user_privilege == "withoutroot":
if self.enableSysdbaLogin:
- cmd = "%s/bin/ctsql / as sysdba -q -D %s -f \"%s\" " % (
+ cmd = '%s/bin/ctsql / as sysdba -q -D %s -f "%s" ' % (
self.installPath,
self.data,
- sql_file)
+ sql_file,
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
else:
- cmd = "%s/bin/ctsql %s@%s:%s -q -f \"%s\" " % (
+ cmd = '%s/bin/ctsql %s@%s:%s -q -f "%s" ' % (
self.installPath,
g_opts.db_user,
self.LOGIN_IP,
self.lsnr_port,
- sql_file)
+ sql_file,
+ )
return_code, stdout_data, stderr_data = _exec_popen(
- cmd, [g_opts.db_passwd])
+ cmd, [g_opts.db_passwd]
+ )
else:
if self.enableSysdbaLogin:
- cmd = ("su - '%s' -c \"%s/bin/ctsql / as sysdba "
- "-q -D %s -f \"%s\" \""
- % (self.user, self.installPath, self.data, sql_file))
+ cmd = (
+ "su - '%s' -c \"%s/bin/ctsql / as sysdba "
+ '-q -D %s -f "%s" "'
+ % (self.user, self.installPath, self.data, sql_file)
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
else:
- cmd = ("su - '%s' -c \"%s/bin/ctsql %s@%s:%s -q -f \"%s\"\"" % (
+ cmd = 'su - \'%s\' -c "%s/bin/ctsql %s@%s:%s -q -f "%s""' % (
self.user,
self.installPath,
g_opts.db_user,
self.LOGIN_IP,
self.lsnr_port,
- sql_file))
+ sql_file,
+ )
return_code, stdout_data, stderr_data = _exec_popen(
- cmd, [g_opts.db_passwd])
+ cmd, [g_opts.db_passwd]
+ )
output = "%s%s" % (str(stdout_data), str(stderr_data))
log("Execute sql file %s output: %s" % (sql_file, output))
if return_code:
- raise Exception("Failed to execute sql file %s, output:%s" % (sql_file, output))
+ raise Exception(
+ "Failed to execute sql file %s, output:%s" % (sql_file, output)
+ )
# return code is 0, but output has error info, CT-xxx, ZS-xxx
result = output.replace("\n", "")
if re.match(".*CT-\d{5}.*", result) or re.match(".*ZS-\d{5}.*", result):
- raise Exception("Failed to execute sql file %s, output:%s" % (sql_file, output))
+ raise Exception(
+ "Failed to execute sql file %s, output:%s" % (sql_file, output)
+ )
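
Even when ctsql exits 0, the script treats CT-xxxxx / ZS-xxxxx codes in the output as failures. The check in isolation:

```python
import re

ERROR_CODE = re.compile(r"(CT|ZS)-\d{5}")

def has_sql_error(output):
    """True if ctsql output contains a CT-xxxxx or ZS-xxxxx error code."""
    return bool(ERROR_CODE.search(output.replace("\n", "")))

assert has_sql_error("CT-00001: table not found")
assert not has_sql_error("1 rows fetched.")
```
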
def execute_mysql_update(self, sql_file):
is_need_update = False
if is_need_update:
print("update cantian sys table and views...")
action_parse = argparse.ArgumentParser()
- action_parse.add_argument("--mysql_cmd", dest="mysql_cmd", required=True) # /usr/local/mysql/bin/mysql
+ action_parse.add_argument(
+ "--mysql_cmd", dest="mysql_cmd", required=True
+ ) # /usr/local/mysql/bin/mysql
action_parse.add_argument("--mysql_user", dest="mysql_user", required=True)
args = action_parse.parse_args()
mysql_cmd = args.mysql_cmd
@@ -3484,12 +3955,13 @@ class Installer:
mysql_pwd = getpass.getpass()
cmd = "%s -u%s -p%s < %s" % (mysql_cmd, mysql_user, mysql_pwd, sql_file)
-
return_code, stdout_data, stderr_data = _exec_popen(cmd)
if return_code and MYSQL_VERSION != VERSION_DOCKER_META:
output = "%s%s" % (str(stdout_data), str(stderr_data))
- raise Exception("Failed to execute mysql file %s, output:%s, return_code:%s"
- % (sql_file, output, return_code))
+ raise Exception(
+ "Failed to execute mysql file %s, output:%s, return_code:%s"
+ % (sql_file, output, return_code)
+ )
def execute_sql(self, sql, message):
"""
@@ -3505,41 +3977,47 @@ class Installer:
# 4 normal user execute install.py and disable sysdba
if g_opts.install_user_privilege == "withoutroot":
if self.enableSysdbaLogin:
- cmd = "%s/bin/ctsql / as sysdba -q -D %s -c \"%s\" " % (
+ cmd = '%s/bin/ctsql / as sysdba -q -D %s -c "%s" ' % (
self.installPath,
self.data,
- sql)
+ sql,
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
else:
- cmd = "%s/bin/ctsql %s@%s:%s -q -c \"%s\" " % (
+ cmd = '%s/bin/ctsql %s@%s:%s -q -c "%s" ' % (
self.installPath,
g_opts.db_user,
self.LOGIN_IP,
self.lsnr_port,
- sql)
+ sql,
+ )
return_code, stdout_data, stderr_data = _exec_popen(
- cmd, [g_opts.db_passwd])
+ cmd, [g_opts.db_passwd]
+ )
else:
if self.enableSysdbaLogin:
- cmd = ("su - '%s' -c \"%s/bin/ctsql / as sysdba "
- "-q -D %s -c \\\"%s\\\" \""
- % (self.user, self.installPath, self.data, sql))
+ cmd = (
+ "su - '%s' -c \"%s/bin/ctsql / as sysdba "
+ '-q -D %s -c \\"%s\\" "'
+ % (self.user, self.installPath, self.data, sql)
+ )
return_code, stdout_data, stderr_data = _exec_popen(cmd)
else:
- cmd = ("su - '%s' -c \"%s/bin/ctsql %s@%s:%s -q"
- " -c \\\"%s\\\"\"" % (self.user,
- self.installPath,
- g_opts.db_user,
- self.LOGIN_IP,
- self.lsnr_port,
- sql))
+ cmd = "su - '%s' -c \"%s/bin/ctsql %s@%s:%s -q" ' -c \\"%s\\""' % (
+ self.user,
+ self.installPath,
+ g_opts.db_user,
+ self.LOGIN_IP,
+ self.lsnr_port,
+ sql,
+ )
return_code, stdout_data, stderr_data = _exec_popen(
- cmd, [g_opts.db_passwd])
+ cmd, [g_opts.db_passwd]
+ )
output = "%s%s" % (str(stdout_data), str(stderr_data))
if return_code:
- raise Exception("Failed to %s by sql, output:%s"
- % (message, output))
+ raise Exception("Failed to %s by sql, output:%s" % (message, output))
# return code is 0, but output has error info, CT-xxx, ZS-xxx
result = output.replace("\n", "")
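
The `su - user -c "ctsql ... -c \"%s\""` strings above need two levels of shell quoting, which is where most of the escaping in this hunk comes from. With list-form argv the inner level disappears; a sketch of the idea, assuming sysdba login (the user and paths are placeholders):

```python
import shlex
import subprocess

def run_ctsql_as_user(user, install_path, data_dir, sql):
    """Run a ctsql statement via `su - user -c ...` with one quoting layer."""
    inner = "%s/bin/ctsql / as sysdba -q -D %s -c %s" % (
        install_path, data_dir, shlex.quote(sql))
    return subprocess.run(["su", "-", user, "-c", inner],
                          capture_output=True, text=True)

# result = run_ctsql_as_user("cantian", "/opt/cantian", "/data", "SELECT 1;")
```
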
@@ -3567,19 +4045,26 @@ class Installer:
self.status_log = "%s/log/cantianstatus.log" % self.data
if os.path.exists(self.status_log):
os.remove(self.status_log)
-
+
if os.getuid() == 0:
cmd = "chown %s:%s %s;" % (self.user, self.group, INSTALL_SCRIPT)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chown to %s:%s return: %s%s%s"
- % (self.user, self.group, str(ret_code),
- os.linesep, stderr))
-
- persist_environment_variable("RUN_MODE", g_opts.running_mode.lower(),self.userProfile)
-
- # start mysql and cantian in single process mode with metadata in cantian
- if g_opts.running_mode.lower() in [CANTIAND_WITH_MYSQL, CANTIAND_WITH_MYSQL_ST, CANTIAND_WITH_MYSQL_IN_CLUSTER]:
+ raise Exception(
+ "chown to %s:%s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
+
+ persist_environment_variable(
+ "RUN_MODE", g_opts.running_mode.lower(), self.userProfile
+ )
+
+ # start mysql and cantian in single process mode with metadata in cantian
+ if g_opts.running_mode.lower() in [
+ CANTIAND_WITH_MYSQL,
+ CANTIAND_WITH_MYSQL_ST,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
try:
# start MySQL in single process mode
self.FAILED_POS = self.CREATE_DB_FAILED
@@ -3589,13 +4074,15 @@ class Installer:
self.start_gss()
# create and start threads to start mysql and create db
cantiand_thread = threading.Thread(target=self.start_cantiand)
- create_db_thread = threading.Thread(target=self.create_db_for_cantian_in_single)
+ create_db_thread = threading.Thread(
+ target=self.create_db_for_cantian_in_single
+ )
cantiand_thread.start()
create_db_thread.start()
# wait for completion of threads
cantiand_thread.join()
create_db_thread.join()
- log("Both cantiand and database creation processes are complete.",True)
+ log("Both cantiand and database creation processes are complete.", True)
except Exception as err:
self.rollBack()
logExit(str(err))
@@ -3622,17 +4109,17 @@ class Installer:
# wait for mysql process to start
log_home = self.cantiandConfigs["LOG_HOME"]
run_log = os.path.join(log_home, "run", "cantiand.rlog")
- search_string = 'start waiting for db to be open'
+ search_string = "start waiting for db to be open"
cmd = "cat %s | grep '%s'" % (run_log, search_string)
is_instance_up = False
while not is_instance_up:
ret_code, stdout, stderr = _exec_popen(cmd)
output = stdout + stderr
if ret_code:
- log("still waiting for cantian to start",True)
+ log("still waiting for cantian to start", True)
else:
is_instance_up = True
- log("cantian instance started",True)
+ log("cantian instance started", True)
# wait for 5 seconds
time.sleep(5)
log("mysqld is running. Proceeding with database creation...", True)
@@ -3666,18 +4153,21 @@ class Installer:
self._sed_file("dbfiles3", "+vg3", create_database_sql)
elif g_opts.use_dbstor:
file_name = "create_dbstor_database.sample.sql"
- if g_opts.running_mode in [CANTIAND_IN_CLUSTER, CANTIAND_WITH_MYSQL_IN_CLUSTER]:
+ if g_opts.running_mode in [
+ CANTIAND_IN_CLUSTER,
+ CANTIAND_WITH_MYSQL_IN_CLUSTER,
+ ]:
file_name = "create_dbstor_cluster_database.sample.sql"
create_database_sql = os.path.join(sql_file_path, file_name)
else:
- dbDataPath = os.path.join(self.data, "data").replace('/', '\/')
+ dbDataPath = os.path.join(self.data, "data").replace("/", r"\/")
self._sed_file("dbfiles1", dbDataPath, create_database_sql)
self._sed_file("dbfiles2", dbDataPath, create_database_sql)
self._sed_file("dbfiles3", dbDataPath, create_database_sql)
return create_database_sql
def _sed_file(self, prefix, replace, file_name):
- fixSqlFileCmd = ("sed -i 's/%s/%s/g' %s" % (prefix, replace, file_name))
+ fixSqlFileCmd = "sed -i 's/%s/%s/g' %s" % (prefix, replace, file_name)
ret_code, _, _ = _exec_popen(fixSqlFileCmd)
if ret_code:
raise Exception("sed %s failed, replace %s" % (file_name, replace))
@@ -3689,21 +4179,28 @@ class Installer:
cmd = "chown -R %s:%s %s; " % (self.user, self.group, self.ssl_path)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- raise Exception("chown to %s:%s return: %s%s%s"
- % (self.user, self.group, str(ret_code), os.linesep, stderr))
+ raise Exception(
+ "chown to %s:%s return: %s%s%s"
+ % (self.user, self.group, str(ret_code), os.linesep, stderr)
+ )
- def get_ctencrypt_keys(self, skip_execute_sql = False):
+ def get_ctencrypt_keys(self, skip_execute_sql=False):
"""Set the config about _FACTOR_KEY and LOCAL_KEY."""
# Generate Key and WorkKey
log("Generate encrypted keys.")
if g_opts.install_user_privilege == "withoutroot":
cmd = "%s/bin/ctencrypt -g" % self.installPath
else:
- cmd = "su - '%s' -c \"%s/bin/ctencrypt -g \"" % (self.user, self.installPath)
+ cmd = "su - '%s' -c \"%s/bin/ctencrypt -g \"" % (
+ self.user,
+ self.installPath,
+ )
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- raise OSError("Failed to generate encrypted keys. Error: %s"
- % (stderr+os.linesep+stderr))
+ raise OSError(
+ "Failed to generate encrypted keys. Error: %s"
+ % (stdout + os.linesep + stderr)
+ )
# Example of output:
# eg'Key: XXXXXXXXXXXXXXXXXXXXXXX'
@@ -3734,12 +4231,17 @@ class Installer:
if g_opts.install_user_privilege == "withoutroot":
cmd = "%s/bin/ctencrypt -g -o '%s' " % (self.installPath, f_factor1)
else:
- cmd = ("su - '%s' -c \"%s/bin/ctencrypt -g -o '%s' \""
- % (self.user, self.installPath, f_factor1))
+ cmd = "su - '%s' -c \"%s/bin/ctencrypt -g -o '%s' \"" % (
+ self.user,
+ self.installPath,
+ f_factor1,
+ )
ret_code, stdout, stderr = _exec_popen(cmd)
if ret_code:
- raise OSError("Failed to generate encrypted keys. Error: %s"
- % (stderr+os.linesep+stderr))
+ raise OSError(
+ "Failed to generate encrypted keys. Error: %s"
+ % (stdout + os.linesep + stderr)
+ )
# Example of output:
# eg'Key: XXXXXXXXXXXXXXXXXXXXXXX'
@@ -3762,21 +4264,31 @@ class Installer:
log("Generate encrypted keys successfully.")
return key_, work_key
- def encrypt_ssl_key_passwd(self, key_, work_key, ssl_passwd, skip_execute_sql = False):
+ def encrypt_ssl_key_passwd(
+ self, key_, work_key, ssl_passwd, skip_execute_sql=False
+ ):
"""Encrypt ssl key password with _FACTOR_KEY and LOCAL_KEY."""
log("Encrypt ssl key password.")
if g_opts.install_user_privilege == "withoutroot":
- cmd = ("""%s/bin/ctencrypt -e AES256 -f %s -k %s """
- % (self.installPath, key_, work_key))
+ cmd = """%s/bin/ctencrypt -e AES256 -f %s -k %s """ % (
+ self.installPath,
+ key_,
+ work_key,
+ )
else:
- cmd = ("su - '%s' -c \"%s/bin/ctencrypt -e AES256"
- " -f '%s' -k '%s' \""
- % (self.user, self.installPath, key_, work_key))
+ cmd = "su - '%s' -c \"%s/bin/ctencrypt -e AES256" " -f '%s' -k '%s' \"" % (
+ self.user,
+ self.installPath,
+ key_,
+ work_key,
+ )
values = [ssl_passwd, ssl_passwd]
ret_code, stdout, stderr = _exec_popen(cmd, values)
if ret_code:
- raise OSError("Failed to encrypt ssl key password. Error: %s"
- % (stderr+os.linesep+stderr))
+ raise OSError(
+ "Failed to encrypt ssl key password. Error: %s"
+ % (stdout + os.linesep + stderr)
+ )
# Example of output:
# Please enter password to encrypt:
@@ -3798,8 +4310,7 @@ class Installer:
# Don't set SSL_CA and CTSQL_SSL_CA.
# Avoid the need to copy files, env and kernel parameter
# from the primary dn when installing the backup dn.
- cantian_conf_file = os.path.join(self.data, "cfg",
- self.CANTIAND_CONF_FILE)
+ cantian_conf_file = os.path.join(self.data, "cfg", self.CANTIAND_CONF_FILE)
ssl_map = {
"SSL_CERT": os.path.join(self.ssl_path, "server.crt"),
"SSL_KEY": os.path.join(self.ssl_path, "server.key"),
@@ -3828,20 +4339,25 @@ class Installer:
log("Set user environment variables about ssl.")
try:
with open(self.userProfile, "a") as _file:
- _file.write("export CTSQL_SSL_CERT=\"%s\""
- % os.path.join(self.ssl_path, "client.crt"))
+ _file.write(
+ 'export CTSQL_SSL_CERT="%s"'
+ % os.path.join(self.ssl_path, "client.crt")
+ )
_file.write(os.linesep)
- _file.write("export CTSQL_SSL_KEY=\"%s\""
- % os.path.join(self.ssl_path, "client.key"))
+ _file.write(
+ 'export CTSQL_SSL_KEY="%s"'
+ % os.path.join(self.ssl_path, "client.key")
+ )
_file.write(os.linesep)
- _file.write("export CTSQL_SSL_MODE=\"required\"")
+ _file.write('export CTSQL_SSL_MODE="required"')
_file.write(os.linesep)
- _file.write("export CTSQL_SSL_KEY_PASSWD=\"%s\"" % cipher)
+ _file.write('export CTSQL_SSL_KEY_PASSWD="%s"' % cipher)
_file.write(os.linesep)
_file.flush()
except IOError as ex:
- raise IOError("Failed Set user environment variables about ssl: %s"
- % str(ex))
+ raise IOError(
+ "Failed Set user environment variables about ssl: %s" % str(ex)
+ )
os.environ["CTSQL_SSL_CERT"] = os.path.join(self.ssl_path, "client.crt")
os.environ["CTSQL_SSL_KEY"] = os.path.join(self.ssl_path, "client.key")
@@ -3858,38 +4374,70 @@ class Installer:
if g_opts.install_user_privilege == "withoutroot":
if not g_opts.db_passwd:
# connect database by sysdba
- cmd = ("%s/bin/shutdowndb.sh -h %s -p %s -w -m %s -D %s -T %d"
- % (self.installPath, host_ip, self.lsnr_port,
- "immediate", self.data, timeout))
+ cmd = "%s/bin/shutdowndb.sh -h %s -p %s -w -m %s -D %s -T %d" % (
+ self.installPath,
+ host_ip,
+ self.lsnr_port,
+ "immediate",
+ self.data,
+ timeout,
+ )
ret_code, _, stderr = _exec_popen(cmd)
else:
# connect database by username and password
- cmd = ("%s/bin/shutdowndb.sh -h"
- " %s -p %s -U %s -m %s -W -D %s -T %d" %
- (self.installPath, host_ip,
- self.lsnr_port, g_opts.db_user, "immediate",
- self.data, timeout))
+ cmd = (
+ "%s/bin/shutdowndb.sh -h"
+ " %s -p %s -U %s -m %s -W -D %s -T %d"
+ % (
+ self.installPath,
+ host_ip,
+ self.lsnr_port,
+ g_opts.db_user,
+ "immediate",
+ self.data,
+ timeout,
+ )
+ )
ret_code, _, stderr = _exec_popen(cmd, [g_opts.db_passwd])
else:
if not g_opts.db_passwd:
# connect database by sysdba
- cmd = ("su - '%s' -c \"%s/bin/shutdowndb.sh -h %s"
- " -p %s -w -m %s -D %s -T %d \" "
- % (self.user, self.installPath, host_ip, self.lsnr_port,
- "immediate", self.data, timeout))
+ cmd = (
+ "su - '%s' -c \"%s/bin/shutdowndb.sh -h %s"
+ ' -p %s -w -m %s -D %s -T %d " '
+ % (
+ self.user,
+ self.installPath,
+ host_ip,
+ self.lsnr_port,
+ "immediate",
+ self.data,
+ timeout,
+ )
+ )
ret_code, _, stderr = _exec_popen(cmd)
else:
# connect database by username and password
- cmd = ("su - '%s' -c \" %s/bin/shutdowndb.sh -h"
- " %s -p %s -U %s -m %s -W -D %s -T %d \" " %
- (self.user, self.installPath,
- host_ip, self.lsnr_port, g_opts.db_user, "immediate",
- self.data, timeout))
+ cmd = (
+ "su - '%s' -c \" %s/bin/shutdowndb.sh -h"
+ ' %s -p %s -U %s -m %s -W -D %s -T %d " '
+ % (
+ self.user,
+ self.installPath,
+ host_ip,
+ self.lsnr_port,
+ g_opts.db_user,
+ "immediate",
+ self.data,
+ timeout,
+ )
+ )
ret_code, _, stderr = _exec_popen(cmd, [g_opts.db_passwd])
if ret_code:
- raise Exception("Failed to stop database. Error: %s"
- % (stderr+os.linesep+stderr))
+ raise Exception(
+ "Failed to stop database. Error: %s" % (stderr + os.linesep + stderr)
+ )
log("stop cantian instance successfully.")
def setSslCert(self):
@@ -3913,8 +4461,7 @@ class Installer:
key_, work_key = self.get_ctencrypt_keys_and_file()
# Get the value of SSL_KEY_PASSWORD from ini and
# CTSQL_SSL_KEY_PASSWD from env and set SSL_KEY_PASSWORD into ini
- cipher = self.encrypt_ssl_key_passwd(key_, work_key,
- ssl_constructor.passwd)
+ cipher = self.encrypt_ssl_key_passwd(key_, work_key, ssl_constructor.passwd)
# 3. Modify cantiand.ini by write
# Set the ssl config in cantiand.ini for server
@@ -3953,14 +4500,21 @@ class Installer:
output: NA
"""
try:
- strCmd = ("find '%s'/admin -type f | xargs chmod %s "
- % (self.installPath, CommonValue.MIN_FILE_MODE))
+ strCmd = "find '%s'/admin -type f | xargs chmod %s " % (
+ self.installPath,
+ CommonValue.MIN_FILE_MODE,
+ )
ret_code, _, _ = _exec_popen(strCmd)
if ret_code:
- print("Change file permission to %s failed."
- " Please chmod %s filein directory %s/admin manually."
- % (CommonValue.MIN_FILE_MODE,
- CommonValue.MIN_FILE_MODE, self.installPath))
+ print(
+ "Change file permission to %s failed."
+ " Please chmod %s filein directory %s/admin manually."
+ % (
+ CommonValue.MIN_FILE_MODE,
+ CommonValue.MIN_FILE_MODE,
+ self.installPath,
+ )
+ )
except Exception as err:
logExit(str(err))
@@ -3977,7 +4531,7 @@ class Installer:
log("Changing file permission due to security audit.", True)
# 1. chmod sql file permission
self.chmodInstallSqlfile()
-
+
def set_core_dump_filter(self):
"""
function: set_core_dump_filter, modify num to support core dump shared memory
@@ -4003,12 +4557,22 @@ class Installer:
for cantiand_pid in cantiand_pids.split():
cantiand_pid = cantiand_pid.strip()
if cantiand_pid is not None and len(cantiand_pid) > 0:
- cmd = "echo 0x6f > " + sep_mark + "proc" + sep_mark + str(cantiand_pid) + \
- sep_mark + "coredump_filter"
+ cmd = (
+ "echo 0x6f > "
+ + sep_mark
+ + "proc"
+ + sep_mark
+ + str(cantiand_pid)
+ + sep_mark
+ + "coredump_filter"
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- logExit("can not set coredump_filter, command: %s, err: %s" % (cmd, stderr))
-
+ logExit(
+ "can not set coredump_filter, command: %s, err: %s"
+ % (cmd, stderr)
+ )
+
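
set_core_dump_filter builds an `echo 0x6f > /proc/<pid>/coredump_filter` command per pid; mask 0x6f sets the bits that include shared memory mappings in core dumps. Writing the file directly avoids the shell round-trip; a sketch (needs the same privileges as the echo):

```python
import os

def set_coredump_filter(pid, mask=0x6f):
    """Include (among others) shared memory mappings in core dumps."""
    path = os.path.join("/proc", str(pid), "coredump_filter")
    with open(path, "w") as f:
        f.write("%#x" % mask)

# for pid in cantiand_pids:  # pids gathered elsewhere, as in the script
#     set_coredump_filter(pid)
```
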
def set_core_dump_filter_mysql(self):
"""
function: set_core_dump_filter_mysql, modify num to support core dump shared memory
@@ -4034,11 +4598,21 @@ class Installer:
for mysqld_pid in mysqld_pids.split():
mysqld_pid = mysqld_pid.strip()
if mysqld_pid is not None and len(mysqld_pid) > 0:
- cmd = "echo 0x6f > " + sep_mark + "proc" + sep_mark + str(mysqld_pid) + \
- sep_mark + "coredump_filter"
+ cmd = (
+ "echo 0x6f > "
+ + sep_mark
+ + "proc"
+ + sep_mark
+ + str(mysqld_pid)
+ + sep_mark
+ + "coredump_filter"
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- logExit("can not set coredump_filter, command: %s, err: %s" % (cmd, stderr))
+ logExit(
+ "can not set coredump_filter, command: %s, err: %s"
+ % (cmd, stderr)
+ )
######################################################################
# The main process of installation.
@@ -4064,7 +4638,7 @@ class Installer:
pass
else:
self.checkRunner()
- self.checkParameter() # add cantiand, cms, gss config parameter check logic in this method
+ self.checkParameter() # add cantiand, cms, gss config parameter check logic in this method
# user and group is right, change owner of the log file to user
self.chownLogFile()
self.checkOldInstall()
@@ -4073,11 +4647,11 @@ class Installer:
self.checkDIR()
self.checkSHA256()
self.generateSslCert()
- self.decompressBin() #TODO shiyi check CMS, GSS bin when compile success
+ self.decompressBin() # TODO shiyi check CMS, GSS bin when compile success
self.setUserEnv()
self.prepareDataDir()
self.prepareMysqlForSingle()
- self.InitDbInstance() # init db config, including cantiand, cms, gss, ssl
+ self.InitDbInstance() # init db config, including cantiand, cms, gss, ssl
self.generateReplAuthKeys()
log("Successfully Initialize %s instance." % self.instance_name)
if self.option == self.INS_ALL:
@@ -4095,19 +4669,33 @@ class Installer:
def check_parameter_mysql(self):
if g_opts.mysql_config_file_path == "unset":
- g_opts.mysql_config_file_path = os.path.join(MYSQL_CODE_DIR, "scripts/my.cnf")
- log("no mysql config file assigned, set to %s" % g_opts.mysql_config_file_path, True)
+ g_opts.mysql_config_file_path = os.path.join(
+ MYSQL_CODE_DIR, "scripts/my.cnf"
+ )
+ log(
+ "no mysql config file assigned, set to %s"
+ % g_opts.mysql_config_file_path,
+ True,
+ )
real_path = os.path.realpath(g_opts.mysql_config_file_path)
if not os.path.isfile(real_path):
- logExit("mysql config file {} not existed or it is not a file".format(real_path))
+ logExit(
+ "mysql config file {} not existed or it is not a file".format(real_path)
+ )
if not self.is_readable(real_path, self.user):
- logExit("mysql config file {} is not readable by {}".format(real_path, self.user))
+ logExit(
+ "mysql config file {} is not readable by {}".format(
+ real_path, self.user
+ )
+ )
g_opts.mysql_config_file_path = real_path
cmd = "chown {}:{} {};".format(self.user, self.group, real_path)
cmd += "chmod {} {}".format(CommonValue.MAX_FILE_MODE, real_path)
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
- logExit("Can not set mysql config mode, command: %s, output: %s" % (cmd, stderr))
+ logExit(
+ "Can not set mysql config mode, command: %s, output: %s" % (cmd, stderr)
+ )
def prepare_mysql_data_dir(self):
log("Preparing mysql data dir...", True)
@@ -4122,7 +4710,9 @@ class Installer:
log("Preparing mysql bin dir...", True)
self.clean_dir(MYSQL_BIN_DIR)
self.prepareGivenPath(MYSQL_BIN_DIR, True)
- cmd = "cp -arf {} {};".format(os.path.join(MYSQL_CODE_DIR, "mysql_bin/mysql/*"), MYSQL_BIN_DIR)
+ cmd = "cp -arf {} {};".format(
+ os.path.join(MYSQL_CODE_DIR, "mysql_bin/mysql/*"), MYSQL_BIN_DIR
+ )
cmd += "cp %s/cfg/osd.cfg %s/bin/osd.cfg;" % (self.data, MYSQL_BIN_DIR)
cmd += "chown -R {}:{} {};".format(self.user, self.group, MYSQL_BIN_DIR)
@@ -4133,25 +4723,32 @@ class Installer:
def set_mysql_env(self):
log("Preparing mysql running env...", True)
- if 'LD_LIBRARY_PATH' in os.environ:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s:%s:%s:%s" % (
+ if "LD_LIBRARY_PATH" in os.environ:
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s:%s:%s:%s" % (
os.path.join(MYSQL_BIN_DIR, "lib"),
os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
os.path.join(CTC_LIB_DIR, "cantian_lib"),
os.path.join(MYSQL_BIN_DIR, "lib/private"),
- os.environ['LD_LIBRARY_PATH']))
+ os.environ["LD_LIBRARY_PATH"],
+ )
else:
- os.environ['LD_LIBRARY_PATH'] = ("%s:%s:%s:%s" % (
- os.path.join(MYSQL_BIN_DIR, "lib"), os.path.join(MYSQL_BIN_DIR, "lib/private"), os.path.join(
- MYSQL_CODE_DIR, "cantian_lib"), os.path.join(CTC_LIB_DIR, "cantian_lib")))
- cmd = "ldconfig -N %s %s" % (os.path.join(MYSQL_CODE_DIR, "cantian_lib"), os.path.join(MYSQL_BIN_DIR, "lib"))
+ os.environ["LD_LIBRARY_PATH"] = "%s:%s:%s:%s" % (
+ os.path.join(MYSQL_BIN_DIR, "lib"),
+ os.path.join(MYSQL_BIN_DIR, "lib/private"),
+ os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
+ os.path.join(CTC_LIB_DIR, "cantian_lib"),
+ )
+ cmd = "ldconfig -N %s %s" % (
+ os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
+ os.path.join(MYSQL_BIN_DIR, "lib"),
+ )
ret_code, _, stderr = _exec_popen(cmd)
if ret_code:
logExit("Can not link mysql lib, command: %s, output: %s" % (cmd, stderr))
def get_jemalloc_path(self):
for path in ["/usr/lib", "/usr/lib64", "/usr/local/lib", "/usr/local/lib64"]:
- je_path = os.path.join(path, 'libjemalloc.so')
+ je_path = os.path.join(path, "libjemalloc.so")
if os.path.exists(je_path):
return je_path
return ""
@@ -4159,26 +4756,37 @@ class Installer:
def start_mysql(self, is_slave_cluster):
log("Starting mysqld...", True)
if os.path.exists(MYSQL_LOG_FILE) and os.path.isfile(MYSQL_LOG_FILE):
- log("Warning: the mysql log file %s should empty for mysqld start" % MYSQL_LOG_FILE, True)
+ log(
+ "Warning: the mysql log file %s should empty for mysqld start"
+ % MYSQL_LOG_FILE,
+ True,
+ )
# mysql init
# Do not init mysql in slave cluster.
je_path = self.get_jemalloc_path()
if not is_slave_cluster:
# mysql_view_file = self.get_cantian_defs_file()
- cmd = "LD_PRELOAD=%s %s --defaults-file=%s --initialize-insecure --datadir=%s \
- --early-plugin-load=\"ctc_ddl_rewriter=ha_ctc.so;ctc=ha_ctc.so;\" \
- --core-file --log-error=%s" % (
- je_path,
- os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
- g_opts.mysql_config_file_path,
- MYSQL_DATA_DIR,
- MYSQL_LOG_FILE)
+ cmd = (
+ 'LD_PRELOAD=%s %s --defaults-file=%s --initialize-insecure --datadir=%s \
+ --early-plugin-load="ctc_ddl_rewriter=ha_ctc.so;ctc=ha_ctc.so;" \
+ --core-file --log-error=%s'
+ % (
+ je_path,
+ os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
+ g_opts.mysql_config_file_path,
+ MYSQL_DATA_DIR,
+ MYSQL_LOG_FILE,
+ )
+ )
if os.getuid() == 0:
cmd = "su %s -c '" % self.user + cmd + "'"
status, stdout, stderr = _exec_popen(cmd)
if status != 0:
output = stdout + stderr
- raise Exception("Can not init mysqld %s.\nStart cmd: %s.\nOutput: %s" % (self.data, cmd, output))
+ raise Exception(
+ "Can not init mysqld %s.\nStart cmd: %s.\nOutput: %s"
+ % (self.data, cmd, output)
+ )
# start mysqld
cmd = """LD_PRELOAD=%s %s %s --defaults-file=%s --datadir=%s --plugin-dir=%s \
@@ -4186,43 +4794,78 @@ class Installer:
--check_proxy_users=ON --mysql_native_password_proxy_users=ON \
--default-storage-engine=CTC \
--core-file > %s 2>&1 &
- """ % (je_path, self.numactl_str, os.path.join(MYSQL_BIN_DIR, "bin/mysqld"), g_opts.mysql_config_file_path,
- MYSQL_DATA_DIR, os.path.join(MYSQL_BIN_DIR, "lib/plugin"), MYSQL_LOG_FILE)
+ """ % (
+ je_path,
+ self.numactl_str,
+ os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
+ g_opts.mysql_config_file_path,
+ MYSQL_DATA_DIR,
+ os.path.join(MYSQL_BIN_DIR, "lib/plugin"),
+ MYSQL_LOG_FILE,
+ )
if os.getuid() == 0:
cmd = "su %s -c '" % self.user + cmd + "'"
status, stdout, stderr = _exec_popen(cmd)
if status != 0:
output = stdout + stderr
- raise Exception("Can not start mysqld %s.\nStart cmd: %s.\nOutput: %s" % (self.data, cmd, output))
+ raise Exception(
+ "Can not start mysqld %s.\nStart cmd: %s.\nOutput: %s"
+ % (self.data, cmd, output)
+ )
if not self.check_has_start_mysqld():
- raise Exception("failed or exceed time to start mysqld\nPlease see the log of %s" % MYSQL_LOG_FILE)
-
+ raise Exception(
+ "failed or exceed time to start mysqld\nPlease see the log of %s"
+ % MYSQL_LOG_FILE
+ )
+
def start_mysql_with_metadata_in_cantian(self):
log("Starting mysqld...", True)
if os.path.exists(MYSQL_LOG_FILE) and os.path.isfile(MYSQL_LOG_FILE):
- log("Warning: the mysql log file %s should empty for mysqld start" % MYSQL_LOG_FILE, True)
+ log(
+ "Warning: the mysql log file %s should empty for mysqld start"
+ % MYSQL_LOG_FILE,
+ True,
+ )
# mysql_view_file = self.get_cantian_defs_file()
je_path = self.get_jemalloc_path()
- cmd_init_metadata_in_cantian = "LD_PRELOAD=%s %s --defaults-file=%s --initialize-insecure --datadir=%s \
- --early-plugin-load=\"ha_ctc.so\" --core-file --log-error=%s" % (
- je_path,
- os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
- g_opts.mysql_config_file_path,
- MYSQL_DATA_DIR,
- MYSQL_LOG_FILE)
+ cmd_init_metadata_in_cantian = (
+ 'LD_PRELOAD=%s %s --defaults-file=%s --initialize-insecure --datadir=%s \
+ --early-plugin-load="ha_ctc.so" --core-file --log-error=%s'
+ % (
+ je_path,
+ os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
+ g_opts.mysql_config_file_path,
+ MYSQL_DATA_DIR,
+ MYSQL_LOG_FILE,
+ )
+ )
if os.path.exists("/.dockerenv") and (g_opts.cantian_in_container != "1"):
cmd_start_mysqld = """LD_PRELOAD=%s %s --defaults-file=%s --datadir=%s --user=root --skip-innodb \
--early-plugin-load="ha_ctc.so" --core-file >> %s 2>&1 &
- """ % (je_path, os.path.join(MYSQL_BIN_DIR, "bin/mysqld"), g_opts.mysql_config_file_path, MYSQL_DATA_DIR,
- MYSQL_LOG_FILE)
+ """ % (
+ je_path,
+ os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
+ g_opts.mysql_config_file_path,
+ MYSQL_DATA_DIR,
+ MYSQL_LOG_FILE,
+ )
else:
cmd_start_mysqld = """LD_PRELOAD=%s %s %s --defaults-file=%s --datadir=%s --plugin-dir=%s \
--early-plugin-load="ha_ctc.so" \
--core-file >> %s 2>&1 &
- """ % (je_path, self.numactl_str, os.path.join(MYSQL_BIN_DIR, "bin/mysqld"), g_opts.mysql_config_file_path,
- MYSQL_DATA_DIR, os.path.join(MYSQL_BIN_DIR, "lib/plugin"), MYSQL_LOG_FILE)
+ """ % (
+ je_path,
+ self.numactl_str,
+ os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
+ g_opts.mysql_config_file_path,
+ MYSQL_DATA_DIR,
+ os.path.join(MYSQL_BIN_DIR, "lib/plugin"),
+ MYSQL_LOG_FILE,
+ )
if os.getuid() == 0:
- cmd_init_metadata_in_cantian = "su %s -c '" % self.user + cmd_init_metadata_in_cantian + "'"
+ cmd_init_metadata_in_cantian = (
+ "su %s -c '" % self.user + cmd_init_metadata_in_cantian + "'"
+ )
cmd_start_mysqld = "su %s -c '" % self.user + cmd_start_mysqld + "'"
# we do not check for metadata dir initialization failure, since mysqld can still start even if it fails;
# the metadata may have been initialized by another node
@@ -4237,7 +4880,10 @@ class Installer:
time.sleep(2)
log("retry initilize and start mysqld again...", True)
if retry_time == 0:
- raise Exception("failed or exceed time to start mysqld\nPlease see the log of %s" % MYSQL_LOG_FILE)
+ raise Exception(
+ "failed or exceed time to start mysqld\nPlease see the log of %s"
+ % MYSQL_LOG_FILE
+ )
def check_has_start_mysqld(self):
log_data = ""
@@ -4246,7 +4892,10 @@ class Installer:
try:
with open(MYSQL_LOG_FILE) as file_handle:
log_data = "".join(file_handle.readlines())
- if log_data.find("ready for connections") != -1 and log_data.find("ctc reg instance success") != -1:
+ if (
+ log_data.find("ready for connections") != -1
+ and log_data.find("ctc reg instance success") != -1
+ ):
log("succeed to start mysqld", True)
return True
except Exception as exerr:
@@ -4256,7 +4905,6 @@ class Installer:
if retry_time == 0:
return False
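
check_has_start_mysqld re-reads the mysql error log until both "ready for connections" and "ctc reg instance success" appear, or the retries run out. The wait condensed into a reusable helper (marker strings copied from the hunk; the log path is a placeholder):

```python
import time

def wait_for_markers(log_path, markers, retries=60, interval=2):
    """Return True once every marker string appears in the log file."""
    for _ in range(retries):
        try:
            with open(log_path) as handle:
                data = handle.read()
            if all(marker in data for marker in markers):
                return True
        except OSError:
            pass  # log not created yet
        time.sleep(interval)
    return False

# started = wait_for_markers("/data/mysql.log",
#                            ["ready for connections", "ctc reg instance success"])
```
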
-
def install_mysql(self):
self.check_parameter_mysql()
# If it is not normalized, do not prepare_mysql_data_dir in slave cluster.
@@ -4267,20 +4915,26 @@ class Installer:
if MYSQL_VERSION == VERSION_DOCKER_META:
print("mysql_meta: going to start docker mysql in meta.")
self.start_mysql_with_metadata_in_cantian()
- #self.execute_mysql_update(self.get_cantian_defs_file())
+ # self.execute_mysql_update(self.get_cantian_defs_file())
elif MYSQL_VERSION == VERSION_DOCKER_NOMETA:
print("mysql_nometa: developer docker deploy.")
self.start_mysql(g_opts.slave_cluster)
- #self.execute_mysql_update(self.get_cantian_defs_file())
+ # self.execute_mysql_update(self.get_cantian_defs_file())
elif MYSQL_VERSION == VERSION_ENV_META:
mysql_plugin_path = os.path.join(MYSQL_BIN_DIR, "lib/plugin")
- print("mysql_meta: going to start mysql in meta. bin_dir:%s" % mysql_plugin_path)
+ print(
+ "mysql_meta: going to start mysql in meta. bin_dir:%s"
+ % mysql_plugin_path
+ )
self.start_mysql_with_metadata_in_cantian()
- #if g_opts.node_id == 0:
- #self.execute_mysql_update(self.get_cantian_defs_file())
+ # if g_opts.node_id == 0:
+ # self.execute_mysql_update(self.get_cantian_defs_file())
elif MYSQL_VERSION == VERSION_ENV_NOMETA:
mysql_plugin_path = os.path.join(MYSQL_BIN_DIR, "lib/plugin")
- print("mysql_nometa: going to start mysql in nometa. bin_dir:%s" % mysql_plugin_path)
+ print(
+ "mysql_nometa: going to start mysql in nometa. bin_dir:%s"
+ % mysql_plugin_path
+ )
self.start_mysql(g_opts.slave_cluster)
# self.execute_mysql_update(self.get_cantian_defs_file())
@@ -4289,31 +4943,34 @@ class Installer:
# self.set_core_dump_filter_mysql()
def checkCreatecantiandefsFile(self):
- '''
+ """
check it is a file; user has read permission,
:return:
- '''
+ """
# check it is a file
if not os.path.isfile(self.create_cantian_defs_file):
- raise Exception("Error: %s does not exists or is not a file"
- " or permission is not right."
- % self.create_cantian_defs_file)
+ raise Exception(
+ "Error: %s does not exists or is not a file"
+ " or permission is not right." % self.create_cantian_defs_file
+ )
if not checkPath(self.create_cantian_defs_file):
- raise Exception("Error: %s file path invalid: "
- % self.create_cantian_defs_file)
+ raise Exception(
+ "Error: %s file path invalid: " % self.create_cantian_defs_file
+ )
# if execute user is root, check common user has read permission
file_path = os.path.dirname(self.create_cantian_defs_file)
# check path of cantian defs sql file that user can cd
permission_ok, _ = self.checkPermission(file_path, True)
if not permission_ok:
- raise Exception("Error: %s can not access %s"
- % (self.user, file_path))
+ raise Exception("Error: %s can not access %s" % (self.user, file_path))
# check cantian defs file is readable for user
if not self.is_readable(self.create_cantian_defs_file, self.user):
- raise Exception("Error: %s is not readable for user %s"
- % (self.create_cantian_defs_file, self.user))
+ raise Exception(
+ "Error: %s is not readable for user %s"
+ % (self.create_cantian_defs_file, self.user)
+ )
# change file to a realpath file
self.create_cantian_defs_file = os.path.realpath(self.create_cantian_defs_file)
@@ -4329,10 +4986,10 @@ class Installer:
file_name = "cantian_defs.sql"
create_cantian_defs_file = os.path.join(sql_file_path, file_name)
return create_cantian_defs_file
-
+
def get_user_profile(self):
strCmd = ""
- if(g_opts.install_user_privilege == "withoutroot"):
+ if g_opts.install_user_privilege == "withoutroot":
strCmd = "echo ~"
else:
strCmd = "su - '%s' -c \"echo ~\"" % self.user
@@ -4341,32 +4998,42 @@ class Installer:
logExit("Can not get user home.")
# Get the profile of user.
output = os.path.realpath(os.path.normpath(stdout))
- if (not checkPath(output)):
+ if not checkPath(output):
logExit("The user home directory is invalid.")
self.userProfile = os.path.join(output, ".bashrc")
print("user profile path:%s" % self.userProfile)
-
+
def set_relaunch_env_for_single(self):
# set necessary environment variables
self.get_user_profile()
self.parseKeyAndValue()
- mysql_library_path = ("%s:%s:%s:%s:%s:%s" % (
- os.path.join(self.installPath, "lib"),
- os.path.join(self.installPath, "add-ons"),
- os.path.join(MYSQL_BIN_DIR, "lib"),
- os.path.join(MYSQL_BIN_DIR, "lib/private"),
- os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
- MYSQL_LIB_OUTPUT_DIR))
- persist_environment_variable("RUN_MODE", g_opts.running_mode.lower(), self.userProfile)
- persist_environment_variable("LD_LIBRARY_PATH", mysql_library_path, self.userProfile)
+ mysql_library_path = "%s:%s:%s:%s:%s:%s" % (
+ os.path.join(self.installPath, "lib"),
+ os.path.join(self.installPath, "add-ons"),
+ os.path.join(MYSQL_BIN_DIR, "lib"),
+ os.path.join(MYSQL_BIN_DIR, "lib/private"),
+ os.path.join(MYSQL_CODE_DIR, "cantian_lib"),
+ MYSQL_LIB_OUTPUT_DIR,
+ )
+ persist_environment_variable(
+ "RUN_MODE", g_opts.running_mode.lower(), self.userProfile
+ )
+ persist_environment_variable(
+ "LD_LIBRARY_PATH", mysql_library_path, self.userProfile
+ )
persist_environment_variable("CANTIAND_MODE", self.OPEN_MODE, self.userProfile)
- persist_environment_variable("CANTIAND_HOME_DIR", self.data.lower(), self.userProfile)
-
+ persist_environment_variable(
+ "CANTIAND_HOME_DIR", self.data.lower(), self.userProfile
+ )
+
def kill_process(self, process_name):
cmd = "pidof %s" % (process_name)
ret_code, pids, stderr = _exec_popen(cmd)
if ret_code:
- print("can not get pid of %s, command: %s, err: %s" % (process_name, cmd, stderr))
+ print(
+ "can not get pid of %s, command: %s, err: %s"
+ % (process_name, cmd, stderr)
+ )
return
for pid in pids.split():
pid = pid.strip()
@@ -4375,31 +5042,41 @@ class Installer:
if ret_code:
print("Failed to kill process %s. Error: %s" % (cmd, stderr))
else:
- print("Process %s with PID %s killed successfully." % (process_name, pid))
-
+ print(
+ "Process %s with PID %s killed successfully." % (process_name, pid)
+ )
+
def relaunch_single_process(self):
- self.kill_process('mysqld')
- self.kill_process('cantiand')
+ self.kill_process("mysqld")
+ self.kill_process("cantiand")
self.set_relaunch_env_for_single()
- relaunch_command = ("""%s --defaults-file=%s --datadir=%s --plugin-dir=%s --early-plugin-load=%s \
+ relaunch_command = """%s --defaults-file=%s --datadir=%s --plugin-dir=%s --early-plugin-load=%s \
--check_proxy_users=ON --mysql_native_password_proxy_users=ON \
--default-storage-engine=CTC --core-file > %s 2>&1 &""" % (
- os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
- g_opts.mysql_config_file_path,
- MYSQL_DATA_DIR,
- os.path.join(MYSQL_BIN_DIR, "lib/plugin"),
- CTC_PLUGIN,
- MYSQL_LOG_FILE))
+ os.path.join(MYSQL_BIN_DIR, "bin/mysqld"),
+ g_opts.mysql_config_file_path,
+ MYSQL_DATA_DIR,
+ os.path.join(MYSQL_BIN_DIR, "lib/plugin"),
+ CTC_PLUGIN,
+ MYSQL_LOG_FILE,
+ )
if os.getuid() == 0:
relaunch_command = "su - %s -c '" % self.user + relaunch_command + "'"
- print('relaunch mysql using command %s' % relaunch_command)
+ print("relaunch mysql using command %s" % relaunch_command)
status, stdout, stderr = _exec_popen(relaunch_command)
if status != 0:
output = stdout + stderr
- raise Exception("Can not start mysqld %s.\nStart cmd: %s.\nOutput: %s" % (self.data, relaunch_command, output))
+ raise Exception(
+ "Can not start mysqld %s.\nStart cmd: %s.\nOutput: %s"
+ % (self.data, relaunch_command, output)
+ )
if not self.check_has_start_mysqld():
- raise Exception("failed or exceed time to start mysqld\nPlease see the log of %s" % MYSQL_LOG_FILE)
-
+ raise Exception(
+ "failed or exceed time to start mysqld\nPlease see the log of %s"
+ % MYSQL_LOG_FILE
+ )
+
+
def main():
"""
main entry
@@ -4414,11 +5091,18 @@ def main():
installer = Installer(g_opts.os_user, g_opts.os_group)
if g_opts.running_mode == MYSQLD:
installer.install_mysql()
- elif g_opts.running_mode.lower() in VALID_SINGLE_MYSQL_RUNNING_MODE and g_opts.is_relaunch:
+ elif (
+ g_opts.running_mode.lower() in VALID_SINGLE_MYSQL_RUNNING_MODE
+ and g_opts.is_relaunch
+ ):
installer.relaunch_single_process()
else:
installer.install()
- log("Install successfully, for more detail information see %s." % g_opts.log_file, True)
+ log(
+ "Install successfully, for more detail information see %s."
+ % g_opts.log_file,
+ True,
+ )
except Exception as err:
logExit("Install failed: " + str(err))
diff --git a/pkg/install/sql_process.py b/pkg/install/sql_process.py
index c8203d0520c3806f0472fdf8e9d771fe59d1a95b..c90f4ff0a94041da7e4555d775cb42e53a58e727 100644
--- a/pkg/install/sql_process.py
+++ b/pkg/install/sql_process.py
@@ -5,6 +5,7 @@
import sys
+
sys.dont_write_bytecode = True
try:
import os
@@ -15,12 +16,7 @@ try:
except ImportError as e:
sys.exit("Unable to import module: %s." % str(e))
-old_core_tables = [
- "COLUMN$",
- "INDEX$",
- "TABLE$",
- "USER$"
- ]
+old_core_tables = ["COLUMN$", "INDEX$", "TABLE$", "USER$"]
old_systables = [
"BACKUP_SET$",
@@ -72,12 +68,7 @@ old_systables = [
"SQL_MAP$",
]
-new_core_tables = [
- "SYS_COLUMNS",
- "SYS_INDEXES",
- "SYS_TABLES",
- "SYS_USERS"
- ]
+new_core_tables = ["SYS_COLUMNS", "SYS_INDEXES", "SYS_TABLES", "SYS_USERS"]
new_systables = [
"SYS_BACKUP_SETS",
@@ -131,10 +122,10 @@ new_systables = [
def isSameSqlList(list1, list2):
- '''
+ """
given two lists of strings,
compare (ignoring case) whether they are the same
- '''
+ """
if len(list1) != len(list2):
return False
@@ -146,19 +137,20 @@ def isSameSqlList(list1, list2):
class CreateTableSql(object):
- '''
+ """
this class process the SQL:
CREATE TABLE items
- '''
+ """
+
def __init__(self, sql):
- '''
+ """
init a create table sql
- '''
+ """
- sqls = str(sql).split('\n')
- if sqls[0][0:2] == '--':
+ sqls = str(sql).split("\n")
+ if sqls[0][0:2] == "--":
sqls.pop(0)
- self.__sql = '\n'.join(sqls)
+ self.__sql = "\n".join(sqls)
self.__items = []
# divide create table sql to 3 parts
# 1. before the 'relational_properties'
@@ -166,44 +158,45 @@ class CreateTableSql(object):
# 3. after the 'relational_properties'
# the relational_properties embraced by a pair of parentheses
# find the first left parenthesis and it's peer right one
- self.__indexOfLeftParenthesis = self.__sql.index('(')
- self.__indexOfRightParenthesis =\
- self.find_right_parenthesis(self.__sql,
- self.__indexOfLeftParenthesis)
- self.__preSql = self.__sql[0:self.__indexOfLeftParenthesis]
- self.__postSql = self.__sql[self.__indexOfRightParenthesis+1:]
+ self.__indexOfLeftParenthesis = self.__sql.index("(")
+ self.__indexOfRightParenthesis = self.find_right_parenthesis(
+ self.__sql, self.__indexOfLeftParenthesis
+ )
+ self.__preSql = self.__sql[0 : self.__indexOfLeftParenthesis]
+ self.__postSql = self.__sql[self.__indexOfRightParenthesis + 1 :]
- contentSql = self.__sql[self.__indexOfLeftParenthesis+1:
- self.__indexOfRightParenthesis]
+ contentSql = self.__sql[
+ self.__indexOfLeftParenthesis + 1 : self.__indexOfRightParenthesis
+ ]
- for i in contentSql.split('\n'):
- if i.strip().strip(','):
- self.__items.append(i.strip().strip(','))
+ for i in contentSql.split("\n"):
+ if i.strip().strip(","):
+ self.__items.append(i.strip().strip(","))
if not self.__items:
raise Exception("Syntax Error:\n%s" % self.__sql)
def find_right_parenthesis(self, content, index_left_p):
- '''
+ """
find the corresponding right parenthesis
if a left parenthesis is encountered, increase the level (entering a nested pair)
if a right parenthesis is encountered and the level is zero, it is the match,
otherwise decrease the level
unexpected input: return -1
- '''
+ """
# verify left index is correct
- if content[index_left_p] != '(':
+ if content[index_left_p] != "(":
return -1
level = 0
count = 0
# start from left plus 1
- for i in content[index_left_p+1:]:
- if i == '(':
+ for i in content[index_left_p + 1 :]:
+ if i == "(":
level += 1
- elif i == ')':
+ elif i == ")":
if level == 0:
return index_left_p + 1 + count
else:
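Aside for reviewers: the level-counting scan in find_right_parenthesis is split across hunks here, so a self-contained sketch of the same matching strategy may help (illustrative only, not the module's API):

    def find_right_parenthesis(content, index_left_p):
        # return the index of the ')' matching content[index_left_p], else -1
        if content[index_left_p] != "(":
            return -1
        level = 0
        for count, ch in enumerate(content[index_left_p + 1:]):
            if ch == "(":
                level += 1
            elif ch == ")":
                if level == 0:
                    return index_left_p + 1 + count
                level -= 1
        return -1

    # the outer '(' at index 15 matches the ')' at index 32, skipping CHAR(8)
    assert find_right_parenthesis("CREATE TABLE T (A INT, B CHAR(8)) TABLESPACE S", 15) == 32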
@@ -216,20 +209,20 @@ class CreateTableSql(object):
return -1
def tableSpace(self):
- '''
+ """
return tablespace of this table
- '''
+ """
contents = self.__postSql.split()
try:
i = contents.index("TABLESPACE")
- ts = contents[i+1]
+ ts = contents[i + 1]
return ts
except ValueError as e:
print(str(e))
return "UNKNOWN"
def isSamePreContent(self, other):
- '''
+ """
self should be from the new initdb
other should be from the old initdb
we split this sql into 3 parts:
@@ -237,49 +230,49 @@ class CreateTableSql(object):
2. column items
3. ) xxx
this function compares part 1
- '''
+ """
pre1 = self.__preSql.split()
pre2 = other.__preSql.split()
return isSameSqlList(pre1, pre2)
def ignore_key_int(self, sql_str, key_str):
- '''
+ """
ignore a keyword followed by an integer
- '''
+ """
content = sql_str.split()
for i in range(len(content)):
if content[i] == key_str:
- if content[i+1].isdigit():
+ if content[i + 1].isdigit():
content.pop(i)
content.pop(i)
- return ' '.join(content)
+ return " ".join(content)
return sql_str
def ignore_pctfree(self, sql_str):
- '''
+ """
ignore the PCTFREE keyword
- '''
+ """
if sql_str.find("PCTFREE") < 0:
return sql_str
return self.ignore_key_int(sql_str, "PCTFREE")
def ignore_storage(self, sql_str):
- '''
+ """
ignore the STORAGE keyword
- '''
+ """
index_of_storage = sql_str.find("STORAGE")
if index_of_storage < 0:
return sql_str
- index_of_end = sql_str.find(')', index_of_storage)
+ index_of_end = sql_str.find(")", index_of_storage)
- return sql_str[0:index_of_storage] + sql_str[index_of_end+1:]
+ return sql_str[0:index_of_storage] + sql_str[index_of_end + 1 :]
def isSamePostContent(self, other):
- '''
+ """
self should be from the new initdb
other should be from the old initdb
we split this sql into 3 parts:
@@ -289,16 +282,15 @@ class CreateTableSql(object):
this function compares part 3
ignore PCTFREE xx (xx is an integer number)
ignore STORAGE (INITIAL xxxK) (xxx is an integer number)
- '''
+ """
storage_info = other.ignore_storage(other.__postSql)
- post1 =\
- self.ignore_pctfree(self.ignore_storage(self.__postSql)).split()
+ post1 = self.ignore_pctfree(self.ignore_storage(self.__postSql)).split()
post2 = other.ignore_pctfree(storage_info).split()
return isSameSqlList(post1, post2)
def incrementItems(self, other):
- '''
+ """
self should be from the new initdb
other should be from the old initdb
we split this sql into 3 parts:
@@ -307,45 +299,47 @@ class CreateTableSql(object):
3. ) xxx
this function processes part 2
output the new column items in the create sql
- '''
+ """
increment = []
length1 = len(self.__items)
length2 = len(other.__items)
if length1 == length2:
- '''
+ """
__isSame of SqlItem may not be correct when the sql has ','
use strip(',') to pass the right sql to isSameSqlList
- '''
+ """
for i in range(length1):
- li = self.__items[i].strip().strip(',')
- ll = other.__items[i].strip().strip(',')
+ li = self.__items[i].strip().strip(",")
+ ll = other.__items[i].strip().strip(",")
if not isSameSqlList(li.split(), ll.split()):
# output entire sqls for easy debug
- raise Exception("Decrement items:\n%s\n%s\n"
- % (self.__sql, other.__sql))
+ raise Exception(
+ "Decrement items:\n%s\n%s\n" % (self.__sql, other.__sql)
+ )
return increment
if length1 < length2:
# output entire sqls for easy debug
- raise Exception("Decrement items:\n%s\n%s\n"
- % (self.__sql, other.__sql))
+ raise Exception("Decrement items:\n%s\n%s\n" % (self.__sql, other.__sql))
for i in range(length2):
- if not isSameSqlList(self.__items[i].strip().strip(',').split(),
- other.__items[i].strip().strip(',').split()):
+ if not isSameSqlList(
+ self.__items[i].strip().strip(",").split(),
+ other.__items[i].strip().strip(",").split(),
+ ):
# output entire sqls for easy debug
- raise Exception("Decrement items:\n%s\n%s\n"
- % (self.__sql, other.__sql))
+ raise Exception(
+ "Decrement items:\n%s\n%s\n" % (self.__sql, other.__sql)
+ )
for i in range(length2, length1):
- increment.append(self.__items[i].strip().strip(','))
+ increment.append(self.__items[i].strip().strip(","))
return increment
class SqlItem(object):
-
"""
this class manage sql commands items
compare 2 sql
@@ -353,20 +347,20 @@ class SqlItem(object):
"""
def __init__(self, sql, flag=False, is_target=False, ignore=False):
- '''
+ """
init from raw sql
some sqls use the SYS.tablename form;
the 'SYS.' prefix will be stripped
- '''
+ """
self.__sql = str(sql)
self.__flag = flag
self.__sql_type = 0
- self.__table_name = ''
- self.__index_name = ''
- self.__role_name = ''
- self.__sequence_name = ''
- self.__privilege_name = ''
- self.__grantee = ''
+ self.__table_name = ""
+ self.__index_name = ""
+ self.__role_name = ""
+ self.__sequence_name = ""
+ self.__privilege_name = ""
+ self.__grantee = ""
self.__is_target = is_target
self.__is_ignore = ignore
self.__sql_version = 0
@@ -374,52 +368,51 @@ class SqlItem(object):
self.__diffs = []
self.__add_table_items = []
- self.nameStyle = ''
+ self.nameStyle = ""
if self.__table_name:
table_name = self.__table_name
- if table_name[0:4] == 'SYS.':
+ if table_name[0:4] == "SYS.":
table_name = table_name[4:]
if table_name in old_systables:
- self.nameStyle = 'old'
+ self.nameStyle = "old"
elif table_name in new_systables:
- self.nameStyle = 'new'
+ self.nameStyle = "new"
else:
- self.nameStyle = ''
+ self.nameStyle = ""
def _replace(self, s, t):
- '''
+ """
replace sql contents
- '''
+ """
self.__sql = self.__sql.replace(s, t)
def rename2old(self):
- '''
+ """
rename table name to old style
- '''
+ """
self.__rename()
def rename2new(self):
- '''
+ """
rename table name to new style
- '''
+ """
self.__rename(to_new=True)
def version(self):
- '''
+ """
get the version
- '''
+ """
return self.__sql_version
def unique2normal(self):
- '''
- '''
+ """ """
self.__sql = self.__sql.replace("CREATE UNIQUE", "CREATE")
self.__analyse()
def originSql(self):
- '''
+ """
return the origin sql
- '''
+ """
return self.__sql
def index_name(self):
@@ -430,17 +423,17 @@ class SqlItem(object):
return ""
def tableName(self):
- '''
+ """
return the related table name of this sql
- '''
+ """
return self.__table_name
def name(self):
- '''
+ """
if create role return role name
elif create sequence return sequence name
else return table name
- '''
+ """
if self.isCreateRoleSql():
return self.__role_name
elif self.isCreateSeqenceSql():
@@ -449,141 +442,143 @@ class SqlItem(object):
return self.__table_name
def roleName(self):
- '''
+ """
return the related role name of this sql
- '''
+ """
return self.__role_name
def genDrop(self):
- '''
+ """
generate the Drop sql for index sql
- '''
+ """
if self.isCreateIndexSql():
drop = "DROP INDEX %s on %s" % (self.__index_name, self.__table_name)
return drop
- return ''
+ return ""
def isIndexSpecialCondition1(self, other):
- '''
+ """
CREATE UNIQUE INDEX IX_PROCARGU_001 ON SYS_PROC_ARGS(USER#,
OBJECT_NAME, PACKAGE, SEQUENCE, OVERLOAD) TABLESPACE SYSTEM
CREATE UNIQUE INDEX IX_PROCARGU_001 ON PROC_ARGS
$(USER#, OBJECT_NAME, SEQUENCE) TABLESPACE SYSTEM
- '''
- sqls1 = ['CREATE UNIQUE INDEX IX_PROCARGU_001 ON SYS_PROC_ARGS'
- '(USER#, OBJECT_NAME, PACKAGE, SEQUENCE, OVERLOAD)'
- ' TABLESPACE SYSTEM',
- 'CREATE UNIQUE INDEX IX_PROCARGU_001 ON PROC_ARGS'
- '$(USER#, OBJECT_NAME, PACKAGE, SEQUENCE, OVERLOAD)'
- ' TABLESPACE SYSTEM']
- sqls2 = ['CREATE UNIQUE INDEX IX_PROCARGU_001 ON SYS_PROC_ARGS'
- '(USER#, OBJECT_NAME, SEQUENCE) TABLESPACE SYSTEM',
- 'CREATE UNIQUE INDEX IX_PROCARGU_001 ON PROC_ARGS'
- '$(USER#, OBJECT_NAME, SEQUENCE) TABLESPACE SYSTEM']
+ """
+ sqls1 = [
+ "CREATE UNIQUE INDEX IX_PROCARGU_001 ON SYS_PROC_ARGS"
+ "(USER#, OBJECT_NAME, PACKAGE, SEQUENCE, OVERLOAD)"
+ " TABLESPACE SYSTEM",
+ "CREATE UNIQUE INDEX IX_PROCARGU_001 ON PROC_ARGS"
+ "$(USER#, OBJECT_NAME, PACKAGE, SEQUENCE, OVERLOAD)"
+ " TABLESPACE SYSTEM",
+ ]
+ sqls2 = [
+ "CREATE UNIQUE INDEX IX_PROCARGU_001 ON SYS_PROC_ARGS"
+ "(USER#, OBJECT_NAME, SEQUENCE) TABLESPACE SYSTEM",
+ "CREATE UNIQUE INDEX IX_PROCARGU_001 ON PROC_ARGS"
+ "$(USER#, OBJECT_NAME, SEQUENCE) TABLESPACE SYSTEM",
+ ]
if not self.isCreateIndexSql():
return False
if not other.isCreateIndexSql():
return False
- if self.__index_name != 'IX_PROCARGU_001':
+ if self.__index_name != "IX_PROCARGU_001":
return False
if self.originSql() in sqls2 and other.originSql() in sqls1:
return True
return False
def setFlag(self, flag):
- '''
+ """
this flag indicate whether this item has been fetched or not
- '''
+ """
self.__flag = flag
def isFlagTrue(self):
- '''
+ """
return the internal flag
- '''
+ """
return self.__flag
def isTableSql(self):
- '''
+ """
return if this sql is a table related sql
- '''
+ """
if self.__sql_type in [1, 2, 3, 4, 5, 6, 10, 11]:
return True
return False
def isCreateRoleSql(self):
- '''
+ """
return if this sql is a create role sql
- '''
+ """
if self.__sql_type in [7]:
return True
return False
def isCreateSeqenceSql(self):
- '''
+ """
return if this sql is a create sequence sql
- '''
+ """
if self.__sql_type in [8]:
return True
return False
def isGrantSql(self):
- '''
+ """
return if this sql is a grant xxx|ALL to role sql
- '''
+ """
if self.__sql_type in [9]:
return True
return False
def isCreateTableSql(self):
- '''
+ """
return if this sql is a create table sql
- '''
+ """
if self.__sql_type in [1, 2, 3]:
return True
return False
def isCreateIndexSql(self):
- '''
+ """
return if this sql is a create index sql
- '''
+ """
if self.__sql_type in [4, 5]:
return True
return False
def isCreateNormalIndexSql(self):
- '''
+ """
return if this sql is a create normal index sql
- '''
+ """
if self.__sql_type in [4]:
return True
return False
def isCreateUniqueIndexSql(self):
- '''
+ """
return if this sql is a create unique index sql
- '''
+ """
if self.__sql_type in [5]:
return True
return False
def isAlterSystemSql(self):
- '''
+ """
return if this sql is an alter system sql
- '''
+ """
if self.__sql_type in [6]:
return True
return False
def isViewDropableSql(self):
- '''
- '''
+ """ """
if self.__sql_type in [1, 2, 3, 4, 5, 8, 12, 13, 14, 15, 16, 17, 18]:
return True
return False
def isCreateOrReplaceView(self):
- '''
- '''
+ """ """
if self.__sql_type in [12]:
return True
return False
@@ -593,8 +588,8 @@ class SqlItem(object):
if self.__sql_type != 15:
return ""
- end_symbol = '\n/\n\n'
- drop_sql = 'DROP PROCEDURE IF EXISTS %s' % p_name
+ end_symbol = "\n/\n\n"
+ drop_sql = "DROP PROCEDURE IF EXISTS %s" % p_name
drop_sql += end_symbol
drop_sql += "BEGIN\n"
drop_sql += " FOR ITEM IN "
@@ -607,48 +602,50 @@ class SqlItem(object):
return drop_sql
def generateDropSql(self):
- '''
- '''
+ """ """
if not self.isViewDropableSql():
- return ''
+ return ""
if self.__sql_type == 8:
- return 'DROP SEQUENCE IF EXISTS %s' % self.__sequence_name
+ return "DROP SEQUENCE IF EXISTS %s" % self.__sequence_name
if self.__sql_type == 12:
- return 'DROP VIEW IF EXISTS %s' % self.__table_name
+ return "DROP VIEW IF EXISTS %s" % self.__table_name
if self.__sql_type == 13:
- return 'DROP PUBLIC SYNONYM IF EXISTS %s' % self.__table_name
+ return "DROP PUBLIC SYNONYM IF EXISTS %s" % self.__table_name
if self.__sql_type == 14:
- return 'DROP SYNONYM IF EXISTS %s' % self.__table_name
+ return "DROP SYNONYM IF EXISTS %s" % self.__table_name
if self.__sql_type == 15:
return self.generate_drop_procedure(self.__table_name)
if self.__sql_type in [1, 2, 3, 16]:
- return 'DROP TABLE IF EXISTS %s' % self.__table_name
+ return "DROP TABLE IF EXISTS %s" % self.__table_name
if self.__sql_type in [4, 5, 17, 18]:
- return 'DROP INDEX IF EXISTS %s ON %s' % (self.__index_name, self.__table_name)
+ return "DROP INDEX IF EXISTS %s ON %s" % (
+ self.__index_name,
+ self.__table_name,
+ )
def __isTableMatched(self, other):
- '''
+ """
return if
the 2 sqls operate the same table
- '''
+ """
if self.__table_name == other.__table_name:
return True
return False
def __isIndexMatched(self, other):
- '''
+ """
return if the 2 sqls operate the same index
- '''
+ """
if self.__index_name == other.__index_name:
return True
return False
def __isSame(self, other):
- '''
+ """
return if
the 2 sqls are identical
- '''
+ """
sql1 = self.__sql.split()
sql2 = other.__sql.split()
@@ -656,18 +653,18 @@ class SqlItem(object):
return isSameSqlList(sql1, sql2)
def __isAllMatched(self, other):
- '''
+ """
return if
the 2 sqls are identical
- '''
+ """
return self.__isSame(other)
def isMatched(self, other):
- '''
+ """
return if
the 2 sqls create the same table
or are identical
- '''
+ """
if not isinstance(other, SqlItem):
return False
@@ -693,9 +690,9 @@ class SqlItem(object):
return False
def __incrementOfCreateTable(self, other):
- '''
+ """
find increment items of Create Table
- '''
+ """
createTable1 = CreateTableSql(self.__sql)
createTable2 = CreateTableSql(other.__sql)
@@ -711,58 +708,61 @@ class SqlItem(object):
return createTable1.incrementItems(createTable2)
def generateDegradeSql(self, other):
- '''
+ """
this is the generate interface
self is from the old initdb file
other is from the new initdb file
'old' and 'new' refer to the upgrade script;
the 'old' initdb file is actually the newer one
other may be None if self is totally new
- '''
+ """
up = []
self.__diffs = []
if other:
if not isinstance(other, SqlItem):
- raise Exception('unrecognized object %s' % str(other))
+ raise Exception("unrecognized object %s" % str(other))
if self.__isSame(other):
- '''
+ """
process for:
the same sqls
- '''
+ """
return []
else:
- '''
+ """
process for:
DECREMENT of CREATE TABLE
- '''
+ """
# CREATE TABLE decrement
decrementItems = other.__incrementOfCreateTable(self)
for item in decrementItems:
- if ' '.join(item.split()).upper().find("NOT NULL") >= 0:
+ if " ".join(item.split()).upper().find("NOT NULL") >= 0:
if item.upper().find("DEFAULT") < 0:
- raise Exception("Can not handle"
- " decrement sql: %s" % item)
+ raise Exception("Cannot handle decrement sql: %s" % item)
else:
- '''
+ """
process for:
new sqls
- '''
+ """
if self.isCreateTableSql():
up.append("DROP TABLE %s" % self.__table_name)
elif self.isCreateIndexSql():
- up.append("DROP INDEX IF EXISTS %s on %s" % (self.__index_name, self.__table_name))
+ up.append(
+ "DROP INDEX IF EXISTS %s on %s"
+ % (self.__index_name, self.__table_name)
+ )
elif self.__sql_type in [7]:
up.append("DROP ROLE %s" % self.__role_name)
elif self.__sql_type in [8]:
up.append("DROP SEQUENCE IF EXISTS %s" % self.__sequence_name)
elif self.__sql_type in [9]:
- up.append("REVOKE %s FROM %s"
- % (self.__privilege_name, self.__grantee))
+ up.append("REVOKE %s FROM %s" % (self.__privilege_name, self.__grantee))
elif self.__sql_type == 6:
pass
else:
@@ -772,12 +772,12 @@ class SqlItem(object):
return up
def generateUpgradeSql(self, other):
- '''
+ """
this is the generate interface
self is from the new initdb file
other is from the old initdb file
other may be None if self is totally new
- '''
+ """
up = []
extra_sqls = []
@@ -785,51 +785,58 @@ class SqlItem(object):
if other:
if not isinstance(other, SqlItem):
- raise Exception('unrecognized object %s' % str(other))
+ raise Exception("unrecognized object %s" % str(other))
if self.__isSame(other):
- '''
+ """
process for:
the same sqls
- '''
+ """
return [], []
elif self.isCreateIndexSql():
- up.append("DROP INDEX %s on %s" % (other.__index_name, other.__table_name))
+ up.append(
+ "DROP INDEX %s on %s" % (other.__index_name, other.__table_name)
+ )
up.append(self.__sql)
else:
- '''
+ """
process for:
INCREMENT of CREATE TABLE
- '''
+ """
# CREATE TABLE INCREMENT
incrementItems = self.__incrementOfCreateTable(other)
for item in incrementItems:
- if ' '.join(item.split()).upper().find("NOT NULL") >= 0:
+ if " ".join(item.split()).upper().find("NOT NULL") >= 0:
if item.upper().find("DEFAULT") < 0:
- raise Exception("Can not handle"
- " increment sql: %s" % item)
+ raise Exception("Cannot handle increment sql: %s" % item)
else:
sql_content = [i.upper() for i in item.split()]
- default_value =\
- sql_content[sql_content.index("DEFAULT")+1]
- update_sql = "UPDATE %s SET %s=%s"\
- % (self.__table_name,
- sql_content[0], default_value)
+ default_value = sql_content[
+ sql_content.index("DEFAULT") + 1
+ ]
+ update_sql = "UPDATE %s SET %s=%s" % (
+ self.__table_name,
+ sql_content[0],
+ default_value,
+ )
extra_sqls.append(update_sql)
extra_sqls.append("COMMIT")
- upgrade_sql = "ALTER TABLE %s ADD %s"\
- % (self.__table_name,
- ' '.join(item.strip(',').split()))
- self.__add_table_items.append(item.strip(','))
+ upgrade_sql = "ALTER TABLE %s ADD %s" % (
+ self.__table_name,
+ " ".join(item.strip(",").split()),
+ )
+ self.__add_table_items.append(item.strip(","))
up.append(upgrade_sql)
else:
- '''
+ """
process for:
new sqls
- '''
+ """
up.append(self.__sql)
self.__diffs.extend(up)
@@ -837,15 +844,15 @@ class SqlItem(object):
return up, extra_sqls
def __str__(self):
- '''
+ """
for easy output of an object
- '''
+ """
return "\nSQL:\n%s\nFetched:%s\n" % (self.__sql, str(self.__flag))
def __analyse(self):
- '''
+ """
analyse the syntax of a single sql string
- '''
+ """
sql = self.__sql
@@ -864,75 +871,81 @@ class SqlItem(object):
# GRANT -- 9
###########################################
tokens = sql.split()
- if tokens[0][0:2] == '--':
+ if tokens[0][0:2] == "--":
if tokens[0][2:].isdigit():
self.__sql_version = int(tokens[0][2:])
tokens.pop(0)
- self.__sql = sql[sql.find(tokens[0]):]
+ self.__sql = sql[sql.find(tokens[0]) :]
- if (tokens[0].upper() == 'CREATE' and tokens[1].upper() == 'TABLE'):
- if (tokens[2] == 'IF'
- and tokens[3] == 'NOT'
- and tokens[4] == 'EXISTS'):
+ if tokens[0].upper() == "CREATE" and tokens[1].upper() == "TABLE":
+ if tokens[2] == "IF" and tokens[3] == "NOT" and tokens[4] == "EXISTS":
self.__sql_type = 16
self.__table_name = tokens[5]
else:
self.__sql_type = 1
self.__table_name = tokens[2]
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'TEMPORARY'
- and tokens[2].upper() == 'TABLE'):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "TEMPORARY"
+ and tokens[2].upper() == "TABLE"
+ ):
self.__sql_type = 2
self.__table_name = tokens[3]
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'GLOBAL'
- and tokens[2].upper() == 'TEMPORARY'
- and tokens[3].upper() == 'TABLE'):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "GLOBAL"
+ and tokens[2].upper() == "TEMPORARY"
+ and tokens[3].upper() == "TABLE"
+ ):
self.__sql_type = 3
self.__table_name = tokens[4]
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'INDEX'
- and tokens[3].upper() == 'ON'):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "INDEX"
+ and tokens[3].upper() == "ON"
+ ):
self.__sql_type = 4
self.__index_name = tokens[2]
- self.__table_name = tokens[4].split('(')[0]
-
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'UNIQUE'
- and tokens[2].upper() == 'INDEX'
- and tokens[4].upper() == 'ON'):
+ self.__table_name = tokens[4].split("(")[0]
+
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "UNIQUE"
+ and tokens[2].upper() == "INDEX"
+ and tokens[4].upper() == "ON"
+ ):
self.__sql_type = 5
self.__index_name = tokens[3]
- self.__table_name = tokens[5].split('(')[0]
+ self.__table_name = tokens[5].split("(")[0]
- elif (tokens[0].upper() == 'ALTER'
- and tokens[1].upper() == 'SYSTEM'
- and tokens[2].upper() == 'LOAD'
- and tokens[3].upper() == 'DICTIONARY'
- and tokens[4].upper() == 'FOR'):
+ elif (
+ tokens[0].upper() == "ALTER"
+ and tokens[1].upper() == "SYSTEM"
+ and tokens[2].upper() == "LOAD"
+ and tokens[3].upper() == "DICTIONARY"
+ and tokens[4].upper() == "FOR"
+ ):
self.__sql_type = 6
self.__table_name = tokens[5].strip()
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'ROLE'):
+ elif tokens[0].upper() == "CREATE" and tokens[1].upper() == "ROLE":
self.__sql_type = 7
self.__role_name = tokens[2].strip()
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'SEQUENCE'):
+ elif tokens[0].upper() == "CREATE" and tokens[1].upper() == "SEQUENCE":
self.__sql_type = 8
self.__sequence_name = tokens[2].strip()
- elif (tokens[0].upper() == 'GRANT'):
+ elif tokens[0].upper() == "GRANT":
indexOfTO = 0
endOfIndex = len(tokens)
for tok in tokens:
- if tok.upper() == 'TO':
+ if tok.upper() == "TO":
indexOfTO = tokens.index(tok)
break
else:
@@ -940,89 +953,104 @@ class SqlItem(object):
privileges = tokens[1:indexOfTO]
self.__sql_type = 9
- self.__privilege_name = ' '.join(privileges)
- self.__role_name = tokens[indexOfTO+1]
+ self.__privilege_name = " ".join(privileges)
+ self.__role_name = tokens[indexOfTO + 1]
- if tokens[-1].upper() == 'OPTION':
- if tokens[-2].upper() == 'ADMIN'\
- and tokens[-3].upper() == 'WITH':
+ if tokens[-1].upper() == "OPTION":
+ if tokens[-2].upper() == "ADMIN" and tokens[-3].upper() == "WITH":
endOfIndex -= 3
- self.__grantee = ' '.join(tokens[indexOfTO+1:endOfIndex])
+ self.__grantee = " ".join(tokens[indexOfTO + 1 : endOfIndex])
- elif (tokens[0].upper() == 'ALTER'
- and tokens[1].upper() == 'TABLE'
- and self.__is_target):
+ elif (
+ tokens[0].upper() == "ALTER"
+ and tokens[1].upper() == "TABLE"
+ and self.__is_target
+ ):
self.__sql_type = 10
self.__table_name = tokens[2].strip()
- elif (tokens[0].upper() == 'DROP'
- and tokens[1].upper() == 'INDEX'
- and self.__is_target):
+ elif (
+ tokens[0].upper() == "DROP"
+ and tokens[1].upper() == "INDEX"
+ and self.__is_target
+ ):
self.__sql_type = 11
self.__index_name = tokens[2].strip()
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'OR'
- and tokens[2].upper() == 'REPLACE'
- and tokens[3].upper() == 'VIEW'
- and self.__is_target):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "OR"
+ and tokens[2].upper() == "REPLACE"
+ and tokens[3].upper() == "VIEW"
+ and self.__is_target
+ ):
self.__sql_type = 12
self.__table_name = tokens[4].strip()
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'OR'
- and tokens[2].upper() == 'REPLACE'
- and tokens[3].upper() == 'PUBLIC'
- and tokens[4].upper() == 'SYNONYM'
- and self.__is_target):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "OR"
+ and tokens[2].upper() == "REPLACE"
+ and tokens[3].upper() == "PUBLIC"
+ and tokens[4].upper() == "SYNONYM"
+ and self.__is_target
+ ):
self.__sql_type = 13
self.__table_name = tokens[5].strip()
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'OR'
- and tokens[2].upper() == 'REPLACE'
- and tokens[3].upper() == 'SYNONYM'
- and self.__is_target):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "OR"
+ and tokens[2].upper() == "REPLACE"
+ and tokens[3].upper() == "SYNONYM"
+ and self.__is_target
+ ):
self.__sql_type = 14
self.__table_name = tokens[4].strip()
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'OR'
- and tokens[2].upper() == 'REPLACE'
- and tokens[3].upper() == 'PROCEDURE'
- and self.__is_target):
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "OR"
+ and tokens[2].upper() == "REPLACE"
+ and tokens[3].upper() == "PROCEDURE"
+ and self.__is_target
+ ):
self.__sql_type = 15
# a procedure may have parameters after the name
# like: CREATE OR REPLACE PROCEDURE procedure_name(parameter list)
- self.__table_name = tokens[4].strip().split('(')[0]
-
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'INDEX'
- and tokens[2].upper() == 'IF'
- and tokens[3].upper() == 'NOT'
- and tokens[4].upper() == 'EXISTS'
- and tokens[6].upper() == 'ON'):
+ self.__table_name = tokens[4].strip().split("(")[0]
+
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "INDEX"
+ and tokens[2].upper() == "IF"
+ and tokens[3].upper() == "NOT"
+ and tokens[4].upper() == "EXISTS"
+ and tokens[6].upper() == "ON"
+ ):
self.__sql_type = 17
self.__index_name = tokens[5]
- self.__table_name = tokens[7].split('(')[0]
-
- elif (tokens[0].upper() == 'CREATE'
- and tokens[1].upper() == 'UNIQUE'
- and tokens[2].upper() == 'INDEX'
- and tokens[3].upper() == 'IF'
- and tokens[4].upper() == 'NOT'
- and tokens[5].upper() == 'EXISTS'
- and tokens[7].upper() == 'ON'):
+ self.__table_name = tokens[7].split("(")[0]
+
+ elif (
+ tokens[0].upper() == "CREATE"
+ and tokens[1].upper() == "UNIQUE"
+ and tokens[2].upper() == "INDEX"
+ and tokens[3].upper() == "IF"
+ and tokens[4].upper() == "NOT"
+ and tokens[5].upper() == "EXISTS"
+ and tokens[7].upper() == "ON"
+ ):
self.__sql_type = 18
self.__index_name = tokens[6]
- self.__table_name = tokens[8].split('(')[0]
+ self.__table_name = tokens[8].split("(")[0]
else:
if not self.__is_ignore:
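Reviewer note: the long elif chain in __analyse dispatches on the uppercased leading tokens of each statement and records a numeric type code plus the related object name. A compressed sketch of the same token-prefix dispatch, covering just three of the shapes above (the codes 1, 4, and 5 mirror those used in __analyse, but this is an illustration, not the module's API; inputs are assumed well-formed, as in __analyse itself):

    def classify(sql):
        # return (type_code, object_name) for a few statement shapes
        toks = sql.split()
        up = [t.upper() for t in toks]
        if up[:2] == ["CREATE", "TABLE"]:
            return 1, toks[2]
        if up[:3] == ["CREATE", "UNIQUE", "INDEX"] and up[4] == "ON":
            return 5, toks[5].split("(")[0]
        if up[:2] == ["CREATE", "INDEX"] and up[3] == "ON":
            return 4, toks[4].split("(")[0]
        return 0, ""

    assert classify("CREATE TABLE USER$ (ID INT)") == (1, "USER$")
    assert classify("CREATE UNIQUE INDEX IX ON T(A)") == (5, "T")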
@@ -1039,10 +1067,10 @@ class SqlItem(object):
return self.__diffs
def __rename(self, to_new=False):
- '''
+ """
rename all table_name with old name
- '''
- if self.__table_name.find('SYS.') == 0:
+ """
+ if self.__table_name.find("SYS.") == 0:
self.__table_name = self.__table_name[4:]
source_tables = new_systables
@@ -1054,12 +1082,13 @@ class SqlItem(object):
if self.__table_name:
if self.__table_name in source_tables:
- target_name =\
- target_tables[source_tables.index(self.__table_name)]
- self.__sql = self.__sql.replace(' '+self.__table_name,
- ' '+target_name)
- self.__sql = self.__sql.replace(' SYS.'+self.__table_name,
- ' SYS.'+target_name)
+ target_name = target_tables[source_tables.index(self.__table_name)]
+ self.__sql = self.__sql.replace(
+ " " + self.__table_name, " " + target_name
+ )
+ self.__sql = self.__sql.replace(
+ " SYS." + self.__table_name, " SYS." + target_name
+ )
self.__table_name = target_name
@@ -1067,19 +1096,19 @@ g_later_delete_flag = False
class TableGroup(object):
- '''
+ """
a group includes all table sqls:
create table
create index
create SEQUENCE
- '''
+ """
def __init__(self, create_sql):
- '''
+ """
init a TableGroup object from create table sql
- '''
+ """
- self.__table_name = ''
+ self.__table_name = ""
self.__sqls = []
self.__sqls_ct = []
self.__sqls_ci = []
@@ -1087,17 +1116,25 @@ class TableGroup(object):
self.__sqls_other = []
self.__unmatched = []
self.__sql_version = create_sql.version()
- self.__special_tables = ["SYS_HISTGRAM_ABSTR", "HIST_HEAD$",
- "MON_MODS_ALL$", "SYS_DML_STATS"]
- self.__sysaux_tables = ["SYS_HISTGRAM_ABSTR", "HIST_HEAD$",
- "SYS_HISTGRAM", "HISTGRAM$"]
+ self.__special_tables = [
+ "SYS_HISTGRAM_ABSTR",
+ "HIST_HEAD$",
+ "MON_MODS_ALL$",
+ "SYS_DML_STATS",
+ ]
+ self.__sysaux_tables = [
+ "SYS_HISTGRAM_ABSTR",
+ "HIST_HEAD$",
+ "SYS_HISTGRAM",
+ "HISTGRAM$",
+ ]
if not isinstance(create_sql, SqlItem):
- raise Exception("Unexpected type:%s,"
- " SqlItem is expected" % type(create_sql))
+ raise Exception("Unexpected type: %s, SqlItem is expected" % type(create_sql))
if not create_sql.isCreateTableSql():
- raise Exception("Need 'Create table' sql :\n%s"
- % create_sql.originSql())
+ raise Exception("Need 'Create table' sql :\n%s" % create_sql.originSql())
self.__table_name = create_sql.tableName()
self.__sqls.append(create_sql)
@@ -1131,45 +1168,45 @@ class TableGroup(object):
return self.__modified_unique_index
def __str__(self):
- '''
+ """
this function is for easy debugging
- '''
- s = ''
+ """
+ s = ""
for item in self.__sqls:
s += str(item)
return s
def version(self):
- '''
+ """
get the version
- '''
+ """
return self.__sql_version
def tableName(self):
- '''
+ """
return the table name
- '''
+ """
return self.__table_name
def name(self):
- '''
+ """
return the table name
- '''
+ """
return self.__table_name
def append(self, sql):
- '''
+ """
append to internal lists
- '''
+ """
- if sql.tableName() not in [self.__table_name,
- 'SYS.'+self.__table_name]:
- raise Exception("Cannot append %s to table:%s"
- % (sql.originSql(), self.__table_name))
+ if sql.tableName() not in [self.__table_name, "SYS." + self.__table_name]:
+ raise Exception(
+ "Cannot append %s to table:%s" % (sql.originSql(), self.__table_name)
+ )
self.__sqls.append(sql)
@@ -1183,23 +1220,23 @@ class TableGroup(object):
self.__sqls_other.append(sql)
def setFlag(self, flag):
- '''
+ """
set flag for fetch
- '''
+ """
self.__flag = flag
def isFlagTrue(self):
- '''
+ """
return the flag
- '''
+ """
return self.__flag
def rename2old(self):
- '''
+ """
rename table name to old
- '''
+ """
for i in self.__sqls:
i.rename2old()
@@ -1207,14 +1244,13 @@ class TableGroup(object):
# rename table group table name
if self.__table_name:
if self.__table_name in new_systables:
- old_name =\
- old_systables[new_systables.index(self.__table_name)]
+ old_name = old_systables[new_systables.index(self.__table_name)]
self.__table_name = old_name
def rename2new(self):
- '''
+ """
rename table name to new
- '''
+ """
for i in self.__sqls:
i.rename2new()
@@ -1225,9 +1261,9 @@ class TableGroup(object):
self.__table_name = _name
def isMatched(self, other):
- '''
+ """
if table name is the same
- '''
+ """
if not isinstance(other, TableGroup):
return False
@@ -1238,9 +1274,9 @@ class TableGroup(object):
return False
def __fetch(self, sql, update_flag=True):
- '''
+ """
fetch matched item
- '''
+ """
matched = None
for i in self.__sqls:
if i.isMatched(sql):
@@ -1251,9 +1287,9 @@ class TableGroup(object):
return matched
def __isAllMatched(self, other):
- '''
+ """
check if all items matched
- '''
+ """
for i in self.__sqls:
if not i.isCreateIndexSql():
@@ -1266,9 +1302,9 @@ class TableGroup(object):
return True
def __isDropIndex(self, other):
- '''
+ """
shall we drop all indexes?
- '''
+ """
if not other:
return False
@@ -1285,12 +1321,10 @@ class TableGroup(object):
return True
if self.__table_name in self.__special_tables:
- if i.isCreateUniqueIndexSql()\
- and matched.isCreateNormalIndexSql():
+ if i.isCreateUniqueIndexSql() and matched.isCreateNormalIndexSql():
i.unique2normal()
- if not isSameSqlList(i.originSql().split(),
- matched.originSql().split()):
+ if not isSameSqlList(i.originSql().split(), matched.originSql().split()):
if i.isCreateUniqueIndexSql():
self.__modified_unique_index.append(i.index_name())
if i.isIndexSpecialCondition1(matched):
@@ -1304,9 +1338,9 @@ class TableGroup(object):
return False
def __geneDropAllIndexSqls(self):
- '''
+ """
generate drop index sqls
- '''
+ """
drops = []
@@ -1317,68 +1351,68 @@ class TableGroup(object):
return drops
def tableSpace(self):
- '''
+ """
return the tablespace name
- '''
+ """
return CreateTableSql(self.__sqls_ct[0].originSql()).tableSpace()
def __checkTableSpaceChange(self, other):
- '''
+ """
check if tablespace changes
- '''
+ """
tablespace1 = self.tableSpace()
tablespace2 = other.tableSpace()
- if tablespace1 == 'SYSTEM' and tablespace2 == 'SYSAUX':
+ if tablespace1 == "SYSTEM" and tablespace2 == "SYSAUX":
return True
- if tablespace1 == 'SYSAUX' and tablespace2 == 'SYSTEM':
+ if tablespace1 == "SYSAUX" and tablespace2 == "SYSTEM":
return True
return False
def generateDegradeSql(self, other):
- '''
+ """
generate degrade sqls
- '''
+ """
self.__table_diff = []
upgrade_sqls = []
- load_sql = 'ALTER SYSTEM LOAD DICTIONARY FOR %s' % self.__table_name
+ load_sql = "ALTER SYSTEM LOAD DICTIONARY FOR %s" % self.__table_name
if not other:
- '''
+ """
other is empty or none
means all sqls are new
- '''
+ """
upgrade_sqls.append(load_sql)
upgrade_sqls.append("DROP TABLE %s" % self.__table_name)
self.__table_diff.append("DROP TABLE %s" % self.__table_name)
else:
- '''
+ """
other is not none
check if all items are matched
then check if we need to drop all indexes
then generate the sqls
- '''
+ """
if not isinstance(other, TableGroup):
raise Exception("Unexpected type %s" % type(other))
- if self.__checkTableSpaceChange(other)\
- and self.__table_name in self.__sysaux_tables:
+ if (
+ self.__checkTableSpaceChange(other)
+ and self.__table_name in self.__sysaux_tables
+ ):
for i in self.__sqls_ct:
i._replace("TABLESPACE SYSAUX", "TABLESPACE SYSTEM")
for i in other.__sqls_ct:
- i._replace("TABLESPACE SYSAUX",
- "TABLESPACE SYSTEM")
+ i._replace("TABLESPACE SYSAUX", "TABLESPACE SYSTEM")
if not self.__isAllMatched(other):
- raise Exception("Unmatched item %s"
- % self.__unmatched[0].originSql())
+ raise Exception("Unmatched item %s" % self.__unmatched[0].originSql())
if other.__isDropIndex(self):
@@ -1387,39 +1421,43 @@ class TableGroup(object):
global g_later_delete_flag
if self.__table_name in self.__special_tables:
- self.__table_diff.append("DELETE FROM %s"
- % self.__table_name)
+ self.__table_diff.append("DELETE FROM %s" % self.__table_name)
self.__table_diff.append("COMMIT")
- if self.__table_name == 'SYS_PROC_ARGS'\
- and other.__special_case1:
+ if self.__table_name == "SYS_PROC_ARGS" and other.__special_case1:
g_later_delete_flag = True
- self.__table_diff.append("DELETE FROM SYS_PROCS"
- " WHERE TYPE IN ('S','B')")
- self.__table_diff.append("DELETE FROM SYS_PROC_ARGS"
- " WHERE LENGTH(PACKAGE)>0")
+ self.__table_diff.append("DELETE FROM SYS_PROCS WHERE TYPE IN ('S','B')")
+ self.__table_diff.append("DELETE FROM SYS_PROC_ARGS WHERE LENGTH(PACKAGE)>0")
self.__table_diff.append("COMMIT")
- if self.__table_name == 'PROC_ARGS$'\
- and other.__special_case1:
+ if self.__table_name == "PROC_ARGS$" and other.__special_case1:
g_later_delete_flag = True
- self.__table_diff.append("DELETE FROM PROC$"
- " WHERE TYPE IN ('S','B')")
- self.__table_diff.append("DELETE FROM PROC_ARGS$"
- " WHERE LENGTH(PACKAGE)>0")
+ self.__table_diff.append("DELETE FROM PROC$ WHERE TYPE IN ('S','B')")
+ self.__table_diff.append("DELETE FROM PROC_ARGS$ WHERE LENGTH(PACKAGE)>0")
self.__table_diff.append("COMMIT")
- if self.__table_name == 'SYS_DEPENDENCIES'\
- and g_later_delete_flag:
- self.__table_diff.append("DELETE FROM SYS_DEPENDENCIES"
- " WHERE D_TYPE# IN (15, 16)"
- " OR P_TYPE# IN (15, 16)")
+ if self.__table_name == "SYS_DEPENDENCIES" and g_later_delete_flag:
+ self.__table_diff.append(
+ "DELETE FROM SYS_DEPENDENCIES"
+ " WHERE D_TYPE# IN (15, 16)"
+ " OR P_TYPE# IN (15, 16)"
+ )
self.__table_diff.append("COMMIT")
- if self.__table_name == 'DEPENDENCY$' and g_later_delete_flag:
- self.__table_diff.append("DELETE FROM DEPENDENCY$"
- " WHERE D_TYPE# IN (15, 16)"
- " OR P_TYPE# IN (15, 16)")
+ if self.__table_name == "DEPENDENCY$" and g_later_delete_flag:
+ self.__table_diff.append(
+ "DELETE FROM DEPENDENCY$"
+ " WHERE D_TYPE# IN (15, 16)"
+ " OR P_TYPE# IN (15, 16)"
+ )
self.__table_diff.append("COMMIT")
up = self.__geneDropAllIndexSqls()
@@ -1458,21 +1496,21 @@ class TableGroup(object):
return upgrade_sqls
def generateUpgradeSql(self, other):
- '''
+ """
generate upgrade sqls
- '''
+ """
self.__table_diff = []
upgrade_sqls = []
extra_sqls = []
- load_sql = 'ALTER SYSTEM LOAD DICTIONARY FOR %s' % self.__table_name
+ load_sql = "ALTER SYSTEM LOAD DICTIONARY FOR %s" % self.__table_name
if not other:
- '''
+ """
other is empty or none
means all sqls are new
- '''
+ """
self.__new_table.append(self.__table_name)
@@ -1484,26 +1522,27 @@ class TableGroup(object):
self.__table_diff.extend(up)
else:
- '''
+ """
other is not none
check is all matched
then check if need drop all index
then generate the sqls
- '''
+ """
if not isinstance(other, TableGroup):
raise Exception("Unexpected type %s" % type(other))
- if self.__checkTableSpaceChange(other)\
- and self.__table_name in self.__sysaux_tables:
+ if (
+ self.__checkTableSpaceChange(other)
+ and self.__table_name in self.__sysaux_tables
+ ):
for i in self.__sqls_ct:
i._replace("TABLESPACE SYSAUX", "TABLESPACE SYSTEM")
for i in other.__sqls_ct:
i._replace("TABLESPACE SYSAUX", "TABLESPACE SYSTEM")
if not self.__isAllMatched(other):
- raise Exception("Unmatched item %s"
- % self.__unmatched[0].originSql())
+ raise Exception("Unmatched item %s" % self.__unmatched[0].originSql())
if self.__isDropIndex(other):
@@ -1557,13 +1596,14 @@ class TableGroup(object):
class ViewGroup(object):
- '''
+ """
ex: --01 (No.) is a view group
The No. is unique
- '''
+ """
+
def __init__(self):
self.__number = 0
- self.__sqls = ''
+ self.__sqls = ""
def init(self, number):
self.__number = number
@@ -1582,7 +1622,7 @@ class ViewGroup(object):
sql2 = sql2.strip()
if len(sql1) != len(sql2):
return False
- return (sql1 == sql2)
+ return sql1 == sql2
def is_same(self, other):
@@ -1594,16 +1634,16 @@ class ViewGroup(object):
class RoleGroup(object):
- '''
+ """
a role group includes sqls:
create role
grant xxx,xxx|ALL to role
- '''
+ """
def __init__(self, role_sql):
- '''
+ """
init a RoleGroup object from a create role sql
- '''
+ """
self.__sqls = []
self.__sqls.append(role_sql)
@@ -1613,52 +1653,52 @@ class RoleGroup(object):
self.__flag = False
def setFlag(self, flag):
- '''
+ """
set flag for fetch
- '''
+ """
self.__flag = flag
def tableName(self):
- '''
+ """
empty
- '''
- return ''
+ """
+ return ""
def roleName(self):
- '''
+ """
return the role name
- '''
+ """
return self.__role_name
def name(self):
- '''
+ """
return the role name
- '''
+ """
return self.__role_name
def rename2old(self):
- '''
+ """
do nothing (this group does not need renaming)
- '''
+ """
return
def rename2new(self):
- '''
+ """
do nothing (this group does not need renaming)
- '''
+ """
return
def append(self, sql):
- '''
+ """
append to internal lists
- '''
+ """
self.__sqls.append(sql)
def isMatched(self, other):
- '''
+ """
if role name is the same
- '''
+ """
if not isinstance(other, RoleGroup):
return False
@@ -1669,9 +1709,9 @@ class RoleGroup(object):
return False
def create_role_sql(self):
- '''
+ """
return the create role sql in this group
- '''
+ """
for sql in self.__sqls:
if sql.isCreateRoleSql():
@@ -1679,9 +1719,9 @@ class RoleGroup(object):
return None
def grant_sql(self):
- '''
+ """
get the grant sql in this group
- '''
+ """
for sql in self.__sqls:
if sql.isGrantSql():
@@ -1689,9 +1729,9 @@ class RoleGroup(object):
return None
def generateDegradeSql(self, other):
- '''
+ """
generate degrade sqls
- '''
+ """
self.__diffs = []
if not other:
@@ -1710,9 +1750,9 @@ class RoleGroup(object):
return self.__diffs
def generateUpgradeSql(self, other):
- '''
+ """
generate upgrade sqls
- '''
+ """
self.__diffs = []
if not other:
@@ -1732,9 +1772,9 @@ class RoleGroup(object):
return self.__diffs, []
def isFlagTrue(self):
- '''
+ """
return the internal flag
- '''
+ """
return self.__flag
def last_generated_diff(self):
@@ -1742,9 +1782,9 @@ class RoleGroup(object):
return self.__diffs
def version(self):
- '''
+ """
get the version
- '''
+ """
return self.__sql_version
@@ -1752,9 +1792,9 @@ class RoleGroup(object):
class InitDbSqls(object):
def __init__(self):
- '''
+ """
init item list
- '''
+ """
self.__all_items = []
self.__style = set()
self.__fast_ref = {}
@@ -1762,53 +1802,53 @@ class InitDbSqls(object):
self.__last_version = None
def __str__(self):
- '''
+ """
this function is for easy debugging
- '''
- s = ''
+ """
+ s = ""
for item in self.__all_items:
s += str(item)
return s
def get_last_version(self):
- '''
+ """
get last version
- '''
+ """
return self.__last_version
def getStyle(self):
- '''
+ """
return table name style
- '''
- if 'old' in self.__style:
- return 'old'
+ """
+ if "old" in self.__style:
+ return "old"
else:
- return 'new'
+ return "new"
def rename2old(self):
- '''
+ """
rename all item to old style
- '''
+ """
for i in self.__all_items:
i.rename2old()
def rename2new(self):
- '''
+ """
rename all item to new style
- '''
+ """
for i in self.__all_items:
i.rename2new()
def init(self, sql_file):
- '''
+ """
init from an initdb.sql-like file
- '''
- with open(sql_file, 'r') as fp:
+ """
+ with open(sql_file, "r") as fp:
content = fp.read()
# translate to unix format
- content = content.replace('\r\n', '\n')
- cmds = content.split('/')
+ content = content.replace("\r\n", "\n")
+ cmds = content.split("/")
for cmd in cmds:
if cmd.strip():
self.__append(cmd.strip())
@@ -1817,18 +1857,20 @@ class InitDbSqls(object):
raise Exception("Error: mixed table name")
def __iter__(self):
- '''
+ """
iterator of item list
- '''
+ """
+
def item_iter():
for item in self.__all_items:
yield item
+
return item_iter()
def __check_version(self, sql_item):
- '''
+ """
check the version of table, role, and sequence items
- '''
+ """
if self.__last_version is None:
self.__last_version = sql_item.version()
@@ -1840,9 +1882,9 @@ class InitDbSqls(object):
raise Exception("Unsupported versions! \n%s" % str(sql_item))
def __append(self, sql):
- '''
+ """
append sql item to item list
- '''
+ """
item = SqlItem(sql)
@@ -1851,7 +1893,7 @@ class InitDbSqls(object):
group = TableGroup(item)
self.__all_items.append(group)
self.__fast_ref[item.tableName()] = group
- self.__fast_ref['SYS.'+item.tableName()] = group
+ self.__fast_ref["SYS." + item.tableName()] = group
self.__check_version(item)
elif item.isTableSql():
@@ -1860,8 +1902,9 @@ class InitDbSqls(object):
self.__fast_ref[item.tableName()].append(item)
except Exception as e:
- raise Exception("%s before create table %s"
- % (item.originSql(), str(e)))
+ raise Exception(
+ "%s before create table %s" % (item.originSql(), str(e))
+ )
elif item.isCreateRoleSql():
@@ -1876,8 +1919,7 @@ class InitDbSqls(object):
self.__fast_ref_role[item.roleName()].append(item)
except Exception as e:
- raise Exception("%s before create role %s"
- % (item.originSql(), str(e)))
+ raise Exception("%s before create role %s" % (item.originSql(), str(e)))
else:
self.__all_items.append(item)
@@ -1887,9 +1929,9 @@ class InitDbSqls(object):
self.__style.add(item.nameStyle)
def fetch(self, sql):
- '''
+ """
fetch a sql from item list
- '''
+ """
for item in self.__all_items:
if item.isMatched(sql):
@@ -1898,9 +1940,9 @@ class InitDbSqls(object):
return None
def checkUnMatchedItem(self):
- '''
+ """
check if some items have not been fetched
- '''
+ """
unmatched = []
for item in self.__all_items:
@@ -1909,51 +1951,53 @@ class InitDbSqls(object):
if unmatched:
for item in unmatched:
- print('Error unmatched: %s' % str(item))
+ print("Error unmatched: %s" % str(item))
raise Exception("Some item(s) unmatched!")
class InitViewSqls(object):
def __init__(self):
- '''
+ """
init item list
- '''
+ """
self.__all_items = []
def init(self, sql_file):
- '''
+ """
init from an initdb.sql-like file
- '''
- with open(sql_file, 'r') as fp:
+ """
+ with open(sql_file, "r") as fp:
content = fp.read()
# translate to unix format
- content = content.replace('\r\n', '\n')
- cmds = content.split('\n/')
+ content = content.replace("\r\n", "\n")
+ cmds = content.split("\n/")
for cmd in cmds:
if cmd.strip():
self.__append(cmd.strip())
def __iter__(self):
- '''
+ """
iterator of item list
- '''
+ """
+
def item_iter():
for item in self.__all_items:
yield item
+
return item_iter()
def __append(self, sql):
- '''
+ """
append sql item to item list
- '''
+ """
item = SqlItem(sql, is_target=True, ignore=True)
self.__all_items.append(item)
def fetch(self, sql):
- '''
+ """
fetch a sql from item list
- '''
+ """
for item in self.__all_items:
if item.isMatched(sql):
@@ -1969,36 +2013,33 @@ class InitIncreaseView(object):
self.__path = ""
def add_group(self, viewGroup):
- if viewGroup.all_sqls().rstrip() != '':
+ if viewGroup.all_sqls().rstrip() != "":
self.__numberList.append(viewGroup.number())
self.__viewGroups.append(viewGroup)
elif viewGroup.number() != 0:
gnum = viewGroup.number()
- raise Exception("In %s, the %d module is empty!" % (self.__path,
- gnum))
+ raise Exception("In %s, the %d module is empty!" % (self.__path, gnum))
def init(self, sqlfile):
self.__path = sqlfile
- with open(sqlfile, 'r') as fp:
+ with open(sqlfile, "r") as fp:
line = fp.readline()
if line.rstrip() != "--01":
raise Exception("The %s should start with --01 !" % sqlfile)
viewgroup = ViewGroup()
- while(line):
- if line.rstrip()[0:2] == '--' and line.rstrip()[2:].isdigit():
+ while line:
+ if line.rstrip()[0:2] == "--" and line.rstrip()[2:].isdigit():
self.add_group(viewgroup)
number = int(line[2:])
viewgroup = ViewGroup()
viewgroup.init(number)
else:
- line = line.replace('\r\n', '\n')
+ line = line.replace("\r\n", "\n")
viewgroup.add_sql(line)
line = fp.readline()
self.add_group(viewgroup)
-
-
def get_all_group(self):
return self.__viewGroups
@@ -2038,12 +2079,13 @@ def writeFile(filename, contents):
write file
"""
- with open(filename, 'w') as fp:
+ with open(filename, "w") as fp:
fp.write(contents)
- cmd = 'chmod %s %s' % (DefaultValue.KEY_FILE_MODE, filename)
- p = subprocess.Popen(['bash', '-c', cmd],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd = "chmod %s %s" % (DefaultValue.KEY_FILE_MODE, filename)
+ p = subprocess.Popen(
+ ["bash", "-c", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
(stdoutdata, stderrdata) = p.communicate()
status = p.returncode
output = stdoutdata + stderrdata
@@ -2053,8 +2095,7 @@ def writeFile(filename, contents):
output = output.decode()
if status != 0:
- raise Exception("Can not change %s mode.\nError:%s"
- % (filename, output))
+ raise Exception("Can not change %s mode.\nError:%s" % (filename, output))
def generateRenameSqlFile(outputFilePath, appeared, is_rename2new=True):
@@ -2067,7 +2108,7 @@ def generateRenameSqlFile(outputFilePath, appeared, is_rename2new=True):
upgradeRename = os.path.join(outputFilePath, "upgradeRename.sql")
- tables_name = 'SYS_TABLES'
+ tables_name = "SYS_TABLES"
if is_rename2new:
s_core_tables = old_core_tables
t_core_tables = new_core_tables
@@ -2082,21 +2123,25 @@ def generateRenameSqlFile(outputFilePath, appeared, is_rename2new=True):
updateList = []
for item in s_core_tables:
- updateNameSql = 'UPDATE %s SET NAME=\'%s\' WHERE NAME=\'%s\''\
- % (tables_name,
- t_core_tables[s_core_tables.index(item)], item)
+ updateNameSql = "UPDATE %s SET NAME='%s' WHERE NAME='%s'" % (
+ tables_name,
+ t_core_tables[s_core_tables.index(item)],
+ item,
+ )
updateList.append(updateNameSql)
- updateList.append('ALTER SYSTEM LOAD DICTIONARY FOR %s'
- % tables_name)
+ updateList.append("ALTER SYSTEM LOAD DICTIONARY FOR %s" % tables_name)
for item in s_systables:
if item not in s_core_tables:
if item in appeared:
- updateNameSql = 'ALTER TABLE %s rename to %s'\
- % (item, t_systables[s_systables.index(item)])
+ updateNameSql = "ALTER TABLE %s rename to %s" % (
+ item,
+ t_systables[s_systables.index(item)],
+ )
updateList.append(updateNameSql)
- updateNameSql = 'ALTER SYSTEM LOAD DICTIONARY FOR %s'\
- % (t_systables[s_systables.index(item)])
+ updateNameSql = "ALTER SYSTEM LOAD DICTIONARY FOR %s" % (
+ t_systables[s_systables.index(item)]
+ )
updateList.append(updateNameSql)
if updateList:
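Reviewer note on the rename loop above: it pairs the source and target name lists by index, emitting UPDATE statements for the core tables and ALTER TABLE ... rename for the rest, each followed by a dictionary reload. For the non-core tables the pairing is equivalent to walking the two lists with zip; a tiny illustrative sketch using one verified pair from the lists at the top of the file:

    old_names = ["BACKUP_SET$"]      # sample entry from old_systables
    new_names = ["SYS_BACKUP_SETS"]  # its counterpart in new_systables

    for old, new in zip(old_names, new_names):
        print("ALTER TABLE %s rename to %s" % (old, new))
        print("ALTER SYSTEM LOAD DICTIONARY FOR %s" % new)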
@@ -2108,6 +2153,7 @@ class CmdOption(object):
"""
:define global parameters
"""
+
def __init__(self):
"""
initial assignment
@@ -2131,37 +2177,37 @@ g_opts = CmdOption()
def usage():
"""
-sql_process.py is a utility to upgrade.
-
-Usage:
- python sql_process.py -? | --help
- python sql_process.py -t generate --new-initdb=NEW_INITDB_SQL_FILE
- --old-initdb=OLD_INITDB_SQL_FILE --outdir=OUT_DIR
- --sqls-path=SQLS_DIR [--degrade]
- python sql_process.py -t gen-view --new=NEW_SQL_FILE --old=OLD_SQL_FILE
- --outfile=OUT_FILE
- python sql_process.py -t gen-dict --initdb=INITDB_SQL_FILE
- --outdir=OUT_DIR
- python sql_process.py -t check-initdb --old-initdb=LOW_INITDB_SQL_FILE
- --new-initdb=HIGH_INITDB_SQL_FILE [--adjacent]
- python sql_process.py -t check-whitelist --sqls-path=SQLS_DIR
- python sql_process.py -t gen-increase-view --new=NEW_SQL_FILE
- --old=OLD_SQL_FILE --outfile=OUT_FILE
-
-General options:
- -t Specified the action.
- --new-initdb Specified new initdb file.
- --old-initdb Specified old initdb file.
- --initdb Specified initdb file.
- --outdir Specified output directory.
- --new Specified new sql file.
- --old Specified old sql file.
- --outfile Specified output file.
- --sqls-path Specified upgrade and degrade sqls' path
- --degrade From high version to low version.
- --adjacent The two initdb.sql are adjacent versions.
- -?, --help Show help information for this utility,
- and exit the command line mode.
+ sql_process.py is a utility for upgrades.
+
+ Usage:
+ python sql_process.py -? | --help
+ python sql_process.py -t generate --new-initdb=NEW_INITDB_SQL_FILE
+ --old-initdb=OLD_INITDB_SQL_FILE --outdir=OUT_DIR
+ --sqls-path=SQLS_DIR [--degrade]
+ python sql_process.py -t gen-view --new=NEW_SQL_FILE --old=OLD_SQL_FILE
+ --outfile=OUT_FILE
+ python sql_process.py -t gen-dict --initdb=INITDB_SQL_FILE
+ --outdir=OUT_DIR
+ python sql_process.py -t check-initdb --old-initdb=LOW_INITDB_SQL_FILE
+ --new-initdb=HIGH_INITDB_SQL_FILE [--adjacent]
+ python sql_process.py -t check-whitelist --sqls-path=SQLS_DIR
+ python sql_process.py -t gen-increase-view --new=NEW_SQL_FILE
+ --old=OLD_SQL_FILE --outfile=OUT_FILE
+
+ General options:
+ -t Specify the action.
+ --new-initdb Specify the new initdb file.
+ --old-initdb Specify the old initdb file.
+ --initdb Specify the initdb file.
+ --outdir Specify the output directory.
+ --new Specify the new sql file.
+ --old Specify the old sql file.
+ --outfile Specify the output file.
+ --sqls-path Specify the upgrade and degrade sqls' path.
+ --degrade From high version to low version.
+ --adjacent The two initdb.sql are adjacent versions.
+ -?, --help Show help information for this utility,
+ and exit the command line mode.
"""
print(usage.__doc__)
@@ -2174,68 +2220,96 @@ def parseCommandLine():
"""
try:
- (opts, args) = getopt.getopt(sys.argv[1:], "t:?", ["new-initdb=",
- "old-initdb=",
- "initdb=",
- "outdir=",
- "new=",
- "old=",
- "outfile=",
- "sqls-path=",
- "degrade",
- "adjacent",
- "help"])
+ (opts, args) = getopt.getopt(
+ sys.argv[1:],
+ "t:?",
+ [
+ "new-initdb=",
+ "old-initdb=",
+ "initdb=",
+ "outdir=",
+ "new=",
+ "old=",
+ "outfile=",
+ "sqls-path=",
+ "degrade",
+ "adjacent",
+ "help",
+ ],
+ )
except Exception as e:
print(str(e))
sys.exit(2)
# The error is exited if an illegal parameter appears
- if (len(args) > 0):
+ if len(args) > 0:
raise Exception("Unknown parameter [%s]." % args[0])
# the command must contains parameters
- if (len(opts) == 0):
+ if len(opts) == 0:
raise Exception("Missing required parameters.")
# the list of invalid characters
- VALUE_CHECK_LIST = ["|", ";", "&", "$", "<", ">",
- "`", "\\", "'", "\"", "{", "}",
- "(", ")", "[", "]", "~", "*", "?", "!", "\n"]
+ VALUE_CHECK_LIST = [
+ "|",
+ ";",
+ "&",
+ "$",
+ "<",
+ ">",
+ "`",
+ "\\",
+ "'",
+ '"',
+ "{",
+ "}",
+ "(",
+ ")",
+ "[",
+ "]",
+ "~",
+ "*",
+ "?",
+ "!",
+ "\n",
+ ]
# start to parse the parameter value
- for (key, value) in opts:
+ for key, value in opts:
for role in VALUE_CHECK_LIST:
if value.find(role) >= 0:
- raise Exception("The value of parameter [%s]"
- " contains invalid characters:"
- " '%s'" % (key, role))
+ raise Exception(
+ "The value of parameter [%s]"
+ " contains invalid characters:"
+ " '%s'" % (key, role)
+ )
# output version information and exit
- if (key == "--help" or key == "-?"):
+ if key == "--help" or key == "-?":
usage()
sys.exit(0)
# get parameter value
- elif (key == "-t"):
+ elif key == "-t":
g_opts.action = value
- elif (key == "--new-initdb"):
+ elif key == "--new-initdb":
g_opts.new_init_db = os.path.realpath(value)
- elif (key == "--old-initdb"):
+ elif key == "--old-initdb":
g_opts.old_init_db = os.path.realpath(value)
- elif (key == "--initdb"):
+ elif key == "--initdb":
g_opts.init_db = os.path.realpath(value)
- elif (key == "--outdir"):
+ elif key == "--outdir":
if value:
g_opts.generate_path = os.path.realpath(value)
- elif (key == "--new"):
+ elif key == "--new":
g_opts.new = os.path.realpath(value)
- elif (key == "--old"):
+ elif key == "--old":
g_opts.old = os.path.realpath(value)
- elif (key == "--outfile"):
+ elif key == "--outfile":
if value:
g_opts.generate_file = os.path.realpath(value)
- elif (key == "--sqls-path"):
+ elif key == "--sqls-path":
if value:
g_opts.sqls_path = os.path.realpath(value)
- elif (key == "--degrade"):
+ elif key == "--degrade":
g_opts.isDegrade = True
- elif (key == "--adjacent"):
+ elif key == "--adjacent":
g_opts.isAdjacent = True
else:
raise Exception("unknown paramter: [-%s]." % key)
@@ -2254,12 +2328,12 @@ def checkFile(parame, filename):
raise Exception("less necessary parameter [%s]." % parame)
# file does not exist
if not os.path.exists(filename):
- raise Exception("The value of necessary parameter"
- " [%s] is not exists." % parame)
+ raise Exception(
+ "The value of necessary parameter [%s] does not exist." % parame
+ )
# not a file type
elif not os.path.isfile(filename):
- raise Exception("The value of parameter [%s]"
- " is not file type." % parame)
+ raise Exception("The value of parameter [%s]" " is not file type." % parame)
def parseParams():
@@ -2273,24 +2347,23 @@ def parseParams():
checkFile("--old-initdb", g_opts.old_init_db)
if g_opts.generate_path:
if not os.path.exists(g_opts.generate_path):
- raise Exception("The value of parameter"
- " [--outdir] is not exists.")
+ raise Exception("The value of parameter" " [--outdir] is not exists.")
if os.path.isfile(g_opts.generate_path):
- raise Exception("The value of parameter"
- " [--outdir] is not dirctory.")
+ raise Exception("The value of parameter" " [--outdir] is not dirctory.")
else:
raise Exception("The value of parameter [--outdir] is necessary.")
if g_opts.sqls_path:
if not os.path.exists(g_opts.sqls_path):
- raise Exception("The value of parameter"
- " [--sqls-path] is not exists.")
+ raise Exception(
+ "The value of parameter [--sqls-path] does not exist."
+ )
if os.path.isfile(g_opts.sqls_path):
- raise Exception("The value of parameter"
- " [--sqls-path] is not dirctory.")
+ raise Exception(
+ "The value of parameter [--sqls-path] is not a directory."
+ )
else:
- raise Exception("The value of parameter"
- " [--sqls-path] is necessary.")
+ raise Exception("The value of parameter" " [--sqls-path] is necessary.")
elif g_opts.action == "gen-view":
if g_opts.new:
@@ -2300,11 +2373,11 @@ def parseParams():
if g_opts.generate_file:
if os.path.exists(g_opts.generate_path):
if not os.path.isfile(g_opts.generate_path):
- raise Exception("The value of parameter"
- " [--outfile] is not a file.")
+ raise Exception(
+ "The value of parameter [--outfile] is not a file."
+ )
else:
- raise Exception("The value of parameter"
- " [--outfile] is necessary.")
+ raise Exception("The value of parameter" " [--outfile] is necessary.")
elif g_opts.action == "gen-dict":
@@ -2312,11 +2385,9 @@ def parseParams():
if g_opts.generate_path:
if not os.path.exists(g_opts.generate_path):
- raise Exception("The value of parameter"
- " [--outdir] is not exists.")
+ raise Exception("The value of parameter" " [--outdir] is not exists.")
if os.path.isfile(g_opts.generate_path):
- raise Exception("The value of parameter"
- " [--outdir] is not dirctory.")
+ raise Exception("The value of parameter" " [--outdir] is not dirctory.")
else:
raise Exception("The value of parameter [--outdir] is necessary.")
@@ -2329,34 +2400,33 @@ def parseParams():
if g_opts.sqls_path:
if not os.path.exists(g_opts.sqls_path):
- raise Exception("The value of parameter"
- " [--sqls-path] is not exists.")
+ raise Exception(
+ "The value of parameter [--sqls-path] does not exist."
+ )
if os.path.isfile(g_opts.sqls_path):
- raise Exception("The value of parameter"
- " [--sqls-path] is not dirctory.")
+ raise Exception(
+ "The value of parameter [--sqls-path] is not a directory."
+ )
else:
- raise Exception("The value of parameter"
- " [--sqls-path] is necessary.")
+ raise Exception("The value of parameter" " [--sqls-path] is necessary.")
elif g_opts.action == "gen-increase-view":
if g_opts.new:
checkFile("--new", g_opts.new)
else:
- raise Exception("The value of parameter"
- " [--new] is necessary.")
+ raise Exception("The value of parameter" " [--new] is necessary.")
if g_opts.old:
checkFile("--old", g_opts.old)
else:
- raise Exception("The value of parameter"
- " [--old] is necessary.")
+ raise Exception("The value of parameter" " [--old] is necessary.")
if g_opts.generate_file:
if os.path.exists(g_opts.generate_path):
if not os.path.isfile(g_opts.generate_path):
- raise Exception("The value of parameter"
- " [--outfile] is not a file.")
+ raise Exception(
+ "The value of parameter [--outfile] is not a file."
+ )
else:
- raise Exception("The value of parameter"
- " [--outfile] is necessary.")
+ raise Exception("The value of parameter" " [--outfile] is necessary.")
else:
raise Exception("The value of parameter [-t] is illegal.")
@@ -2383,7 +2453,7 @@ def generate_1_0_degrade(zero_init_db, ver1_init_db):
it = iter(ver1_init_db)
new_init_db = zero_init_db
- output_path = os.path.join(g_opts.generate_path, '01')
+ output_path = os.path.join(g_opts.generate_path, "01")
if os.path.exists(output_path):
for _root, _dirs, _files in os.walk(output_path, topdown=False):
for fname in _files:
@@ -2407,8 +2477,10 @@ def generate_1_0_degrade(zero_init_db, ver1_init_db):
table_update = i.last_generated_diff()
if table_update:
outputSqlString = "\n/\n\n".join(table_update) + "\n/\n"
- writeFile(os.path.join(g_opts.generate_path, '01',
- name+'_degrade_1.sql'), outputSqlString)
+ writeFile(
+ os.path.join(g_opts.generate_path, "01", name + "_degrade_1.sql"),
+ outputSqlString,
+ )
def generate_0_1_upgrade(zero_init_db, ver1_init_db):
@@ -2418,7 +2490,7 @@ def generate_0_1_upgrade(zero_init_db, ver1_init_db):
it = iter(ver1_init_db)
old_init_db = zero_init_db
- output_path = os.path.join(g_opts.generate_path, '01')
+ output_path = os.path.join(g_opts.generate_path, "01")
if os.path.exists(output_path):
for _root, _dirs, _files in os.walk(output_path, topdown=False):
for fname in _files:
@@ -2443,8 +2515,10 @@ def generate_0_1_upgrade(zero_init_db, ver1_init_db):
table_update = i.last_generated_diff()
if table_update:
outputSqlString = "\n/\n\n".join(table_update) + "\n/\n"
- writeFile(os.path.join(g_opts.generate_path, '01',
- name+'_upgrade_1.sql'), outputSqlString)
+ writeFile(
+ os.path.join(g_opts.generate_path, "01", name + "_upgrade_1.sql"),
+ outputSqlString,
+ )
def generate_degrade():
@@ -2464,7 +2538,7 @@ def generate_degrade():
new_init_db.init(g_opts.new_init_db)
styleB = new_init_db.getStyle()
- if styleA == 'new' and styleB == 'old':
+ if styleA == "new" and styleB == "old":
new_init_db.rename2new()
old_last_version = old_init_db.get_last_version()
@@ -2473,36 +2547,35 @@ def generate_degrade():
if new_last_version == 0 and old_last_version >= 1:
generate_01_degrade = True
- initdb_01_file = os.path.join(g_opts.sqls_path, 'initdb_01.sql')
+ initdb_01_file = os.path.join(g_opts.sqls_path, "initdb_01.sql")
if not os.path.exists(initdb_01_file):
raise Exception("Can not find file %s" % initdb_01_file)
initdb_01 = InitDbSqls()
initdb_01.init(initdb_01_file)
- if styleA == 'new':
+ if styleA == "new":
initdb_01.rename2new()
if generate_01_degrade:
generate_1_0_degrade(new_init_db, initdb_01)
- if styleA == 'old' or styleB == 'new':
+ if styleA == "old" or styleB == "new":
systable_new = []
else:
systable_new = get_system_table_names(old_init_db)
- generateRenameSqlFile(g_opts.generate_path,
- systable_new, is_rename2new=False)
+ generateRenameSqlFile(g_opts.generate_path, systable_new, is_rename2new=False)
it = iter(old_init_db)
- output_content = ''
- output_file_name = os.path.join(g_opts.generate_path, 'upgradeFile.sql')
+ output_content = ""
+ output_file_name = os.path.join(g_opts.generate_path, "upgradeFile.sql")
for i in it:
- output_content = generateDegreFiles(i, new_init_db,
- initdb_01, old_systables,
- new_systables, output_content)
+ output_content = generateDegreFiles(
+ i, new_init_db, initdb_01, old_systables, new_systables, output_content
+ )
sql = "ALTER SYSTEM INIT DICTIONARY\n/\n\n"
output_content += sql
writeFile(output_file_name, output_content)
@@ -2513,25 +2586,27 @@ def generate_degrade():
def checkDegredFile(item_01, name):
if item_01:
- degrade_01_file = os.path.join(g_opts.generate_path, '01',
- name+'_degrade_1.sql')
+ degrade_01_file = os.path.join(
+ g_opts.generate_path, "01", name + "_degrade_1.sql"
+ )
if not os.path.exists(degrade_01_file):
- raise Exception("Cannot find degrade file for"
- " version_1 to version_0 %s"
- % degrade_01_file)
+ raise Exception(
+ "Cannot find degrade file for"
+ " version_1 to version_0 %s" % degrade_01_file
+ )
else:
- degrade_01_file = os.path.join(g_opts.sqls_path,
- name+'_degrade_1.sql')
+ degrade_01_file = os.path.join(g_opts.sqls_path, name + "_degrade_1.sql")
if not os.path.exists(degrade_01_file):
- raise Exception("Cannot find degrade file for"
- " version_1 to version_0 %s"
- % degrade_01_file)
- upgrade_01_file = os.path.join(g_opts.sqls_path,
- name+'_degrade_1.sql')
+ raise Exception(
+ "Cannot find degrade file for"
+ " version_1 to version_0 %s" % degrade_01_file
+ )
+ upgrade_01_file = os.path.join(g_opts.sqls_path, name + "_degrade_1.sql")
if not os.path.exists(upgrade_01_file):
- raise Exception("Cannot find upgrade file for"
- " version_0 to version_1 %s"
- % upgrade_01_file)
+ raise Exception(
+ "Cannot find upgrade file for"
+ " version_0 to version_1 %s" % upgrade_01_file
+ )
return degrade_01_file
@@ -2550,16 +2625,15 @@ def generateDegreFiles(i, ndb, i01, osystables, nsystables, output):
if not low_item:
degrade_01_file = checkDegredFile(item_01, name)
else:
- degrade_01_file = os.path.join(g_opts.generate_path, '01',
- name+'_degrade_1.sql')
+ degrade_01_file = os.path.join(
+ g_opts.generate_path, "01", name + "_degrade_1.sql"
+ )
if os.path.exists(degrade_01_file):
diff_files.append(degrade_01_file)
- start_ver = max(2, low_ver+1)
- for t in range(start_ver, high_ver+1):
- de_file = os.path.join(g_opts.sqls_path,
- name+'_degrade_'+str(t)+'.sql')
- up_file = os.path.join(g_opts.sqls_path,
- name+'_upgrade_'+str(t)+'.sql')
+ start_ver = max(2, low_ver + 1)
+ for t in range(start_ver, high_ver + 1):
+ de_file = os.path.join(g_opts.sqls_path, name + "_degrade_" + str(t) + ".sql")
+ up_file = os.path.join(g_opts.sqls_path, name + "_upgrade_" + str(t) + ".sql")
if not os.path.exists(de_file):
raise Exception("Cannot find file %s" % de_file)
if not os.path.exists(up_file):
@@ -2569,15 +2643,15 @@ def generateDegreFiles(i, ndb, i01, osystables, nsystables, output):
sql = "ALTER SYSTEM LOAD DICTIONARY FOR %s\n/\n\n" % i.name()
output += sql
- white_list_file = os.path.join(g_opts.sqls_path, 'degrade_whitelist')
+ white_list_file = os.path.join(g_opts.sqls_path, "degrade_whitelist")
wl_rules = get_whitelist_rules(white_list_file)
for f in diff_files[::-1]:
check_wl_on_file(f, wl_rules)
- with open(f, 'r') as fp:
+ with open(f, "r") as fp:
content = fp.read()
output += content
- output += '\n'
+ output += "\n"
if low_item and diff_files and isinstance(i, TableGroup):
sql = "ALTER SYSTEM LOAD DICTIONARY FOR %s\n/\n\n" % i.name()
output += sql
@@ -2585,21 +2659,21 @@ def generateDegreFiles(i, ndb, i01, osystables, nsystables, output):
def remove_comment(text):
- '''
+ """
remove comment lines which begin with '--'
- '''
+ """
all_text = []
if not text:
- return ''
+ return ""
- lines = text.strip().split('\n')
+ lines = text.strip().split("\n")
for line in lines:
- if line.strip().find('--') != 0:
+ if line.strip().find("--") != 0:
all_text.append(line)
- return '\n'.join(all_text)
+ return "\n".join(all_text)
def generate_upgrade():
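
For reference, remove_comment() above drops only whole lines whose first non-blank characters are "--"; a trailing comment on a statement line survives. A quick self-contained check (the input SQL is hypothetical):

    def remove_comment(text):
        # Same semantics as the helper above: keep every line that does not
        # start with the SQL line-comment marker "--".
        if not text:
            return ""
        lines = text.strip().split("\n")
        return "\n".join(l for l in lines if l.strip().find("--") != 0)

    sql = "-- header comment\nSELECT 1 FROM DUAL -- trailing\n"
    print(remove_comment(sql))  # SELECT 1 FROM DUAL -- trailing
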
@@ -2619,7 +2693,7 @@ def generate_upgrade():
new_init_db.init(g_opts.new_init_db)
styleB = new_init_db.getStyle()
- if styleA == 'old' and styleB == 'new':
+ if styleA == "old" and styleB == "new":
new_init_db.rename2old()
old_last_version = old_init_db.get_last_version()
@@ -2628,20 +2702,20 @@ def generate_upgrade():
if old_last_version == 0 and new_last_version >= 1:
generate_01_upgrade = True
- initdb_01_file = os.path.join(g_opts.sqls_path, 'initdb_01.sql')
+ initdb_01_file = os.path.join(g_opts.sqls_path, "initdb_01.sql")
if not os.path.exists(initdb_01_file):
raise Exception("Can not find file %s" % initdb_01_file)
initdb_01 = InitDbSqls()
initdb_01.init(initdb_01_file)
- if styleA == 'old':
+ if styleA == "old":
initdb_01.rename2old()
if generate_01_upgrade:
generate_0_1_upgrade(old_init_db, initdb_01)
- if styleA == 'new' or styleB == 'old':
+ if styleA == "new" or styleB == "old":
systable_new = []
else:
systable_new = get_system_table_names(new_init_db)
@@ -2650,15 +2724,15 @@ def generate_upgrade():
it = iter(new_init_db)
- output_content = ''
- output_file_name = os.path.join(g_opts.generate_path, 'upgradeFile.sql')
+ output_content = ""
+ output_file_name = os.path.join(g_opts.generate_path, "upgradeFile.sql")
for i in it:
diff_files = []
- diff_files, old_item = generateFileName(diff_files, i, old_systables,
- old_init_db, initdb_01,
- new_systables)
+ diff_files, old_item = generateFileName(
+ diff_files, i, old_systables, old_init_db, initdb_01, new_systables
+ )
if old_item:
if isinstance(i, TableGroup):
sql = "ALTER SYSTEM LOAD DICTIONARY FOR %s\n/\n\n" % i.name()
@@ -2684,16 +2758,15 @@ def generateFileName(diff_files, i, otables, old_db, initdb_01, ntables):
if not old_item:
upgrade_01_file = checkFileExist(item_01, name)
else:
- upgrade_01_file = os.path.join(g_opts.generate_path, '01',
- name+'_upgrade_1.sql')
+ upgrade_01_file = os.path.join(
+ g_opts.generate_path, "01", name + "_upgrade_1.sql"
+ )
if os.path.exists(upgrade_01_file):
diff_files.append(upgrade_01_file)
- start_ver = max(2, old_ver+1)
- for t in range(start_ver, new_ver+1):
- up_file = os.path.join(g_opts.sqls_path,
- name+'_upgrade_'+str(t)+'.sql')
- de_file = os.path.join(g_opts.sqls_path,
- name+'_degrade_'+str(t)+'.sql')
+ start_ver = max(2, old_ver + 1)
+ for t in range(start_ver, new_ver + 1):
+ up_file = os.path.join(g_opts.sqls_path, name + "_upgrade_" + str(t) + ".sql")
+ de_file = os.path.join(g_opts.sqls_path, name + "_degrade_" + str(t) + ".sql")
if not os.path.exists(up_file):
raise Exception("Cannot find file %s" % up_file)
if not os.path.exists(de_file):
@@ -2704,51 +2777,52 @@ def generateFileName(diff_files, i, otables, old_db, initdb_01, ntables):
def checkFileExist(item_01, name):
if item_01:
- upgrade_01_file = os.path.join(g_opts.generate_path, '01',
- name+'_upgrade_1.sql')
+ upgrade_01_file = os.path.join(
+ g_opts.generate_path, "01", name + "_upgrade_1.sql"
+ )
if not os.path.exists(upgrade_01_file):
- raise Exception("Cannot find upgrade file for"
- " version_0 to version_1 %s"
- % upgrade_01_file)
+ raise Exception(
+ "Cannot find upgrade file for"
+ " version_0 to version_1 %s" % upgrade_01_file
+ )
else:
- upgrade_01_file = os.path.join(g_opts.sqls_path,
- name+'_upgrade_1.sql')
+ upgrade_01_file = os.path.join(g_opts.sqls_path, name + "_upgrade_1.sql")
if not os.path.exists(upgrade_01_file):
- raise Exception("Cannot find upgrade file for"
- " version_0 to version_1 %s"
- % upgrade_01_file)
- degrade_01_file = os.path.join(g_opts.sqls_path,
- name+'_degrade_1.sql')
+ raise Exception(
+ "Cannot find upgrade file for"
+ " version_0 to version_1 %s" % upgrade_01_file
+ )
+ degrade_01_file = os.path.join(g_opts.sqls_path, name + "_degrade_1.sql")
if not os.path.exists(degrade_01_file):
- raise Exception("Cannot find degrade file for"
- " version_1 to version_0 %s"
- % degrade_01_file)
+ raise Exception(
+ "Cannot find degrade file for"
+ " version_1 to version_0 %s" % degrade_01_file
+ )
return upgrade_01_file
def readDiffFiles(i, diff_files, output_content, styleA):
for f in diff_files:
- with open(f, 'r') as fp:
+ with open(f, "r") as fp:
content = fp.read()
- if styleA == 'old':
- content = content.replace('\r\n', '\n')
+ if styleA == "old":
+ content = content.replace("\r\n", "\n")
content = remove_comment(content)
- sqls = content.split('\n/')
+ sqls = content.split("\n/")
sql_items = []
for sql in sqls:
if sql.strip():
- sql_item = SqlItem(sql,
- is_target=True,
- ignore=True)
+ sql_item = SqlItem(sql, is_target=True, ignore=True)
sql_item.rename2old()
sql_items.append(sql_item)
- content = "\n/\n\n".join([sql.originSql().strip()
- for sql in sql_items])\
- + "\n/\n\n"
+ content = (
+ "\n/\n\n".join([sql.originSql().strip() for sql in sql_items])
+ + "\n/\n\n"
+ )
output_content += content
else:
output_content += content
- output_content += '\n'
+ output_content += "\n"
if diff_files and isinstance(i, TableGroup):
sql = "ALTER SYSTEM LOAD DICTIONARY FOR %s\n/\n\n" % i.name()
@@ -2780,14 +2854,14 @@ def generateViewSqlFile(sqlList, outputFilename):
def generate_view():
- '''
+ """
generate view file's diff
- '''
+ """
drop_sqls = []
drop_all = False
drop_jobs = []
- drop_user_jobs = '''BEGIN
+ drop_user_jobs = """BEGIN
FOR ITEM IN (SELECT * FROM USER_JOBS WHERE
WHAT IN ('WSR$CREATE_SNAPSHOT();',
'WSR$DROP_SNAPSHOT_TIME();',
@@ -2796,8 +2870,8 @@ def generate_view():
DBE_TASK.CANCEL(ITEM.JOB);
END LOOP;
COMMIT;
- END;'''
- drop_my_jobs = '''BEGIN
+ END;"""
+ drop_my_jobs = """BEGIN
FOR ITEM IN (SELECT * FROM MY_JOBS WHERE
WHAT IN ('WSR$CREATE_SNAPSHOT();',
'WSR$DROP_SNAPSHOT_TIME();',
@@ -2806,7 +2880,7 @@ def generate_view():
DBE_TASK.CANCEL(ITEM.JOB);
END LOOP;
COMMIT;
- END;'''
+ END;"""
if not g_opts.old:
return
@@ -2822,13 +2896,19 @@ def generate_view():
new_view_sql = InitViewSqls()
new_view_sql.init(g_opts.new)
- drop_sqls, drop_jobs =\
- init_jobsAndSqls(old_view_sql, new_view_sql, drop_jobs, drop_sqls,
- drop_all, drop_my_jobs, drop_user_jobs)
+ drop_sqls, drop_jobs = init_jobsAndSqls(
+ old_view_sql,
+ new_view_sql,
+ drop_jobs,
+ drop_sqls,
+ drop_all,
+ drop_my_jobs,
+ drop_user_jobs,
+ )
if drop_sqls:
generateViewSqlFile(drop_sqls, g_opts.generate_file)
if drop_jobs:
- generateViewSqlFile(drop_jobs, g_opts.generate_file+'_jobs')
+ generateViewSqlFile(drop_jobs, g_opts.generate_file + "_jobs")
def init_jobsAndSqls(oldSql, newSql, dropJobs, dropSqls, is_all, mjob, uJob):
@@ -2842,23 +2922,23 @@ def init_jobsAndSqls(oldSql, newSql, dropJobs, dropSqls, is_all, mjob, uJob):
new_item = newSql.fetch(i)
if not new_item:
drop_sql = i.generateDropSql()
- if (i.tableName() == 'USER_JOBS'
- or i.tableName() == 'SYS.USER_JOBS')\
- and i.isCreateOrReplaceView():
+ if (
+ i.tableName() == "USER_JOBS" or i.tableName() == "SYS.USER_JOBS"
+ ) and i.isCreateOrReplaceView():
dropJobs.append(uJob)
- if (i.tableName() == 'MY_JOBS'
- or i.tableName() == 'SYS.MY_JOBS')\
- and i.isCreateOrReplaceView():
+ if (
+ i.tableName() == "MY_JOBS" or i.tableName() == "SYS.MY_JOBS"
+ ) and i.isCreateOrReplaceView():
dropJobs.append(mjob)
dropSqls.append(drop_sql)
return dropSqls, dropJobs
def generate_increase_view():
- '''
+ """
generate increase sql between old initview/initplsql/initwsr
and new initview/initplsql/initwsr
- '''
+ """
oldview = InitIncreaseView()
newview = InitIncreaseView()
@@ -2868,10 +2948,11 @@ def generate_increase_view():
oldview.check_sequence()
newview.check_sequence()
if len(oldview.get_numberlist()) > len(newview.get_numberlist()):
- raise Exception("Please check %s, that module is less than %s"
- % (g_opts.new, g_opts.old))
+ raise Exception(
+ "Please check %s, that module is less than %s" % (g_opts.new, g_opts.old)
+ )
viewgroups = newview.get_all_group()
- sqls = ''
+ sqls = ""
for viewgroup in viewgroups:
matched = oldview.fetch(viewgroup)
if not matched:
@@ -2879,20 +2960,20 @@ def generate_increase_view():
number = "--%s\n" % str(viewgroup.number())
sql = number + sql
sqls += sql
- if sqls != '':
+ if sqls != "":
writeFile(g_opts.generate_file, sqls)
def generate_dictionary():
- '''
+ """
generate alter system load dictionary for xxx sql file
- '''
+ """
init_db = InitDbSqls()
init_db.init(g_opts.init_db)
it = iter(init_db)
- output_content = ''
- output_file_name = os.path.join(g_opts.generate_path, 'upgradeFile.sql')
+ output_content = ""
+ output_file_name = os.path.join(g_opts.generate_path, "upgradeFile.sql")
for i in it:
if isinstance(i, TableGroup):
@@ -2905,10 +2986,10 @@ def generate_dictionary():
def check_initdb():
- '''
+ """
check initdb.sql low version -- old high version -- new
if a table has been modified, its version must change
- '''
+ """
# instance of the old initdb file content as an object
low_init_db = InitDbSqls()
low_init_db.init(g_opts.old_init_db)
@@ -2925,8 +3006,7 @@ def check_initdb():
return
if high_version == 0:
- raise Exception("Versions in %s should larger than zero!"
- % g_opts.new_init_db)
+ raise Exception("Versions in %s should larger than zero!" % g_opts.new_init_db)
it = iter(high_init_db)
for i in it:
@@ -2961,8 +3041,7 @@ def check_initdb():
if diff:
if low_item.version() == i.version():
- raise Exception("Version have to change"
- " when %s changes." % i.name())
+ raise Exception("Version have to change" " when %s changes." % i.name())
elif g_opts.isAdjacent:
if low_item.version() + 1 != i.version():
raise Exception("Version of %s shoud add one." % i.name())
@@ -2971,8 +3050,7 @@ def check_initdb():
else:
if g_opts.isAdjacent:
if low_item.version() != i.version():
- raise Exception("Version of %s"
- " shoud not change." % i.name())
+ raise Exception("Version of %s" " shoud not change." % i.name())
if error_flag:
sys.exit(1)
@@ -2982,16 +3060,16 @@ def check_initdb():
def is_upgrade_file(file_name, up_type):
- '''
+ """
if file_name is formatted as:
xxx_upgrade_NN.sql or
xxx_degrade_NN.sql, where NN is a digit number
- '''
+ """
- fname = file_name.rsplit('.', 1)
- if fname[-1] != 'sql':
+ fname = file_name.rsplit(".", 1)
+ if fname[-1] != "sql":
return False
- table_up_ver = fname[0].split('_')
+ table_up_ver = fname[0].split("_")
if len(table_up_ver) < 2:
return False
if table_up_ver[-2] == up_type:
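
The hunk is cut off here, but the naming convention being tested is visible: files named TABLE_upgrade_NN.sql or TABLE_degrade_NN.sql. A hypothetical restatement of the check, assuming the elided tail of the function also verifies that NN is numeric:

    def is_upgrade_file(file_name, up_type):
        # "sys_table_upgrade_3.sql" -> ["sys_table_upgrade_3", "sql"]
        fname = file_name.rsplit(".", 1)
        if fname[-1] != "sql":
            return False
        parts = fname[0].split("_")
        if len(parts) < 2:
            return False
        return parts[-2] == up_type and parts[-1].isdigit()

    print(is_upgrade_file("sys_table_upgrade_3.sql", "upgrade"))  # True
    print(is_upgrade_file("readme.txt", "upgrade"))               # False
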
@@ -3001,13 +3079,13 @@ def is_upgrade_file(file_name, up_type):
def check_wl_on_sql(sql, rules):
- '''
+ """
check that one sql follows the white list
- '''
+ """
for rule in rules:
for i in range(len(rule)):
- if rule[i].lower() == 'xxx':
+ if rule[i].lower() == "xxx":
continue
try:
if rule[i].upper() != sql[i].upper():
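
The whitelist match above compares a candidate SQL statement word by word against each rule: the literal token "xxx" acts as a wildcard and every other token is compared case-insensitively. A condensed, self-contained sketch of that matching logic for a single rule (rule and statement are hypothetical):

    def matches(sql_tokens, rule):
        # A rule longer than the statement cannot match; the patched code
        # reaches the same outcome by catching IndexError.
        if len(rule) > len(sql_tokens):
            return False
        return all(
            r.lower() == "xxx" or r.upper() == s.upper()
            for r, s in zip(rule, sql_tokens)
        )

    rule = ["ALTER", "TABLE", "xxx", "ADD"]
    stmt = "alter table SYS_DUMMY add column C1 int".split()
    print(matches(stmt, rule))  # True
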
@@ -3020,20 +3098,20 @@ def check_wl_on_sql(sql, rules):
def check_wl_on_file(file_name, rules):
- '''
+ """
check that the sql file follows the white list
- '''
+ """
all_sqls = []
- with open(file_name, 'r') as fp:
+ with open(file_name, "r") as fp:
content = fp.read()
# translate to unix format
- content = content.replace('\r\n', '\n')
- all_sqls = content.split('\n/')
+ content = content.replace("\r\n", "\n")
+ all_sqls = content.split("\n/")
for sql in all_sqls:
sql = sql.strip()
- if sql.find('--') == 0:
+ if sql.find("--") == 0:
continue
if sql:
if not check_wl_on_sql(sql.split(), rules):
@@ -3041,16 +3119,16 @@ def check_wl_on_file(file_name, rules):
def get_whitelist_rules(file_name):
- '''
+ """
return the whitelist rules as a list
- '''
+ """
rules = []
- with open(file_name, 'r') as fp:
+ with open(file_name, "r") as fp:
content = fp.read()
- content = content.replace('\r\n', '\n')
- content = content.split('\n')
+ content = content.replace("\r\n", "\n")
+ content = content.split("\n")
for i in content:
if i:
rules.append(i.split())
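
The whitelist file format implied by get_whitelist_rules() is one rule per line, whitespace-separated, with blank lines skipped. A quick illustration with hypothetical file content:

    content = "ALTER TABLE xxx ADD\n\nCREATE INDEX xxx ON xxx\n"
    content = content.replace("\r\n", "\n")
    rules = [line.split() for line in content.split("\n") if line]
    print(rules)
    # [['ALTER', 'TABLE', 'xxx', 'ADD'], ['CREATE', 'INDEX', 'xxx', 'ON', 'xxx']]
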
@@ -3059,20 +3137,19 @@ def get_whitelist_rules(file_name):
def check_whitelist():
- '''
+ """
check whether xxx_upgrade_xx.sql and xxx_degrade_xx.sql pass the whitelist
- '''
- search_key = 'upgrade'
+ """
+ search_key = "upgrade"
if g_opts.isDegrade:
- white_list_file = os.path.join(g_opts.sqls_path, 'degrade_whitelist')
- search_key = 'degrade'
+ white_list_file = os.path.join(g_opts.sqls_path, "degrade_whitelist")
+ search_key = "degrade"
else:
- white_list_file = os.path.join(g_opts.sqls_path, 'upgrade_whitelist')
+ white_list_file = os.path.join(g_opts.sqls_path, "upgrade_whitelist")
if not os.path.exists(white_list_file):
- raise Exception("Can not find whitelist"
- " in %s" % g_opts.sqls_path)
+ raise Exception("Can not find whitelist" " in %s" % g_opts.sqls_path)
wl_rules = get_whitelist_rules(white_list_file)
@@ -3083,7 +3160,6 @@ def check_whitelist():
check_wl_on_file(os.path.join(_root, fname), wl_rules)
-
def main():
"""
according to different action, the corresponding method is called
@@ -3108,9 +3184,9 @@ def main():
generate_increase_view()
-if __name__ == '__main__':
+if __name__ == "__main__":
- if(os.getuid() == 0):
+ if os.getuid() == 0:
print("Failed: Cannot use root user for this operation!")
sys.exit(1)
diff --git a/pkg/install/uninstall.py b/pkg/install/uninstall.py
index ce2799ae1a5c3a48fd9f56efc1b144f5491b920b..d80ac4698423928859ca4c283d7a322795e93f7c 100644
--- a/pkg/install/uninstall.py
+++ b/pkg/install/uninstall.py
@@ -5,6 +5,7 @@
import sys
+
# If run by root, the import behavior will create folder '__pycache__'
# whose owner will be root. The database owner hasn't permission to
# remove the folder. So we can't create it.
@@ -35,10 +36,12 @@ if os.path.exists("/.dockerenv"):
else:
MYSQL_BIN_DIR = "/opt/cantian/mysql/install/mysql"
+
class Options(object):
"""
class for command line options
"""
+
def __init__(self):
# user information
self.user_info = pwd.getpwuid(os.getuid())
@@ -61,7 +64,7 @@ class Options(object):
# The object of opened log file.
self.fp = None
-
+
self.use_gss = False
self.in_container = False
@@ -77,8 +80,13 @@ def _exec_popen(cmd):
:return: status code, standard output, error output
"""
bash_cmd = ["bash"]
- p = subprocess.Popen(bash_cmd, shell=False, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ bash_cmd,
+ shell=False,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
if gPyVersion[0] == "3":
stdout, stderr = p.communicate(cmd.encode())
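
The reformatted _exec_popen() starts a bare bash with shell=False and then writes the command text to the child's stdin, which keeps full shell syntax (pipes, backquotes) available without passing shell=True. A minimal runnable sketch of the same technique:

    import subprocess

    p = subprocess.Popen(
        ["bash"],
        shell=False,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # The command arrives on bash's stdin, not as an argument.
    stdout, stderr = p.communicate(b"echo hello | tr a-z A-Z")
    print(p.returncode, stdout.decode().strip())  # 0 HELLO
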
@@ -111,22 +119,22 @@ def _get_input(msg):
def usage():
"""uninstall.py is a utility to uninstall cantiand server.
-Usage:
- python uninstall.py --help
- python uninstall.py [-U user] [-F] [-D DATADIR] [-g withoutroot] [-d] [-s]
-
-Common options:
- -U user who install the db
- -F clean the database storage area
- -D location of the database cluster storage area,
- it will be available after -F
- -g run uninstall script without root privilege,
- but you must have permission of uninstallation folder
- -d uninstall inside docker container
- -P if sysdba login is disabled by configuration,
- specify this option the end
- -s uninstall with gss
- --help show this help, then exit
+ Usage:
+ python uninstall.py --help
+ python uninstall.py [-U user] [-F] [-D DATADIR] [-g withoutroot] [-d] [-s]
+
+ Common options:
+ -U user who installed the db
+ -F clean the database storage area
+ -D location of the database cluster storage area,
+ it will be available after -F
+ -g run uninstall script without root privilege,
+ but you must have permission on the uninstallation folder
+ -d uninstall inside docker container
+ -P if sysdba login is disabled by configuration,
+ specify this option at the end
+ -s uninstall with gss
+ --help show this help, then exit
"""
print(usage.__doc__)
@@ -161,16 +169,16 @@ def parse_parameter():
elif key == "-U":
pass
# Determine whether to delete the data directory
- elif key == '-F':
+ elif key == "-F":
g_opts.clean_data_dir_on = 0
# Get the specified data directory
- elif key == '-D':
+ elif key == "-D":
# If the file is checked directly here, the attacker can
# query it through the error message here. A valid file
# that is not found by it, so the correctness of the
# parameter value will be checked in the following function.
g_opts.clean_data_dir = value.strip()
- elif key == '-P':
+ elif key == "-P":
print("Need database connector's name and password:")
g_opts.db_user = _get_input("Username:")
check_invalid_symbol(g_opts.db_user)
@@ -179,7 +187,7 @@ def parse_parameter():
if (not g_opts.db_user) or (not g_opts.db_passwd):
print("Username and password can not be empty.")
sys.exit(1)
- elif key == '-d':
+ elif key == "-d":
g_opts.in_container = True
except getopt.GetoptError as err:
@@ -201,8 +209,10 @@ def check_parameter():
sys.exit(1)
if g_opts.install_user_privilege != "withoutroot":
- print("Error: User has no root privilege, "
- "do uninstall, need specify parameter '-g withoutroot'.")
+ print(
+ "Error: User has no root privilege, "
+ "do uninstall, need specify parameter '-g withoutroot'."
+ )
sys.exit(1)
else:
print("Error:Check os failed:current os is not linux")
@@ -210,12 +220,14 @@ def check_parameter():
if g_opts.clean_data_dir_on == 1:
if g_opts.clean_data_dir:
- print("Error: Parameter input error: "
- "you can not use -D without using -F")
+ print(
+ "Error: Parameter input error: " "you can not use -D without using -F"
+ )
sys.exit(1)
if g_opts.clean_data_dir:
g_opts.clean_data_dir = os.path.realpath(
- os.path.normpath(g_opts.clean_data_dir))
+ os.path.normpath(g_opts.clean_data_dir)
+ )
DefaultValue.checkInvalidPath(g_opts.clean_data_dir)
@@ -226,8 +238,9 @@ def check_log():
"""
# Get the log path
home_path = g_opts.user_info.pw_dir
- g_opts.log_file = os.path.join(os.path.realpath(
- os.path.normpath(home_path)), "cantianduninstall.log")
+ g_opts.log_file = os.path.join(
+ os.path.realpath(os.path.normpath(home_path)), "cantianduninstall.log"
+ )
# Clean the old log file.
if os.path.exists(g_opts.log_file):
@@ -283,8 +296,10 @@ def logExit(msg):
os.chmod(g_opts.log_file, stat.S_IRUSR)
g_opts.fp = None
- print("Please refer to uninstall log \"%s\" for more detailed information."
- % g_opts.log_file)
+ print(
+ 'Please refer to uninstall log "%s" for more detailed information.'
+ % g_opts.log_file
+ )
sys.exit(1)
@@ -313,7 +328,8 @@ def get_user_environment_file():
log("Getting user environment variables file path...", True)
home_path = g_opts.user_info.pw_dir
g_opts.user_env_path = os.path.realpath(
- os.path.normpath(os.path.join(home_path, ".bashrc")))
+ os.path.normpath(os.path.join(home_path, ".bashrc"))
+ )
if not os.path.isfile(os.path.realpath(g_opts.user_env_path)):
logExit("Can't get the environment variables file.")
log("End get user environment variables file path")
@@ -327,7 +343,8 @@ def find_before_slice(slice_, str_):
find '#' in the head of line
"""
place = str_.find(slice_)
- return str_.find('#', 0, place)
+ return str_.find("#", 0, place)
+
####################################################################
# Check if there is an installation path in the environment variable
@@ -340,26 +357,27 @@ def check_environment_install_path():
input: NA
output: NA
"""
- log("Checking whether install path in the user environment variables...",
- True)
+ log("Checking whether install path in the user environment variables...", True)
f = None
try:
f = open(g_opts.user_env_path)
except IOError:
- logExit("Check environment variables failed:can not open "
- "environment variables file,please check the user that "
- "you offered is right")
+ logExit(
+ "Check environment variables failed:can not open "
+ "environment variables file,please check the user that "
+ "you offered is right"
+ )
LINE = f.readline()
while LINE:
# Obtain 'export CTDB_HOME'
- if LINE.find('export CTDB_HOME') != -1:
+ if LINE.find("export CTDB_HOME") != -1:
# Determine whether there is "#" before CTDB_HOME, the
# function returns a value of -1, indicating that it is
# not found, CTDB_HOME is valid.
- if find_before_slice(LINE, 'CTDB_HOME') == -1:
- INSTALL_ENV_DIC_L = LINE.split('=')
+ if find_before_slice(LINE, "CTDB_HOME") == -1:
+ INSTALL_ENV_DIC_L = LINE.split("=")
INSTALL_ENV_TEMP_L = INSTALL_ENV_DIC_L[1].rstrip()
INSTALL_ENV_L = os.path.normpath(INSTALL_ENV_TEMP_L)
INSTALL_ENV_L = os.path.realpath(INSTALL_ENV_L[1:-1])
@@ -369,9 +387,11 @@ def check_environment_install_path():
return 0
LINE = f.readline()
f.close()
- logExit("Check install path in user environment variables failed:"
- "can not find install path in user: %s environment variables"
- % g_opts.user_info.pw_name)
+ logExit(
+ "Check install path in user environment variables failed:"
+ "can not find install path in user: %s environment variables"
+ % g_opts.user_info.pw_name
+ )
log("End check install path in user environment variables")
@@ -404,36 +424,41 @@ def get_gsdata_path_env():
# Determine whether there is "#" before CTDB_DATA, the
# function returns a value of -1, indicating that it is
# not found, CTDB_DATA is valid.
- if find_before_slice('export CTDB_DATA', LINE) == -1:
- GSDATA_PATH_DIC_TEMP = LINE.split('=')
+ if find_before_slice("export CTDB_DATA", LINE) == -1:
+ GSDATA_PATH_DIC_TEMP = LINE.split("=")
GSDATA_PATH_TEMP = GSDATA_PATH_DIC_TEMP[1].rstrip()
GSDATA_PATH = os.path.normpath(GSDATA_PATH_TEMP)
g_opts.gs_data_path = os.path.realpath(GSDATA_PATH[1:-1])
DefaultValue.checkInvalidPath(g_opts.gs_data_path)
if not os.path.exists(g_opts.gs_data_path):
f.close()
- logExit("Get data directory in user environment variables"
- " failed:data directory have been destroyed,"
- "can not uninstall")
+ logExit(
+ "Get data directory in user environment variables"
+ " failed:data directory have been destroyed,"
+ "can not uninstall"
+ )
log("End find data directory in user environment variables")
f.close()
return 0
# deal with the CTDB_HOME with """
# Obtain 'export CTDB_DATA'
- elif LINE.find('export CTDB_DATA') != -1:
+ elif LINE.find("export CTDB_DATA") != -1:
# Determine whether there is "#" before CTDB_DATA, the
# function returns a value of -1, indicating that it is
# not found, CTDB_DATA is valid.
- if find_before_slice('export CTDB_DATA', LINE) == -1:
- GSDATA_PATH_DIC_TEMP = LINE.split('=')
+ if find_before_slice("export CTDB_DATA", LINE) == -1:
+ GSDATA_PATH_DIC_TEMP = LINE.split("=")
GSDATA_PATH_TEMP = GSDATA_PATH_DIC_TEMP[1].rstrip()
g_opts.gs_data_path = os.path.realpath(
- os.path.normpath(GSDATA_PATH_TEMP))
+ os.path.normpath(GSDATA_PATH_TEMP)
+ )
if not os.path.exists(g_opts.gs_data_path):
f.close()
- logExit("Get data directory in user environment variables "
- "failed:data directory have been destroyed,"
- "can not uninstall")
+ logExit(
+ "Get data directory in user environment variables "
+ "failed:data directory have been destroyed,"
+ "can not uninstall"
+ )
log("End find data directory in user environment variables")
f.close()
return 0
@@ -456,12 +481,14 @@ def check_data_dir():
"""
log("Begin check data dir...", True)
if g_opts.clean_data_dir:
- if os.path.exists(g_opts.clean_data_dir) \
- and os.path.isdir(g_opts.clean_data_dir) \
- and g_opts.clean_data_dir == g_opts.gs_data_path:
- log("path: \"%s\" is correct" % g_opts.clean_data_dir)
+ if (
+ os.path.exists(g_opts.clean_data_dir)
+ and os.path.isdir(g_opts.clean_data_dir)
+ and g_opts.clean_data_dir == g_opts.gs_data_path
+ ):
+ log('path: "%s" is correct' % g_opts.clean_data_dir)
else:
- logExit("path: \"%s\" is incorrect" % g_opts.clean_data_dir)
+ logExit('path: "%s" is incorrect' % g_opts.clean_data_dir)
log("end check,match")
@@ -481,28 +508,37 @@ def clean_data_dir():
try:
shutil.rmtree(DefaultValue.DOCKER_DATA_DIR)
except OSError as err:
- logExit("Clean share data path failed:can not delete share data path "
- "%s\nPlease manually delete it." % str(err))
+ logExit(
+ "Clean share data path failed:can not delete share data path "
+ "%s\nPlease manually delete it." % str(err)
+ )
try:
shutil.rmtree(g_opts.gs_data_path)
except OSError as err:
- logExit("Clean data path failed:can not delete data path "
- "%s\nPlease manually delete it." % str(err))
+ logExit(
+ "Clean data path failed:can not delete data path "
+ "%s\nPlease manually delete it." % str(err)
+ )
else:
- logExit("Clean data failed:can not find data directory path"
- " in user environment variables,"
- "it might be destroyed or not exist")
+ logExit(
+ "Clean data failed:can not find data directory path"
+ " in user environment variables,"
+ "it might be destroyed or not exist"
+ )
if not g_opts.use_gss:
if g_opts.in_container and os.path.exists(DefaultValue.DOCKER_GCC_DIR):
try:
shutil.rmtree(DefaultValue.DOCKER_GCC_DIR)
except OSError as err:
- logExit("Clean gcc path failed:can not delete gcc path "
- "%s\nPlease manually delete it." % str(err))
+ logExit(
+ "Clean gcc path failed:can not delete gcc path "
+ "%s\nPlease manually delete it." % str(err)
+ )
else:
log("Not clean data path")
log("End clean data path")
+
#########################################################################
# Check the uninstall script location
#########################################################################
@@ -515,38 +551,51 @@ def check_uninstall_pos():
output: NA
"""
log("Checking uninstall.py position...", True)
- bin_path = g_opts.install_path_l + os.sep + 'bin'
- addons_path = g_opts.install_path_l + os.sep + 'add-ons'
- admin_path = g_opts.install_path_l + os.sep + 'admin'
- lib_path = g_opts.install_path_l + os.sep + 'lib'
- pkg_file = g_opts.install_path_l + os.sep + 'package.xml'
+ bin_path = g_opts.install_path_l + os.sep + "bin"
+ addons_path = g_opts.install_path_l + os.sep + "add-ons"
+ admin_path = g_opts.install_path_l + os.sep + "admin"
+ lib_path = g_opts.install_path_l + os.sep + "lib"
+ pkg_file = g_opts.install_path_l + os.sep + "package.xml"
# Check if the install path exists
if not os.path.exists(g_opts.install_path_l):
- logExit("Check uninstall.py position failed:You have"
- " changed uninstall.py position,install path not exist")
+ logExit(
+ "Check uninstall.py position failed:You have"
+ " changed uninstall.py position,install path not exist"
+ )
# Check if the bin path exists
if not os.path.exists(bin_path):
- logExit("Check uninstall.py position failed:You have"
- " changed uninstall.py position,can not find path bin")
+ logExit(
+ "Check uninstall.py position failed:You have"
+ " changed uninstall.py position,can not find path bin"
+ )
# Check if the addons path exists
if not os.path.exists(addons_path):
- logExit("Check uninstall.py position failed:You have"
- " changed uninstall.py position,can not find path add-ons")
+ logExit(
+ "Check uninstall.py position failed:You have"
+ " changed uninstall.py position,can not find path add-ons"
+ )
# Check if the admin path exists
if not os.path.exists(admin_path):
- logExit("Check uninstall.py position failed:You have"
- " changed uninstall.py position,can not find path admin")
+ logExit(
+ "Check uninstall.py position failed:You have"
+ " changed uninstall.py position,can not find path admin"
+ )
# Check if the lib path exists
if not os.path.exists(lib_path):
- logExit("Check uninstall.py position failed:You have"
- " changed uninstall.py position,can not find file lib")
+ logExit(
+ "Check uninstall.py position failed:You have"
+ " changed uninstall.py position,can not find file lib"
+ )
# Check if the package path exists
if not os.path.isfile(pkg_file):
- logExit("Check uninstall.py position failed:You have"
- " changed uninstall.py position,can not find file package.xml")
+ logExit(
+ "Check uninstall.py position failed:You have"
+ " changed uninstall.py position,can not find file package.xml"
+ )
log("End check uninstall.py position")
+
#########################################################################
# Clear the installation path
#########################################################################
@@ -568,8 +617,10 @@ def clean_install_path():
# Remove the install path
shutil.rmtree(g_opts.install_path_l)
except OSError as err:
- logExit("Clean install path failed:can not delete install path "
- "%s\nPlease manually delete it." % str(err))
+ logExit(
+ "Clean install path failed:can not delete install path "
+ "%s\nPlease manually delete it." % str(err)
+ )
log("Clean install path success")
log("End clean Install path")
@@ -578,6 +629,7 @@ def clean_install_path():
# Clear environment variables
###########################################################################
+
# Resolution path
def Genregstring(text):
"""
@@ -591,12 +643,13 @@ def Genregstring(text):
insList = insStr.split(os.sep)
regString = ""
for i in insList:
- if(i == ""):
+ if i == "":
continue
else:
regString += r"\/" + i
return regString
+
# Clear environment variables
@@ -610,13 +663,15 @@ def clean_environment():
# Clear environment variable CTDB_DATA
data_cmd = r"/^\s*export\s*CTDB_DATA=\".*\"$/d"
# Clear environment variable PATH about database
- path_cmd = (r"/^\s*export\s*PATH=\"%s\/bin\":\$PATH$/d"
- % Genregstring(g_opts.install_path_l))
+ path_cmd = r"/^\s*export\s*PATH=\"%s\/bin\":\$PATH$/d" % Genregstring(
+ g_opts.install_path_l
+ )
# Clear environment variable LD_LIBRARY_PATH about database
- lib_cmd = (r"/^\s*export\s*LD_LIBRARY_PATH=\"%s\/lib\":\"%s\/add-ons\":"
- r"\$LD_LIBRARY_PATH$/d"
- % (Genregstring(g_opts.install_path_l),
- Genregstring(g_opts.install_path_l)))
+ lib_cmd = (
+ r"/^\s*export\s*LD_LIBRARY_PATH=\"%s\/lib\":\"%s\/add-ons\":"
+ r"\$LD_LIBRARY_PATH$/d"
+ % (Genregstring(g_opts.install_path_l), Genregstring(g_opts.install_path_l))
+ )
# Clear environment variable CTDB_HOME
home_cmd = r"/^\s*export\s*CTDB_HOME=\".*\"$/d"
# Clear environment variable CMS_HOME
@@ -629,8 +684,17 @@ def clean_environment():
mode_cmd = r"/^\s*export\s*CTSQL_SSL_MODE=.*$/d"
cipher_cmd = r"/^\s*export\s*CTSQL_SSL_KEY_PASSWD=.*$/d"
- cmds = [path_cmd, lib_cmd, home_cmd, cms_cmd,
- ca_cmd, cert_cmd, key_cmd, mode_cmd, cipher_cmd]
+ cmds = [
+ path_cmd,
+ lib_cmd,
+ home_cmd,
+ cms_cmd,
+ ca_cmd,
+ cert_cmd,
+ key_cmd,
+ mode_cmd,
+ cipher_cmd,
+ ]
if g_opts.clean_data_dir_on == 0:
cmds.insert(0, data_cmd)
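
Each entry in cmds above is a sed delete script of the form /pattern/d; the code that applies them to the user's environment file sits outside this hunk. A hedged sketch of how such a script would typically be applied, with a hypothetical target path:

    import subprocess

    user_env_path = "/home/dbuser/.bashrc"  # hypothetical target file
    home_cmd = r"/^\s*export\s*CTDB_HOME=\".*\"$/d"
    # GNU sed: -i edits the file in place, /.../d deletes matching lines.
    subprocess.run(["sed", "-i", home_cmd, user_env_path], check=True)
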
@@ -691,12 +755,12 @@ def get_instance_id():
input: NA
output: NA
"""
- cmd = ("ps ux | grep -v grep | grep cantiand "
- "| grep -w '\-D %s' |awk '{print $2}'") % g_opts.gs_data_path
+ cmd = (
+ r"ps ux | grep -v grep | grep cantiand | grep -w '\-D %s' | awk '{print $2}'"
+ ) % g_opts.gs_data_path
status, output, _ = _exec_popen(cmd)
if status:
- logExit("Failed to execute cmd: %s. Error:%s." % (str(cmd),
- str(output)))
+ logExit("Failed to execute cmd: %s. Error:%s." % (str(cmd), str(output)))
# process exists
return output
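
The ps pipeline in get_instance_id() lists cantiand processes started with "-D <datadir>" and prints their PIDs; the backslash in '\-D' keeps the pattern argument from starting with a bare dash that grep would read as an option. A roughly equivalent standalone sketch (data directory hypothetical):

    import subprocess

    data_dir = "/data/cantian"  # hypothetical
    cmd = (
        r"ps ux | grep -v grep | grep cantiand"
        r" | grep -w '\-D %s' | awk '{print $2}'" % data_dir
    )
    out = subprocess.run(["bash", "-c", cmd], capture_output=True, text=True)
    print(out.stdout.split())  # e.g. ['12345'] while the instance is running
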
@@ -714,12 +778,17 @@ def kill_instance(instance_pid):
logExit("kill process %s failed" % instance_pid)
log("Kill cantiand instance succeed")
+
def kill_process(process_name):
# kill process
- kill_cmd = (r"proc_pid_list=`ps ux | grep %s | grep -v grep"
- r"|awk '{print $2}'` && " % process_name)
- kill_cmd += (r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
- r"$proc_pid_list | xargs kill -9; exit 0; fi)")
+ kill_cmd = (
+ r"proc_pid_list=`ps ux | grep %s | grep -v grep"
+ r"|awk '{print $2}'` && " % process_name
+ )
+ kill_cmd += (
+ r"(if [ X\"$proc_pid_list\" != X\"\" ];then echo "
+ r"$proc_pid_list | xargs kill -9; exit 0; fi)"
+ )
log("kill process cmd: %s" % kill_cmd)
ret_code, _, _ = _exec_popen(kill_cmd)
if ret_code:
@@ -743,7 +812,7 @@ def stop_instance():
lsnr_addr = read_cantiand_cfg("LSNR_ADDR")
if not lsnr_addr:
logExit("Failed to get the listen address of database.")
- host_ip = lsnr_addr.split(',')[0]
+ host_ip = lsnr_addr.split(",")[0]
# if the cantian process not exists, and disable sysdba user
# tell user the user name and password input interactive are
@@ -751,11 +820,15 @@ def stop_instance():
instance_pid = get_instance_id()
# specify -P parameter, db password is supported
if not instance_pid and g_opts.db_passwd:
- log("Notice: Instance '%s' has been stopped." %
- g_opts.gs_data_path, True)
- log(("Notice: The Database username and password"
- " that are interactive entered "
- "will not be verified correct and used.", True))
+ log("Notice: Instance '%s' has been stopped." % g_opts.gs_data_path, True)
+ log(
+ "Notice: The database username and password"
+ " that were entered interactively"
+ " will not be verified or used.",
+ True,
+ )
kill_process("mysqld")
kill_process("cms")
if g_opts.use_gss:
@@ -775,24 +848,32 @@ def stop_instance():
# not specify -P, db password is empty, login database by sysdba
if not g_opts.db_passwd:
cmd = "%s/bin/shutdowndb.sh -h %s -p %s -w -m immediate -D %s" % (
- g_opts.install_path_l, host_ip, lsnr_port, g_opts.gs_data_path)
+ g_opts.install_path_l,
+ host_ip,
+ lsnr_port,
+ g_opts.gs_data_path,
+ )
else:
- cmd = ("echo '%s' | %s/bin/shutdowndb.sh"
- " -h %s -p %s -U %s -m immediate -W -D %s") % (
+ cmd = (
+ "echo '%s' | %s/bin/shutdowndb.sh"
+ " -h %s -p %s -U %s -m immediate -W -D %s"
+ ) % (
g_opts.db_passwd,
g_opts.install_path_l,
host_ip,
lsnr_port,
g_opts.db_user,
- g_opts.gs_data_path)
+ g_opts.gs_data_path,
+ )
return_code, stdout, stderr = _exec_popen(cmd)
if return_code:
g_opts.db_passwd = ""
stdout = get_error_msg(stdout, stderr)
- if (not g_opts.db_passwd) and stdout.find(
- "login as sysdba is prohibited") >= 0:
- stdout += ("\nsysdba login is disabled, please specify -P "
- "parameter to input password, refer to --help.")
+ if (not g_opts.db_passwd) and stdout.find("login as sysdba is prohibited") >= 0:
+ stdout += (
+ "\nsysdba login is disabled, please specify -P "
+ "parameter to input password, refer to --help."
+ )
logExit("stop cantian instance failed. Error: %s" % stdout)
@@ -822,12 +903,31 @@ def check_invalid_symbol(para):
:return: NA
"""
symbols = (
- "|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", "{", "}",
- "(", ")", "[", "]", "~", "*", "?", "!", "\n",
+ "|",
+ ";",
+ "&",
+ "$",
+ "<",
+ ">",
+ "`",
+ "\\",
+ "'",
+ '"',
+ "{",
+ "}",
+ "(",
+ ")",
+ "[",
+ "]",
+ "~",
+ "*",
+ "?",
+ "!",
+ "\n",
)
for symbol in symbols:
if para.find(symbol) > -1:
- logExit("There is invalid symbol \"%s\" in %s" % (symbol, para))
+ logExit('There is an invalid symbol "%s" in %s' % (symbol, para))
def main():
@@ -862,8 +962,11 @@ def main():
clean_environment()
clean_install_path()
- log("Cantiand was successfully removed from your computer, "
- "for more message please see %s." % g_opts.log_file, True)
+ log(
+ "Cantiand was successfully removed from your computer, "
+ "for more message please see %s." % g_opts.log_file,
+ True,
+ )
log("Cantiand was successfully removed from your computer")
os.chmod(g_opts.log_file, stat.S_IRUSR)
diff --git a/pkg/test/om_test/python_LLT.py b/pkg/test/om_test/python_LLT.py
index 1a1e447e24c863d0ae33b484941ce44594de7d19..c1838b5391ade5abf69246be89894d1f86e0f389 100644
--- a/pkg/test/om_test/python_LLT.py
+++ b/pkg/test/om_test/python_LLT.py
@@ -6,11 +6,13 @@ import warnings
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path))
-src_path = os.path.join(cur_path, '..', '..', 'deploy', 'action')
+src_path = os.path.join(cur_path, "..", "..", "deploy", "action")
sys.path.append(src_path)
-cov = coverage.coverage(branch=True,
- source=[src_path],
- omit=["__init__.py", "test_*", "*config.py", "*log.py", "*constant.py"])
+cov = coverage.coverage(
+ branch=True,
+ source=[src_path],
+ omit=["__init__.py", "test_*", "*config.py", "*log.py", "*constant.py"],
+)
warnings.filterwarnings("ignore", category=SyntaxWarning)
@@ -26,10 +28,10 @@ def main():
# Save the coverage data
cov.save()
cov.report()
- cov.xml_report(outfile=os.path.join(cur_path, 'coverage.xml'))
+ cov.xml_report(outfile=os.path.join(cur_path, "coverage.xml"))
return result.wasSuccessful()
-if __name__ == '__main__':
+if __name__ == "__main__":
ret = main()
sys.exit(0 if ret else 1)
diff --git a/pkg/test/om_test/test_dr_deploy_operate/test_dr_deploy_pre_check.py b/pkg/test/om_test/test_dr_deploy_operate/test_dr_deploy_pre_check.py
index 0c96a13131dc94da449ae43c658452a084db151f..cbc6f4501cd8fa2bd1428f10453816fd6ec1c004 100644
--- a/pkg/test/om_test/test_dr_deploy_operate/test_dr_deploy_pre_check.py
+++ b/pkg/test/om_test/test_dr_deploy_operate/test_dr_deploy_pre_check.py
@@ -2,6 +2,7 @@ import sys
import mock
import unittest
import collections
+
sys.modules["requests"] = mock.MagicMock()
sys.modules["termios"] = mock.MagicMock()
sys.modules["pty"] = mock.MagicMock()
@@ -10,17 +11,26 @@ import storage_operate.dr_deploy_operate.dr_deploy_pre_check as pre_check
class getConfigTestCase(unittest.TestCase):
- @mock.patch("storage_operate.dr_deploy_operate.dr_deploy_pre_check.read_json_config")
+ @mock.patch(
+ "storage_operate.dr_deploy_operate.dr_deploy_pre_check.read_json_config"
+ )
def test_get_config_values_normal(self, mock_json_config):
- mock_json_config.return_value = {"deploy_policy": "ModeA", "ModeA": {"config": {"test": "test keys"}}}
- result = pre_check.get_config_values("test")
+ mock_json_config.return_value = {
+ "deploy_policy": "ModeA",
+ "ModeA": {"config": {"test": "test keys"}},
+ }
+ result = pre_check.get_config_values("test")
self.assertEqual("test keys", result)
def test_get_config_values_abnormal(self):
- pre_check.read_json_config = mock.Mock(return_value={"deploy_policy": "default",
- "ModeA": {"config": {"test": "test keys"}}})
- result = pre_check.get_config_values("test")
- self.assertEqual('', result)
+ pre_check.read_json_config = mock.Mock(
+ return_value={
+ "deploy_policy": "default",
+ "ModeA": {"config": {"test": "test keys"}},
+ }
+ )
+ result = pre_check.get_config_values("test")
+ self.assertEqual("", result)
class FakeDRDeployPreCheck(pre_check.DRDeployPreCheck):
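
The tests above lean on unittest.mock: @mock.patch replaces the named attribute for the duration of one test and hands the replacement MagicMock in as an extra argument. A self-contained sketch of the same pattern (module and function names hypothetical):

    import unittest
    from unittest import mock

    def load_config():
        # Stand-in for a helper such as read_json_config that would hit disk.
        raise RuntimeError("should be patched in tests")

    def get_value(key):
        return load_config().get(key, "")

    class GetValueTest(unittest.TestCase):
        @mock.patch(__name__ + ".load_config")
        def test_get_value(self, mock_load):
            mock_load.return_value = {"test": "test keys"}
            self.assertEqual("test keys", get_value("test"))

    if __name__ == "__main__":
        unittest.main()
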
@@ -61,8 +71,10 @@ class DRDeployPreCheckTestCase(unittest.TestCase):
@mock.patch("storage_operate.dr_deploy_operate.dr_deploy_pre_check.exec_popen")
def test_check_dr_process_all_error(self, mock_exec_popen):
mock_exec_popen.return_value = (2, 1, 2)
- target_error = ("Dr deploy is executing, please check, details:\n1Dr undeploy is executing, please " +\
- "check, details:\n1Dr full sync is executing, please check, details:\n1")
+ target_error = (
+ "Dr deploy is executing, please check, details:\n1Dr undeploy is executing, please "
+ + "check, details:\n1Dr full sync is executing, please check, details:\n1"
+ )
with self.assertRaisesRegex(Exception, target_error):
pre_check.DRDeployPreCheck.check_dr_process()
@@ -76,7 +88,9 @@ class FakeParamCheck(pre_check.ParamCheck):
self.dr_deploy_params = {"dm_ip": "127.0.0.1", "dm_user": "admin"}
-args = collections.namedtuple("args", ["action", "site", "mysql_cmd", "mysql_user", "display"])
+args = collections.namedtuple(
+ "args", ["action", "site", "mysql_cmd", "mysql_user", "display"]
+)
class ParamCheckTestCase(unittest.TestCase):
@@ -89,7 +103,14 @@ class ParamCheckTestCase(unittest.TestCase):
@mock.patch("cantian_common.mysql_shell.MysqlShell.start_session", return_value="")
@mock.patch("logic.storage_operate.StorageInf.login", return_value="")
@mock.patch("argparse.ArgumentParser")
- def test_execute_normal(self, mock_parser, mock_login, mock_start_session, mocke_close_session, mock_input):
+ def test_execute_normal(
+ self,
+ mock_parser,
+ mock_login,
+ mock_start_session,
+ mocke_close_session,
+ mock_input,
+ ):
args.action = "deploy"
args.site = "active"
mock_parser.parse_args.return_value = args
@@ -100,10 +121,16 @@ class ParamCheckTestCase(unittest.TestCase):
@mock.patch("cantian_common.mysql_shell.MysqlShell.start_session", return_value="")
@mock.patch("logic.storage_operate.StorageInf.login", side_effect=Exception("test"))
@mock.patch("argparse.ArgumentParser")
- def test_execute_abnormal(self, mock_parser, mock_login, mock_start_session, mocke_close_session, mock_input):
+ def test_execute_abnormal(
+ self,
+ mock_parser,
+ mock_login,
+ mock_start_session,
+ mocke_close_session,
+ mock_input,
+ ):
args.action = "deploy"
args.site = "active"
mock_parser.parse_args.return_value = args
with self.assertRaisesRegex(Exception, "test"):
self.param_check.execute()
-