diff --git a/CI/script/Dev_ct_cantian_regress.sh b/CI/script/Dev_ct_cantian_regress.sh index 53cddb7b18a789026f75954996e3977652a78b22..e88158f4581bab59de35defb13c7b4b85949b754 100644 --- a/CI/script/Dev_ct_cantian_regress.sh +++ b/CI/script/Dev_ct_cantian_regress.sh @@ -46,6 +46,8 @@ function run_ct_regress() { exit 1 fi echo "Regress Success" + regress_result="Test Result: Success"; + echo ${regress_result} > ${ROOT_PATH}/regress_output/test_result.txt echo "LCOV_ENABLE is ${LCOV_ENABLE}" if [ "${LCOV_ENABLE}" = TRUE ]; then echo "make lcov report" @@ -123,12 +125,6 @@ function compile_code() { then lcov_build_flag="lcov=1" cp -f ${ROOT_PATH}/pkg/src/server/srv_main.c ${ROOT_PATH}/pkg/src/server/srv_main.c.bak - tmp_hllt_code1="#include " - tmp_hllt_code2="void save_llt_data(int signo){\nprintf(\"srv_main get signal=%d\",signo);\nexit(0);\n}" - tmp_hllt_code3=" signal(35,save_llt_data);" - sed -i "/cm_coredump.h/a$tmp_hllt_code1" ${ROOT_PATH}/pkg/src/server/srv_main.c - sed -i "/$tmp_hllt_code1/a$tmp_hllt_code2" ${ROOT_PATH}/pkg/src/server/srv_main.c - sed -i "/cantiand_lib_main(argc, argv);/i$tmp_hllt_code3" ${ROOT_PATH}/pkg/src/server/srv_main.c echo "finish modify main function" fi @@ -154,7 +150,7 @@ function compile_code() { cd ${ROOT_PATH}/build source ./common.sh cd ${ROOT_PATH}/build/pkg/test/ct_regress - strip -N main ${CANTIANDB_LIB}/libzeserver.a + strip -N main ${ROOT_PATH}/output/lib/libzeserver.a make -sj 8 | tee -a ${COMPILE_LOG} # error_num=`cat ${COMPILE_LOG} |grep 'error:'|wc -l` # if [ $error_num -ne 0 ];then @@ -289,8 +285,8 @@ main() { parse_parameter "$@" check_old_install init_test_environment - - if [ -z "${pass_build}" ] || [ $pass_build -eq 0 ]; then + echo "pass_build : ${pass_build}" + if [ "${pass_build,,}" = "need_compile" ]; then echo "Start compile, source code root path: ${ROOT_PATH}" > ${COMPILE_LOG} echo "ROOT_PATH: ${ROOT_PATH}" compile_code # local debug, if only change sql test file can annotate this step diff --git 
a/CI/script/Dev_unit_test.sh b/CI/script/Dev_unit_test.sh index c0d9d8f6f5832df0795eecfda9a6943361a12dc7..66e8415ab51b819c5be086719c80b8563bb34891 100644 --- a/CI/script/Dev_unit_test.sh +++ b/CI/script/Dev_unit_test.sh @@ -47,6 +47,15 @@ function make_cantian_pkg(){ fi } +function prepare_cantian_cfg() +{ + echo "DBWR_PROCESSES = 8" >> ${CTDB_HOME}/cfg/cantiand.ini + echo "_SYS_PASSWORD = Ck311QUAECd2bdgppbA85VqPjjV/Wn/1jByyAWxktckKOjFf59olvzHvQWeKumrSJBjcV8RxupSQveBbc1i0J63n4kpk1+m43FyDL2XyBzQ50cVWsOFNXw==" >> ${CTDB_HOME}/cfg/cantiand.ini + echo "MYSQL_DEPLOY_GROUP_ID = 5000" >> ${CTDB_HOME}/cfg/cantiand.ini + echo "SHM_MYSQL_CPU_GROUP_INFO = 0-15" >> ${CTDB_HOME}/cfg/cantiand.ini + echo "SHM_CPU_GROUP_INFO = 0-15" >> ${CTDB_HOME}/cfg/cantiand.ini +} + echo -n "make test ..." dots 5 & DOTS_BG_PID=$! @@ -68,6 +77,8 @@ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CTDB_CODE_PATH}/library/mockcpp/lib/ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CTDB_CODE_PATH}/library/dbstor/lib/ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CTDB_CODE_PATH}/library/dbstor/lib/nomlnx/ +export CTDB_HOME=/home/cantiandb/install + UNAME=$(uname -a) if [[ "${UNAME}" =~ .*aarch64.* ]];then export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CTDB_CODE_PATH}/library/xnet/lib_arm/ @@ -76,20 +87,19 @@ elif [[ "${UNAME}" =~ .*x86_64.* ]];then else error "error: unknown arch!" fi + mkdir -p ${CTDB_HOME}/cfg -#chmod 777 ${CTDB_CODE_PATH}/pkg/test/unit_test/ut/cms_test/cfg -#chmod 777 ${CTDB_CODE_PATH}/pkg/test/mes_test/config -#chmod 600 ${CTDB_CODE_PATH}/pkg/test/mes_test/config/ca.crt -echo "DBWR_PROCESSES = 8" >> ${CTDB_HOME}/cfg/cantiand.ini +rm -f ${CTDB_HOME}/cfg/cantiand.ini +prepare_cantian_cfg echo -echo -n "run message_queue_test ..." -${CTDB_CODE_PATH}/output/bin/message_queue_test --gtest_output=xml:${GTEST_RESULT_PATH}/ >> ${GTEST_RUN_LOG} 2>&1 +echo -n "run ctsql_test ..." +${CTDB_CODE_PATH}/output/bin/ctsql_test --gtest_output=xml:${GTEST_RESULT_PATH}/ >> ${GTEST_RUN_LOG} 2>&1 if [ "$?" 
!= "0" ]; then - error "run message_queue_test error!" + error "run ctsql_test error!" fi echo -echo "run message_queue_test success!" +echo "run ctsql_test success!" echo echo -n "run ctc_srv_test ..." diff --git a/build/Makefile.sh b/build/Makefile.sh index f47f70297da7eca5f86e29f06a9a314c2853ba95..0ace915c2c8cf2a93c5b599ba8b462b0c0f136da 100644 --- a/build/Makefile.sh +++ b/build/Makefile.sh @@ -4,7 +4,9 @@ set -e -CURRENT_PATH=$(dirname $(readlink -f $0)) +CODE_BUILD_PATH=$(dirname $(readlink -f $0)) +CODE_HOME_PATH="${CODE_BUILD_PATH}"/.. + PS4=':${LINENO}+' declare VERSION_DESCRIP="" declare VERSION_MYSQLIP="" @@ -24,21 +26,15 @@ export BUILD_MODE="" export PYTHON_INCLUDE_DIR="" export LOGICREP_PERSONAL_PKG_DIR="" export WORKSPACE=$(dirname $(dirname $(pwd))) -DFT_WORKSPACE="/home/regress" -source ./common.sh -source ./function.sh +source ${CODE_BUILD_PATH}/common.sh CONFIG_IN_FILE=${CANTIANDB_BUILD}/include/config.h -CTDB_CODE_PATH="${CURRENT_PATH}"/.. CMAKE_C_COMPILER=$(which gcc) CMAKE_CXX_COMPILER=$(which g++) PYTHON3_HOME=${PYTHON3_HOME} -MYSQL_CODE_PATH=${WORKSPACE}/cantian-connector-mysql/mysql-source -INSTALL_DIR=/opt/cantiandb -INITSQL_DIR=../ -func_prepare_git_msg + PROJECT_VERSION=$(cat ${CONFIG_IN_FILE} | grep 'PROJECT_VERSION' | awk '{print $3}') CANTIAND_BIN=cantiand-${PROJECT_VERSION} JDBC_DIR=${CANTIANDB_HOME}/src/jdbc/cantian-jdbc/build/Cantian_PKG @@ -60,11 +56,7 @@ BUILD_MYSQL_SO=${BUILD_MYSQL_SO:-"YES"} FEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL:-"0"} OS_ARCH=$(uname -i) -export INTERNAL_BUILD="TRUE" - -if [[ ! 
-d "${CTDB_CODE_PATH}"/../ProductComm_DoradoAA ]];then - export INTERNAL_BUILD="FALSE" -fi +export INTERNAL_BUILD="FALSE" if [[ ${OS_ARCH} =~ "x86_64" ]]; then export CPU_CORES_NUM=`cat /proc/cpuinfo |grep "cores" |wc -l` @@ -87,6 +79,7 @@ if [ "${BUILD_MYSQL_SO}" == "YES" ]; then fi echo ${CANTIANDB_HOME} + func_prepare_pkg_name() { cd ${CANTIANDB_HOME} @@ -446,103 +439,6 @@ func_prepare_header_files() rm -rf ${MYSQL_DIR}/3rdPartyPkg fi } -func_make_mysql_debug() -{ - echo "Start build Mysql Debug..." - func_prepare_header_files - - rm -rf ${MYSQL_CODE_PATH}/cantian_lib - mkdir -p ${MYSQL_CODE_PATH}/cantian_lib - cp -arf ${CANTIAN_LIB_DIR}/* ${MYSQL_CODE_PATH}/cantian_lib/ - mkdir -p ${MYSQL_CODE_PATH}/bld_debug - local LLT_TEST_TYPE="NORMAL" - if [ "${ENABLE_LLT_GCOV}" == "YES" ]; then - LLT_TEST_TYPE="GCOV" - elif [ "${ENABLE_LLT_ASAN}" == "YES" ]; then - LLT_TEST_TYPE="ASAN" - fi - prepareGetMysqlClientStaticLibToCantianlib ${MYSQL_CODE_PATH} "DEBUG" ${LLT_TEST_TYPE} ${BOOST_PATH} ${CPU_CORES_NUM} ${MYSQL_CODE_PATH}/bld_debug - - cd ${MYSQL_CODE_PATH}/bld_debug - cp -arf "${CANTIANDB_LIBRARY}"/shared_lib/lib/libsecurec.so /usr/lib64/ - if [ "${MYSQL_BUILD_MODE}" == "multiple" ]; then - if [ "${ENABLE_LLT_GCOV}" == "YES" ]; then - cmake .. -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DENABLE_GCOV=1 -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Debug -DWITH_BOOST=${BOOST_PATH} -DWITHOUT_SERVER=OFF - elif [ "${ENABLE_LLT_ASAN}" == "YES" ]; then - cmake .. -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DWITH_ASAN=ON -DWITH_ASAN_SCOPE=ON -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Debug -DWITH_BOOST=${BOOST_PATH} -DWITHOUT_SERVER=OFF - else - cmake .. 
-DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Debug -DWITH_BOOST=${BOOST_PATH} -DWITHOUT_SERVER=OFF -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS="-Wno-error=attributes" -DCMAKE_CXX_FLAGS="-Wno-error=attributes" - fi - elif [ "${MYSQL_BUILD_MODE}" == "single" ]; then - cmake .. -DWITH_CANTIAN=1 -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Debug -DWITH_BOOST=${BOOST_PATH} -DWITHOUT_SERVER=OFF -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS="-Wno-error=attributes" -DCMAKE_CXX_FLAGS="-Wno-error=attributes" - fi - - MYSQL_BUILD_TYPE="debug" - - if [ -f ${MYSQL_BINARY_CODE_PATH}/mysql_${MYSQL_BUILD_TYPE}_${OS_ARCH}_${MYSQL_COMMIT_ID}.tar.gz ]; then - cd ${MYSQL_CODE_PATH}/bld_debug/storage/ctc && make -j${CPU_CORES_NUM} && make install - cd ${MYSQL_BINARY_CODE_PATH} && tar -xzf mysql_${MYSQL_BUILD_TYPE}_${OS_ARCH}_${MYSQL_COMMIT_ID}.tar.gz -C /usr/local/ - echo "mysql binary code untar succeed" - chmod +x /usr/local/mysql/bin/* - cp -arf /${MYSQL_CODE_PATH}/mysql-test /usr/local/mysql/ - else - make -j${CPU_CORES_NUM} - make install - fi - cp -r -f -p ${MYSQL_CODE_PATH}/cantian_lib/libctc_proxy.so /usr/lib64 - echo 'log_raw=ON' >> /usr/local/mysql/mysql-test/include/default_mysqld.cnf - cd - -} - -func_make_mysql_release() -{ - echo "Start build Mysql Release..." 
- func_prepare_header_files - rm -rf ${MYSQL_CODE_PATH}/cantian_lib - mkdir -p ${MYSQL_CODE_PATH}/cantian_lib - cp -arf ${CANTIAN_LIB_DIR}/* ${MYSQL_CODE_PATH}/cantian_lib/ - mkdir -p ${MYSQL_CODE_PATH}/bld_debug - local LLT_TEST_TYPE="NORMAL" - if [ "${ENABLE_LLT_GCOV}" == "YES" ]; then - LLT_TEST_TYPE="GCOV" - elif [ "${ENABLE_LLT_ASAN}" == "YES" ]; then - LLT_TEST_TYPE="ASAN" - fi - prepareGetMysqlClientStaticLibToCantianlib ${MYSQL_CODE_PATH} "RELEASE" ${LLT_TEST_TYPE} ${BOOST_PATH} ${CPU_CORES_NUM} ${MYSQL_CODE_PATH}/bld_debug - - cd ${MYSQL_CODE_PATH}/bld_debug - cp -arf "${CANTIANDB_LIBRARY}"/shared_lib/lib/libsecurec.so /usr/lib64/ - if [ "${MYSQL_BUILD_MODE}" == "multiple" ]; then - if [[ ${OS_ARCH} =~ "aarch64" ]]; then - cmake .. -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Release -DWITH_BOOST=${BOOST_PATH} -DCMAKE_C_FLAGS="-g -march=armv8.2-a+crc+lse" -DCMAKE_CXX_FLAGS="-g -march=armv8.2-a+crc+lse" -DWITHOUT_SERVER=OFF -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - else - cmake .. -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Release -DWITH_BOOST=${BOOST_PATH} -DCMAKE_C_FLAGS=-g -DCMAKE_CXX_FLAGS=-g -DWITHOUT_SERVER=OFF -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - fi - elif [ "${MYSQL_BUILD_MODE}" == "single" ]; then - if [[ ${OS_ARCH} =~ "aarch64" ]]; then - cmake .. -DWITH_CANTIAN=1 -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Release -DWITH_BOOST=${BOOST_PATH} -DCMAKE_C_FLAGS="-g -march=armv8.2-a+crc+lse" -DCMAKE_CXX_FLAGS="-g -march=armv8.2-a+crc+lse" -DWITHOUT_SERVER=OFF -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - else - cmake .. 
-DWITH_CANTIAN=1 -DWITH_CTC_STORAGE_ENGINE=${WITH_CTC_STORAGE_ENGINE} -DFEATURE_FOR_EVERSQL=${FEATURE_FOR_EVERSQL} -DCMAKE_BUILD_TYPE=Release -DWITH_BOOST=${BOOST_PATH} -DCMAKE_C_FLAGS=-g -DCMAKE_CXX_FLAGS=-g -DWITHOUT_SERVER=OFF -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - fi - - fi - - - MYSQL_BUILD_TYPE="release" - if [ -f ${MYSQL_BINARY_CODE_PATH}/mysql_${MYSQL_BUILD_TYPE}_${OS_ARCH}_${MYSQL_COMMIT_ID}.tar.gz ]; then - cd ${MYSQL_CODE_PATH}/bld_debug/storage/ctc && make -j${CPU_CORES_NUM} && make install - cd ${MYSQL_BINARY_CODE_PATH} && tar -xzf mysql_${MYSQL_BUILD_TYPE}_${OS_ARCH}_${MYSQL_COMMIT_ID}.tar.gz -C /usr/local/ - echo "mysql binary code untar succeed" - chmod +x /usr/local/mysql/bin/* - cp -arf /${MYSQL_CODE_PATH}/mysql-test /usr/local/mysql/ - else - make -j${CPU_CORES_NUM} - make install - fi - cp -r -f -p ${MYSQL_CODE_PATH}/cantian_lib/libctc_proxy.so /usr/lib64 - echo 'log_raw=ON' >> /usr/local/mysql/mysql-test/include/default_mysqld.cnf - cd - -} func_test() { @@ -742,29 +638,11 @@ func_making_package() func_download_3rdparty() { - if [[ "${WORKSPACE}" == *"regress"* ]]; then - DOWNLOAD_PATH=$DFT_WORKSPACE"/CantianKernel" - else - DOWNLOAD_PATH=${WORKSPACE}"/cantian" - fi - - cd ${DOWNLOAD_PATH} - if [[ ${INTERNAL_BUILD} == "FALSE" ]]; then - git submodule init - git submodule update --recursive - else - echo "Clone source start" - if [[ x"${proxy_user}" != x"" ]]; then - export http_proxy=http://${proxy_user}:${proxy_pwd}@${proxy_url} - export https_proxy=${http_proxy} - export no_proxy=127.0.0.1,.huawei.com,localhost,local,.local - fi - git clone https://gitee.com/cantian-repo/cantian-repo.git - rm -rf open_source - mv cantian-repo open_source - fi - - cd - + cd ${CODE_HOME_PATH} + git submodule init + git submodule update --recursive + + cd ${CODE_BUILD_PATH} echo "start compile 3rdparty : " sh compile_opensource_new.sh @@ -809,7 +687,7 @@ func_prepare_dependency() func_prepare_LLT_dependency() { - echo 
"Prepare LCRP_HOME dependency func : " + echo "Prepare LLT dependency func : " if [[ ! -d ${CANTIANDB_LIBRARY} ]]; then echo "library dir not exist" mkdir -p ${CANTIANDB_LIBRARY} @@ -835,28 +713,9 @@ func_prepare_LLT_dependency() mkdir -p ${MYSQL_BINARY_CODE_PATH} fi - echo ${LCRP_HOME} - mkdir -p /root/.ArtGet/conf - cp -f ${CANTIANDB_CI_PATH}/CMC/Setting.xml /root/.ArtGet/conf - OS_Version=`sh ${CANTIANDB_CI_PATH}/CMC/get_OS_Version.sh` - #下载三方库并编译 - if [[ ${INTERNAL_BUILD} == "FALSE" ]]; then - func_download_3rdparty - fi - if [[ "${WORKSPACE}" == *"regress"* ]]; then - DOWNLOAD_PATH=$DFT_WORKSPACE"/CantianKernel" - else - DOWNLOAD_PATH=${WORKSPACE}"/cantian" - fi - - echo "start download 3rdparty : ${DOWNLOAD_PATH}" - python ${CANTIANDB_CI_PATH}/CMC/manifest_opensource_download.py manifest_opensource.xml ${DOWNLOAD_PATH} - sh download_opensource_cmc.sh - echo "start download 3rdparty lib: " - artget pull -d ${CANTIANDB_CI_PATH}/CMC/CantianKernel_opensource_dependency.xml -p "{'OS_Version':'${OS_Version}'}" -user ${cmc_username} -pwd ${cmc_password} + func_download_3rdparty - artget pull -d ${CANTIANDB_CI_PATH}/CMC/CantianKernel_dependency_new.xml -p "{'OS_Version':'${OS_Version}'}" -user ${cmc_username} -pwd ${cmc_password} if [[ $? 
-ne 0 ]]; then echo "dependency download failed" exit 1 @@ -1118,12 +977,6 @@ main() 'mysqllib') fun_pkg_mysql_lib ;; - 'mysql'|'mysql_debug') - func_make_mysql_debug - ;; - 'mysql_release') - func_make_mysql_release - ;; 'mysql_package_node0') func_collect_mysql_target node0 ;; @@ -1148,11 +1001,11 @@ main() prepare_bazel_dependency ;; 'make_regress_test') - COMPILE_OPTS="${COMPILE_OPTS} -DUSE_PROTECT_VM=ON -DCMS_UT_TEST=ON" + COMPILE_OPTS="${COMPILE_OPTS} -DUSE_PROTECT_VM=OFF -DCMS_UT_TEST=ON" func_regress_test ;; 'make_cantian_pkg_test') - COMPILE_OPTS="${COMPILE_OPTS} -DUSE_PROTECT_VM=ON" + COMPILE_OPTS="${COMPILE_OPTS} -DUSE_PROTECT_VM=OFF" func_making_package_test Debug ;; *) diff --git a/build/common.sh b/build/common.sh index a1d38e5c77eaf53ec5b8dc983a06a117978477fd..cb781abdf06747a42ec41b7347506cb668e20652 100644 --- a/build/common.sh +++ b/build/common.sh @@ -6,13 +6,10 @@ declare OS_SUFFIX="" declare OS_MAJOR_VERSION="" declare OS_MINOR_VERSION="" -declare OPENSSL_LIB_PATH="" -declare SECUREC_LIB_PATH="" declare PCRE_LIB_PATH="" declare Z_LIB_PATH="" declare ZSTD_LIB_PATH="" declare LZ4_LIB_PATH="" -declare XNET_LIB_PATH="" declare OS_ARCH="" declare WHOLE_COMMIT_ID="" declare git_id="" @@ -27,7 +24,9 @@ ARCH=$(getconf LONG_BIT) SYMBOLFIX=symbol SO=so -CODE_HOME_PATH=$(echo $(dirname $(pwd))) +CURRENT_PATH=$(dirname $(readlink -f $0)) +CODE_HOME_PATH="${CURRENT_PATH}"/.. 
+ CANTIANDB_CI_PATH=${CODE_HOME_PATH}/CI CANTIANDB_HOME=${CODE_HOME_PATH}/pkg @@ -56,15 +55,12 @@ else OS_ARCH=$(uname -i) fi -OPENSSL_LIB_PATH=${CANTIANDB_LIBRARY}/openssl/lib PCRE_LIB_PATH=${CANTIANDB_LIBRARY}/pcre/lib Z_LIB_PATH=${CANTIANDB_LIBRARY}/zlib/lib ODBC_LIB_PATH=${CANTIANDB_LIBRARY}/odbc/lib ZSTD_LIB_PATH=${CANTIANDB_LIBRARY}/Zstandard/lib LZ4_LIB_PATH=${CANTIANDB_LIBRARY}/lz4/lib -SECUREC_LIB_PATH=${CANTIANDB_LIBRARY}/security/lib KMC_LIB_PATH=${CANTIANDB_LIBRARY}/kmc/lib -XNET_LIB_PATH=${CANTIANDB_LIBRARY}/xnet/lib SUSE_VERSION_PATH=/etc/SuSE-release REDHAT_VERSION_PATH=/etc/redhat-release diff --git a/build/compile_opensource_new.sh b/build/compile_opensource_new.sh index 10fc5c707910e68b7fdb5e3550d7a13da9eb14b2..6b0eef4745242109188b6bc83249b8be0447e7b3 100644 --- a/build/compile_opensource_new.sh +++ b/build/compile_opensource_new.sh @@ -15,9 +15,11 @@ echo $DFT_WORKSPACE " " $WORKSPACE if [[ "$WORKSPACE" == *"regress"* ]]; then echo $DFT_WORKSPACE " eq " $WORKSPACE else - export OPEN_SOURCE=${WORKSPACE}/cantian/open_source - export LIBRARY=${WORKSPACE}/cantian/library - export PLATFORM=${WORKSPACE}/cantian/platform + CURRENT_PATH=$(dirname $(readlink -f $0)) + CODE_PATH=$(cd "${CURRENT_PATH}/.."; pwd) + export OPEN_SOURCE=${CODE_PATH}/open_source + export LIBRARY=${CODE_PATH}/library + export PLATFORM=${CODE_PATH}/platform fi #pcre diff --git a/build/download_opensource_cmc.sh b/build/download_opensource_cmc.sh deleted file mode 100644 index 34a1138ba7b5d2a021ec385d3fe2acdf8fb99374..0000000000000000000000000000000000000000 --- a/build/download_opensource_cmc.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash -# Copyright Huawei Technologies Co., Ltd. 2010-2018. All rights reserved. 
-set -e - -declare BEP - -export WORKSPACE=$(dirname $(dirname $(pwd))) -export OPEN_SOURCE=${WORKSPACE}/CantianKernel/open_source -export LIBRARY=${WORKSPACE}/CantianKernel/library -export OS_ARCH=$(uname -i) -DFT_WORKSPACE="/home/regress" - -echo $DFT_WORKSPACE " " $WORKSPACE -if [[ "$WORKSPACE" == *"regress"* ]]; then - echo $DFT_WORKSPACE " eq " $WORKSPACE -else - export OPEN_SOURCE=${WORKSPACE}/cantian/open_source - export LIBRARY=${WORKSPACE}/cantian/library -fi - -#pcre -cd ${OPEN_SOURCE}/pcre/pcre2-10.40 -touch configure.ac aclocal.m4 Makefile.in configure config.h.in -mkdir -p pcre-build;chmod 755 -R ./* -aclocal;autoconf;autoreconf -vif -#判断系统是否是centos,并且参数bep是否为true,都是则删除。 -if [[ ! -z ${BEP} ]]; then - if [[ -n "$(cat /etc/os-release | grep CentOS)" ]] && [[ ${BEP} == "true" ]] && [[ "${BUILD_TYPE}" == "RELEASE" ]];then - sed -i "2656,2690d" configure #从2656到2690行是构建环境检查,检查系统时间的。做bep固定时间戳时,若是centos系统,系统时间固定,必须删除构建环境检查,才能编译,才能保证两次出包bep一致;若是euler系统,可不用删除,删除了也不影响编译。 - fi -fi -./configure -mkdir -p ${OPEN_SOURCE}/pcre/include/ -cp ${OPEN_SOURCE}/pcre/pcre2-10.40/src/pcre2.h ${OPEN_SOURCE}/pcre/include/ - -#lz4 -cd ${OPEN_SOURCE}/lz4/lz4-1.9.4/lib -mkdir -p ${OPEN_SOURCE}/lz4/include/ -cp lz4frame.h lz4.h ${OPEN_SOURCE}/lz4/include - -#zstd -cd ${OPEN_SOURCE}/Zstandard/zstd-1.5.2 -mkdir -p ${OPEN_SOURCE}/Zstandard/include -cp lib/zstd.h ${OPEN_SOURCE}/Zstandard/include -cd lib/;rm -f libzstd.so libzstd.so.1 -ln -s libzstd.so.1.5.2 libzstd.so -ln -s libzstd.so.1.5.2 libzstd.so.1 - - -#protobuf -# cd ${OPEN_SOURCE}/protobuf/protobuf.3.13.0 -# ./autogen.sh -# # 流水线是否设置BEP -# if [[ ! 
-z ${BEP} ]]; then -# if [[ -n "$(cat /etc/os-release | grep CentOS)" ]] && [[ ${BEP} == "true" ]] && [[ "${BUILD_TYPE}" == "RELEASE" ]];then -# sed -i "2915,2949d" configure -# fi -# fi -# ./configure -# if [[ ${OS_ARCH} =~ "x86_64" ]]; then -# export CPU_CORES_NUM_x86=`cat /proc/cpuinfo |grep "cores" |wc -l` -# make -j${CPU_CORES_NUM_x86} -# elif [[ ${OS_ARCH} =~ "aarch64" ]]; then -# export CPU_CORES_NUM_arm=`cat /proc/cpuinfo |grep "architecture" |wc -l` -# make -j${CPU_CORES_NUM_arm} -# else -# echo "OS_ARCH: ${OS_ARCH} is unknown, set CPU_CORES_NUM=16 " -# export CPU_CORES_NUM=16 -# make -j${CPU_CORES_NUM} -# fi -# make install - -#protobuf-c -mkdir -p ${OPEN_SOURCE}/protobuf-c/include/ -mkdir -p ${LIBRARY}/protobuf/protobuf-c/ -cp ${OPEN_SOURCE}/protobuf-c/protobuf-c-1.4.1/protobuf-c/protobuf-c.h ${OPEN_SOURCE}/protobuf-c/include/ -cp ${OPEN_SOURCE}/protobuf-c/protobuf-c-1.4.1/protobuf-c/protobuf-c.h ${LIBRARY}/protobuf/protobuf-c/ - -#openssl -cd ${OPEN_SOURCE}/openssl/openssl-3.0.7/ -./config shared -if [[ ${OS_ARCH} =~ "x86_64" ]]; then - export CPU_CORES_NUM_x86=`cat /proc/cpuinfo |grep "cores" |wc -l` - make -j${CPU_CORES_NUM_x86} -elif [[ ${OS_ARCH} =~ "aarch64" ]]; then - export CPU_CORES_NUM_arm=`cat /proc/cpuinfo |grep "architecture" |wc -l` - make -j${CPU_CORES_NUM_arm} -else - echo "OS_ARCH: ${OS_ARCH} is unknown, set CPU_CORES_NUM=16 " - export CPU_CORES_NUM=16 - make -j${CPU_CORES_NUM} -fi -mkdir -p ${OPEN_SOURCE}/openssl/include/ -mkdir -p ${LIBRARY}/openssl/lib/ -cp -rf ${OPEN_SOURCE}/openssl/openssl-3.0.7/include/* ${OPEN_SOURCE}/openssl/include/ -cp -rf ${OPEN_SOURCE}/openssl/openssl-3.0.7/*.a ${LIBRARY}/openssl/lib -echo "copy lib finished" - -#zlib -cd ${OPEN_SOURCE}/zlib/zlib-1.2.13 -mkdir -p ${OPEN_SOURCE}/zlib/include -cp zconf.h zlib.h ${OPEN_SOURCE}/zlib/include diff --git a/build/local_install.sh b/build/local_install.sh new file mode 100644 index 0000000000000000000000000000000000000000..b26ffa77633ae0baa2962f20b3aca467a0aa4075 --- 
/dev/null +++ b/build/local_install.sh @@ -0,0 +1,150 @@ +#!/bin/bash + + +CURRENT_PATH=$(dirname $(readlink -f $0)) +CODE_PATH=$(cd "${CURRENT_PATH}/.."; pwd) +WORK_DIR=$(cd "${CURRENT_PATH}/../../"; pwd) +BUILD_ARGS="" +PATCH="" # 是否在cantian中创建元数据 +BUILD_TYPE="release" +USER="cantiandba" + +function prepare() { + echo "Prepare env start." + yum install -y libaio-devel openssl openssl-devel \ + ndctl-devel ncurses ncurses-devel libtirpc-devel \ + expect ant bison iputils iproute wget\ + libtirpc-devel make gcc gcc-c++ gdb gdb-gdbserver\ + python3 python3-devel git net-tools cmake automake\ + byacc libtool --skip-broken + echo "Prepare env success." +} + +function cantian_patch() { + escaped_variable=$(echo "${WORK_DIR}" | sed 's/\//\\\//g') + sed -i "s/\/home\/regress\/CantianKernel/${escaped_variable}\/cantian/g" ${WORK_DIR}/cantian/pkg/install/install.py + sed -i "s/\/home\/regress/${escaped_variable}/g" ${CODE_PATH}/pkg/install/Common.py + sed -i "s/\/home\/regress/${escaped_variable}/g" ${CODE_PATH}/pkg/install/funclib.py + sed -i "s/192.168.86.1/127.0.0.1/g" ${CODE_PATH}/pkg/install/funclib.py +} + +function compile() { + cantian_patch + export local_build=true + cd ${CODE_PATH}/build || exit 1 + sh Makefile.sh package-${BUILD_TYPE} ${BUILD_ARGS} + if [[ $? -ne 0 ]]; then + echo "build_cantian failed." + exit 1 + fi +} + +function clean() { + kill -9 $(pidof cantiand) > /dev/null 2>&1 + kill -9 $(pidof cms) > /dev/null 2>&1 + rm -rf ${WORK_DIR}/cantian_data/* /home/${USER}/install /home/${USER}/data /data/data/* + sed -i "/${USER}/d" /home/${USER}/.bashrc +} + +function install() { + id "${USER}" + if [[ $? -ne 0 ]]; then + echo "add user ${USER}." 
+ useradd -m -s /bin/bash ${USER} + echo "${USER}:${USER}" | chpasswd + fi + touch /.dockerenv + clean + mkdir -p "${WORK_DIR}"/cantian_data -m 755 + chown -R ${USER}:${USER} "${WORK_DIR}"/cantian_data + cd ${CODE_PATH}/Cantian-DATABASE-*-64bit || exit 1 + mkdir -p /home/${USER}/logs + run_mode=cantiand_in_cluster + python3 install.py -U ${USER}:${USER} -R /home/${USER}/install \ + -D /home/${USER}/data -l /home/${USER}/logs/install.log \ + -M ${run_mode} -Z _LOG_LEVEL=255 -N 0 -W 192.168.0.1 -g \ + withoutroot -d -c -Z _SYS_PASSWORD=huawei@1234 -Z SESSIONS=1000 + if [[ $? -ne 0 ]]; then + echo "install cantian failed." + exit 1 + fi +} + +function usage() { + echo 'Usage: sh local_install.sh compile [OPTION]' + echo 'Options:' + echo ' -b, --build_type= Build type, default is release.' + echo ' -u, --user= User name, default is cantiandba.' + echo ' -h, --help Display this help and exit.' +} + +function parse_params() +{ + ARGS=$(getopt -o b:u: --long build_type:,user:, -n "$0" -- "$@") + if [ $? != 0 ]; then + echo "Terminating..." + exit 1 + fi + eval set -- "${ARGS}" + while true + do + case "$1" in + -b | --build_type) + BUILD_TYPE=$2 + shift 2 + ;; + -u | --user) + USER=$2 + shift 2 + ;; + --) + shift + break + ;; + -h) + usage + exit 1 + ;; + esac + done +} + +function help() { + echo 'Usage: sh local_install.sh [OPTION]' + echo 'Options:' + echo ' prepare Prepare compile and install dependencies.' + echo ' compile Compile cantian.' + echo ' install Install and start cantian.' + echo ' clean Uninstall and clean env.' 
+} + +function main() +{ + mode=$1 + shift + parse_params "$@" + case $mode in + prepare) + prepare + exit 0 + ;; + compile) + compile + exit 0 + ;; + install) + install + exit 0 + ;; + clean) + clean + exit 0 + ;; + *) + help + exit 1 + ;; + esac +} + +main "$@" \ No newline at end of file diff --git a/common/script/cantian_service.sh b/common/script/cantian_service.sh index 59da4a62d53dc09cb848f2b8046bf459ad677653..6ec6c6a49f9bf65ed06b8326b210b3ed9e5c9b34 100644 --- a/common/script/cantian_service.sh +++ b/common/script/cantian_service.sh @@ -5,7 +5,6 @@ set +x CURRENT_PATH=$(dirname $(readlink -f $0)) SCRIPT_NAME=${PARENT_DIR_NAME}/$(basename $0) -DEPLOY_MODE_DBSTORE_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag source ${CURRENT_PATH}/../../action/env.sh source ${CURRENT_PATH}/log4sh.sh NFS_TIMEO=50 @@ -74,19 +73,19 @@ function mountNfs() # 防止日志输出到/var/log/messages中 if [[ x"${deploy_mode}" == x"file" ]]; then - storage_dbstore_fs=`python3 ${CURRENT_PATH}/../../action/get_config_info.py "storage_dbstore_fs"` + storage_dbstor_fs=`python3 ${CURRENT_PATH}/../../action/get_config_info.py "storage_dbstor_fs"` storage_logic_ip=`python3 ${CURRENT_PATH}/../../action/get_config_info.py "storage_logic_ip"` - mountpoint /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" > /dev/null 2>&1 + mountpoint /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" > /dev/null 2>&1 if [ $? -ne 0 ]; then - mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev "${storage_logic_ip}":/"${storage_dbstore_fs}" /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" + mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev "${storage_logic_ip}":/"${storage_dbstor_fs}" /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" fi if [ $? -ne 0 ]; then - logAndEchoError "mount /mnt/dbdata/remote/storage_"${storage_dbstore_fs} failed. [Line:${LINENO}, File:${SCRIPT_NAME}]" + logAndEchoError "mount /mnt/dbdata/remote/storage_"${storage_dbstor_fs} failed. 
[Line:${LINENO}, File:${SCRIPT_NAME}]" exit 1 else - logAndEchoInfo "mount /mnt/dbdata/remote/storage_"${storage_dbstore_fs} success. [Line:${LINENO}, File:${SCRIPT_NAME}]" + logAndEchoInfo "mount /mnt/dbdata/remote/storage_"${storage_dbstor_fs} success. [Line:${LINENO}, File:${SCRIPT_NAME}]" fi fi if [[ x"${deploy_mode}" == x"file" ]] || [[ -f /opt/cantian/youmai_demo ]];then diff --git a/ct_om/service/cantian_exporter/exporter/get_info.py b/ct_om/service/cantian_exporter/exporter/get_info.py index b89c7d9560e0c326ca6aa950f0cd7f00aff3e299..a66526204f99a06283a60271d165699605fdcc9f 100644 --- a/ct_om/service/cantian_exporter/exporter/get_info.py +++ b/ct_om/service/cantian_exporter/exporter/get_info.py @@ -707,20 +707,20 @@ class GetDbstorInfo: def __init__(self): self.deploy_config = self.get_deploy_info() self.std_output = { - self.deploy_config.get("storage_dbstore_fs"): + self.deploy_config.get("storage_dbstor_fs"): { 'limit': 0, 'used': 0, 'free': 0, 'snapshotLimit': 0, 'snapshotUsed': 0, 'fsId': '', 'linkState': '' }, - self.deploy_config.get("storage_dbstore_page_fs"): + self.deploy_config.get("storage_dbstor_page_fs"): { 'limit': 0, 'used': 0, 'free': 0, 'snapshotLimit': 0, 'snapshotUsed': 0, 'fsId': '', 'linkState': '' } } - self.info_file_path = '/opt/cantian/common/data/dbstore_info.json' + self.info_file_path = '/opt/cantian/common/data/dbstor_info.json' self.index = 0 self.max_index = 10 self.last_time_stamp = None diff --git a/docker/Dockerfile b/docker/Dockerfile index 53e4d48b9f11d20d9b3c8a41927fe19d542d2ff6..5239f8af124e4cceec74b3a07aebe68bf145cba6 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,90 +1,70 @@ -FROM centos:8.2.2004 - -ARG BUILD=debug +FROM centos:8.2.2004 AS buildtag -RUN touch /root/.curlrc && sed -i "$a insecure" /root/.curlrc -RUN rm -rf /etc/yum.repos.d/* && curl https://mirrors.huaweicloud.com/repository/conf/CentOS-8-anon.repo > /etc/yum.repos.d/CentOS-Base.repo -RUN yum clean all && echo 'sslverify=False' >> 
/etc/yum.conf -RUN yum update -y -RUN rm -rf /etc/yum.repos.d/* && curl https://mirrors.huaweicloud.com/repository/conf/CentOS-8-anon.repo > /etc/yum.repos.d/CentOS-Base.repo -RUN yum clean all && echo 'sslverify=False' >> /etc/yum.conf -RUN yum install -y epel-release -RUN yum install -y \ - vim make gcc gcc-c++ gcc-toolset-10 gdb gdb-gdbserver python2 python2-devel \ - python3 python3-devel git net-tools cmake wget \ - tree mlocate psmisc sudo perf gawk perl-open perl \ - kernel-debug kernel-debug-core kernel-debug-devel \ - kernel-debug-modules kernel-debug-modules-extra \ - ndctl ndctl-libs ndctl-devel ncurses ncurses-devel libtirpc-devel \ - jq libpmem libpmem-devel daxio nc libaio-devel openssl openssl-devel \ - libibverbs librdmacm libibumad libibmad rdma-core-devel \ - openssh-server dnf-plugins-core dnf-utils ant maven bison +WORKDIR /tool -RUN touch /root/.wgetrc && sed -i "$a check_certificate = off" /root/.wgetrc +RUN yum clean all && touch /root/.curlrc && sed -i "$a insecure" /root/.curlrc && touch /root/.wgetrc && sed -i "$a check_certificate = off" /root/.wgetrc && \ + rm -rf /etc/yum.repos.d/* && curl https://mirrors.huaweicloud.com/repository/conf/CentOS-8-anon.repo > /etc/yum.repos.d/CentOS-Base.repo && \ + echo 'sslverify=False' >> /etc/yum.conf && yum update -y -# sshpass -RUN wget https://github.com/thkukuk/rpcsvc-proto/releases/download/v1.4/rpcsvc-proto-1.4.tar.gz \ - && tar -zxvf rpcsvc-proto-1.4.tar.gz && cd rpcsvc-proto-1.4 \ - && ./configure && make && make install && cd - +RUN rm -rf /etc/yum.repos.d/* && curl https://mirrors.huaweicloud.com/repository/conf/CentOS-8-anon.repo > /etc/yum.repos.d/CentOS-Base.repo && yum install -y wget git -RUN debuginfo-install -y libibverbs \ - librdmacm libibumad libibmad rdma-core-devel \ - libpmem libpmem-devel +RUN git clone https://gitee.com/cantian-repo/dockerbuild.git && \ + cd dockerbuild/third_party && \ + cat boost_1_73_0a* > boost_1_73_0.zip && \ + rm -rf boost_1_73_0a* && \ + mv * /tool && 
cd - && \ + rm -rf dockerbuild -RUN yum group install -y "Development Tools" +FROM centos:8.2.2004 -RUN mkdir /tools +ARG BUILD=debug WORKDIR /tools -RUN wget --progress=bar:force -O FlameGraph-master.zip https://github.com/brendangregg/FlameGraph/archive/master.zip -RUN wget --progress=bar:force -O fzf-master.zip https://github.com/junegunn/fzf/archive/master.zip -RUN wget --progress=bar:force https://boostorg.jfrog.io/artifactory/main/release/1.73.0/source/boost_1_73_0.tar.gz \ - && tar -zxf boost_1_73_0.tar.gz -RUN wget --progress=bar:force https://github.com/doxygen/doxygen/releases/download/Release_1_9_2/doxygen-1.9.2.src.tar.gz \ - && tar -zxf doxygen-1.9.2.src.tar.gz && cd doxygen-1.9.2 && mkdir build && cd build \ - && cmake -G "Unix Makefiles" .. && make && cd ../.. - -RUN wget --progress=bar:force -O /tmp/mysql80-community-release-el8-9.noarch.rpm https://dev.mysql.com/get/mysql80-community-release-el8-9.noarch.rpm -RUN yum install -y /tmp/mysql80-community-release-el8-9.noarch.rpm -RUN yum install -y mysql-connector-c++-devel --nogpgcheck -RUN yum install -y git-clang-format -RUN yum install -y libcap-devel -RUN yum install -y expect - -RUN wget --progress=bar:force -O /tmp/libasan5-8.2.1-3.el7.x86_64.rpm http://mirror.centos.org/centos/7/sclo/x86_64/rh/Packages/l/libasan5-8.2.1-3.el7.x86_64.rpm -RUN rpm -ivh /tmp/libasan5-8.2.1-3.el7.x86_64.rpm -RUN mkdir /docker-entrypoint-initdb.d -VOLUME /var/lib/mysql -RUN wget --progress=bar:force -O /tmp/docker-ce-cli-19.03.5-3.el7.x86_64.rpm https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-cli-19.03.5-3.el7.x86_64.rpm -RUN yum install -y /tmp/docker-ce-cli-19.03.5-3.el7.x86_64.rpm - -RUN /usr/bin/ssh-keygen -A -RUN sed -i -e 's/^#Port 22/Port 2200/g' \ +COPY --from=buildtag /tool/*.gz /tools/ +COPY --from=buildtag /tool/*.zip /tools/ +#安装顺序开发工具->编程语言->系统工具->性能分析->安全和权限->库和头文件->容器和集群管理->其他实用工具 \ +# ->内核调试包->特定于系统的库->特定于发行版的软件->不常用的工具->其他 +RUN yum clean all && touch /root/.curlrc && sed 
-i "$a insecure" /root/.curlrc && touch /root/.wgetrc && sed -i "$a check_certificate = off" /root/.wgetrc && \ + rm -rf /etc/yum.repos.d/* && curl https://mirrors.huaweicloud.com/repository/conf/CentOS-8-anon.repo > /etc/yum.repos.d/CentOS-Base.repo && \ + echo 'sslverify=False' >> /etc/yum.conf && mkdir /docker-entrypoint-initdb.d /cores && \ + yum update -y && \ + rm -rf /etc/yum.repos.d/* && curl https://mirrors.huaweicloud.com/repository/conf/CentOS-8-anon.repo > /etc/yum.repos.d/CentOS-Base.repo && \ + tar -zxvf rpcsvc-proto-1.4.tar.gz && \ + tar -zxf doxygen-1.9.2.src.tar.gz && \ + yum install -y vim make gcc gcc-c++ gcc-toolset-10 gdb gdb-gdbserver git cmake libpmem daxio perl-open libcap-devel git-clang-format wget\ + python3 python3-devel perl \ + net-tools iproute iputils nfs-utils unzip openssl ncurses dnf-plugins-core dnf-utils nc openssh-server perf sudo \ + openssl-devel libpmem-devel libaio-devel ncurses-devel libtirpc-devel \ + ndctl ndctl-libs ndctl-devel \ + tree mlocate psmisc gawk jq \ + libibverbs epel-release \ + ant maven bison expect && \ + unzip boost_1_73_0.zip && \ + python3 -m pip install --user --upgrade pip && \ + yum group install -y "Development Tools" && \ + pip3 install cryptography pyOpenSSL && \ + /usr/bin/ssh-keygen -A && \ + debuginfo-install -y libibverbs \ + libpmem libpmem-devel && \ + yum clean all &> /dev/null && \ + cd rpcsvc-proto-1.4 \ + && ./configure && make && make install && cd - && \ + cd doxygen-1.9.2 && mkdir build && cd build \ + && cmake -G "Unix Makefiles" .. && make && cd ../.. 
&& \ + sed -i -e 's/^#Port 22/Port 2200/g' \ -e 's/^SyslogFacility AUTHPRIV/#SyslogFacility AUTHPRIV/g' \ -e 's/^#SyslogFacility AUTH/SyslogFacility AUTH/g' \ - -e 's/^UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config - -RUN wget --progress=bar:force -P /tools https://mirrors.huaweicloud.com/java/jdk/8u202-b08/jdk-8u202-linux-x64.tar.gz \ - && tar -zxvf jdk-8u202-linux-x64.tar.gz -C /usr/local && sed -i '$aexport JAVA_HOME=/usr/local/jdk1.8.0_202' /etc/profile \ - && sed -i '$aexport PATH=$JAVA_HOME/bin:$PATH' /etc/profile && source /etc/profile - -RUN wget --progress=bar:force -P /tools https://go.dev/dl/go1.18.1.linux-amd64.tar.gz \ - && tar -zxvf go1.18.1.linux-amd64.tar.gz -C /usr/local && sed -i '$aexport GO_HOME=/usr/local/go' /etc/profile \ - && sed -i '$a export PATH=$GO_HOME/bin:$PATH' /etc/profile && source /etc/profile + -e 's/^UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config && \ + sed -i '$aexport PATH=$JAVA_HOME/bin:$PATH' /etc/profile && \ + sed -i '$a export PYTHON3_HOME=/usr/include/python3.6m' /etc/profile && source /etc/profile && \ + ln -s /usr/bin/gcc /usr/local/bin/gcc && ln -s /usr/bin/g++ /usr/local/bin/g++ && \ + touch /opt/bashrc && sed -i '$a ln /dev/sdb /dev/gss-disk1 2>/dev/null' /opt/bashrc && \ + sed -i '$a chmod 777 /dev/gss-disk1' /opt/bashrc && \ + sed -i '$a chmod 666 /var/run/docker.sock' /opt/bashrc && \ + rm -rf /tools/*.gz /tools/*.zip -RUN yum install -y nfs-utils -RUN pip3 install --upgrade pip -RUN pip3 install cryptography pyOpenSSL -RUN sed -i '$a export PYTHON3_HOME=/usr/include/python3.6m' /etc/profile - -RUN ln -s /usr/bin/gcc /usr/local/bin/gcc && ln -s /usr/bin/g++ /usr/local/bin/g++ - -RUN touch /opt/bashrc && sed -i '$a ln /dev/sdb /dev/gss-disk1 2>/dev/null' /opt/bashrc -RUN sed -i '$a chmod 777 /dev/gss-disk1' /opt/bashrc -RUN sed -i '$a chmod 666 /var/run/docker.sock' /opt/bashrc +VOLUME /var/lib/mysql -RUN mkdir /cores -RUN mkdir /home/regress WORKDIR /home/regress EXPOSE 3306 33060 diff --git a/docker/Dockerfile_ARM64 
b/docker/Dockerfile_ARM64 index 4ae5ff34b1fffb0d61178950ea4eecb94e974fa8..e6e3822edc336c4466be3d4c4fb654376a34252e 100644 --- a/docker/Dockerfile_ARM64 +++ b/docker/Dockerfile_ARM64 @@ -1,81 +1,80 @@ +# 第一阶段:构建阶段 +FROM openeuler/openeuler:22.03-lts-sp1 AS buildtag + +WORKDIR /tool + +# 配置 yum 和 wget 不检查证书 +RUN yum clean all && \ + echo 'sslverify=False' >> /etc/yum.conf && \ + touch /root/.wgetrc && \ + echo 'check_certificate = off' >> /root/.wgetrc && \ + yum update -y && \ + yum install -y wget git + +# 克隆仓库并合并 boost 文件 +RUN git clone https://gitee.com/cantian-repo/dockerbuild.git && \ + cd dockerbuild/third_party && \ + cat boost_1_73_0a* > boost_1_73_0.zip && \ + rm -rf boost_1_73_0a* && \ + mv * /tool && \ + cd - && \ + rm -rf dockerbuild + +# 第二阶段:最终镜像 FROM openeuler/openeuler:22.03-lts-sp1 ARG BUILD=debug - -RUN touch /root/.curlrc && sed -i "$a insecure" /root/.curlrc - -RUN yum clean all && echo 'sslverify=False' >> /etc/yum.conf -RUN yum update -y -RUN yum install -y \ - vim make gcc gcc-c++ gdb gdb-gdbserver \ - python3 python3-devel git net-tools cmake wget \ - tree mlocate psmisc sudo perf gawk perl-open perl \ - ndctl ndctl-libs ndctl-devel ncurses ncurses-devel libtirpc-devel \ - jq nc libaio-devel openssl openssl-devel \ - libibverbs librdmacm libibumad libibmad rdma-core-devel \ - openssh-server dnf-plugins-core dnf-utils ant maven bison - -RUN touch /root/.wgetrc && sed -i "$a check_certificate = off" /root/.wgetrc - -# sshpass -RUN wget https://github.com/thkukuk/rpcsvc-proto/releases/download/v1.4/rpcsvc-proto-1.4.tar.gz \ - && tar -zxvf rpcsvc-proto-1.4.tar.gz && cd rpcsvc-proto-1.4 \ - && ./configure && make && make install && cd - - -RUN debuginfo-install -y libibverbs \ - librdmacm libibumad libibmad rdma-core-devel - -RUN yum group install -y "Development Tools" - -RUN mkdir /tools WORKDIR /tools -RUN wget --progress=bar:force -O FlameGraph-master.zip https://github.com/brendangregg/FlameGraph/archive/master.zip -RUN wget 
--progress=bar:force -O fzf-master.zip https://github.com/junegunn/fzf/archive/master.zip -RUN wget --progress=bar:force https://boostorg.jfrog.io/artifactory/main/release/1.73.0/source/boost_1_73_0.tar.gz \ - && tar -zxf boost_1_73_0.tar.gz -RUN wget --progress=bar:force https://github.com/doxygen/doxygen/releases/download/Release_1_9_2/doxygen-1.9.2.src.tar.gz \ - && tar -zxf doxygen-1.9.2.src.tar.gz && cd doxygen-1.9.2 && mkdir build && cd build \ - && cmake -G "Unix Makefiles" .. && make && cd ../.. - -RUN wget --progress=bar:force -O /tmp/mysql80-community-release-el8-9.noarch.rpm https://dev.mysql.com/get/mysql80-community-release-el8-9.noarch.rpm -RUN yum install -y /tmp/mysql80-community-release-el8-9.noarch.rpm -RUN yum install -y mysql-connector-c++-devel --nogpgcheck -RUN yum install -y git-clang-format -RUN yum install -y libcap-devel -RUN yum install -y expect -RUN wget --progress=bar:force -O /tmp/libasan5-8.2.1-3.bs1.el7.aarch64.rpm http://mirror.centos.org/altarch/7/sclo/aarch64/rh/Packages/l/libasan5-8.2.1-3.bs1.el7.aarch64.rpm -RUN rpm -ivh /tmp/libasan5-8.2.1-3.bs1.el7.aarch64.rpm - -RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo -RUN sed -i 's/\$releasever/8/g' /etc/yum.repos.d/docker-ce.repo -RUN mkdir /docker-entrypoint-initdb.d -VOLUME /var/lib/mysql -RUN wget --progress=bar:force -O /tmp/docker-ce-19.03.13-3.el8.aarch64.rpm https://download.docker.com/linux/centos/8/aarch64/stable/Packages/docker-ce-cli-19.03.13-3.el8.aarch64.rpm -RUN yum install -y /tmp/docker-ce-19.03.13-3.el8.aarch64.rpm - -RUN /usr/bin/ssh-keygen -A -RUN sed -i -e 's/^#Port 22/Port 2200/g' \ +# 从构建阶段复制文件 +COPY --from=buildtag /tool/*.gz /tools/ +COPY --from=buildtag /tool/*.zip /tools/ + +# 安装依赖并编译 +RUN tar -zxvf rpcsvc-proto-1.4.tar.gz && \ + tar -zxf doxygen-1.9.2.src.tar.gz && \ + yum clean all && \ + echo 'sslverify=False' >> /etc/yum.conf && \ + touch /root/.wgetrc && \ + echo 'check_certificate = off' >> /root/.wgetrc && \ 
+ yum update -y && \ + yum install -y vim make gcc gcc-c++ gdb gdb-gdbserver \ + python3 python3-devel git net-tools cmake \ + tree mlocate psmisc sudo perf gawk perl-open perl wget unzip \ + ndctl ndctl-libs ndctl-devel ncurses ncurses-devel libtirpc-devel \ + python3-pip jq nc libaio-devel openssl openssl-devel \ + libibverbs libibumad libibmad \ + openssh-server dnf-plugins-core dnf-utils ant maven bison \ + libcap-devel expect nfs-utils iputils iproute && \ + unzip boost_1_73_0.zip && \ + python3 -m pip install --user --upgrade pip && \ + yum group install -y "Development Tools" && \ + pip3 install cryptography pyOpenSSL && \ + /usr/bin/ssh-keygen -A && \ + debuginfo-install -y libibverbs \ + librdmacm libibumad libibmad rdma-core-devel && \ + yum clean all &> /dev/null && \ + cd rpcsvc-proto-1.4 && \ + ./configure && make && make install && cd - && \ + rm -rf rpcsvc-proto-1.4 && \ + cd doxygen-1.9.2 && mkdir build && cd build && \ + cmake -G "Unix Makefiles" .. && make && cd ../.. && \ + rm -rf doxygen-1.9.2 && \ + echo 'export PYTHON3_HOME=/usr/include/python3.6m' >> /etc/profile && \ + source /etc/profile && \ + touch /opt/bashrc && \ + echo 'ln /dev/sdb /dev/gss-disk1 2>/dev/null' >> /opt/bashrc && \ + echo 'chmod 777 /dev/gss-disk1' >> /opt/bashrc && \ + echo 'chmod 666 /var/run/docker.sock' >> /opt/bashrc && \ + sed -i -e 's/^#Port 22/Port 2200/g' \ -e 's/^SyslogFacility AUTHPRIV/#SyslogFacility AUTHPRIV/g' \ -e 's/^#SyslogFacility AUTH/SyslogFacility AUTH/g' \ - -e 's/^UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config - -RUN wget --progress=bar:force -P /tools https://go.dev/dl/go1.18.1.linux-arm64.tar.gz \ - && tar -zxvf go1.18.1.linux-arm64.tar.gz -C /usr/local && sed -i '$aexport GO_HOME=/usr/local/go' /etc/profile \ - && sed -i '$a export PATH=$GO_HOME/bin:$PATH' /etc/profile && source /etc/profile + -e 's/^UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config && \ + rm -rf /tools/*.gz /tools/*.zip -RUN yum install -y nfs-utils python3-pip -RUN pip install 
cryptography pyOpenSSL -RUN sed -i '$a export PYTHON3_HOME=/usr/include/python3.6m' /etc/profile - -RUN ln -s /usr/bin/gcc /usr/local/bin/gcc && ln -s /usr/bin/g++ /usr/local/bin/g++ - -RUN touch /opt/bashrc && sed -i '$a ln /dev/sdb /dev/gss-disk1 2>/dev/null' /opt/bashrc -RUN sed -i '$a chmod 777 /dev/gss-disk1' /opt/bashrc -RUN sed -i '$a chmod 666 /var/run/docker.sock' /opt/bashrc - -RUN mkdir /cores -RUN mkdir /home/regress +# 设置卷和工作目录 +VOLUME /var/lib/mysql WORKDIR /home/regress -EXPOSE 3306 33060 +# 暴露端口 +EXPOSE 3306 33060 \ No newline at end of file diff --git a/docker/readme.md b/docker/readme.md index 1ec293f3edba666d44f79a8663a7ad5f6156bbb2..8b795b3af5df1e2d469aa1f73c9eabad38ff57f0 100644 --- a/docker/readme.md +++ b/docker/readme.md @@ -36,17 +36,18 @@ drwxr-xr-x 7 root root 4096 Sep 20 10:25 cantian-connector-mysql // cantia drwxr-xr-x 4 root root 4096 Sep 25 18:11 cantian_data // cantian 数据文件目录 ``` -### 下载最新docker镜像 +### 下载最新docker基础镜像 ```shell # x86版本 -docker pull ykfnxx/cantian_dev:0.1.0 +docker pull quay.io/centos/centos:centos8.2.2004 +docker tag quay.io/centos/centos:centos8.2.2004 centos:8.2.2004 # arm版本 -docker pull ykfnxx/cantian_dev:0.1.1 -# x决定是arm/x86版本 -docker tag ykfnxx/cantian_dev:0.1.[x] cantian_dev:latest +docker pull hub.oepkgs.net/openeuler/openeuler:22.03-lts-sp1 +docker tag hub.oepkgs.net/openeuler/openeuler:22.03-lts-sp1 openeuler/openeuler:22.03-lts-sp1 ``` + ### 准备代码 ```shell @@ -60,6 +61,20 @@ mv mysql-server-mysql-8.0.26 cantian-connector-mysql/mysql-source mkdir -p cantian_data ``` +屏蔽编译告警 +```shell +# 注释maintainer.cmake文件中两行 +sed -i '/STRING_APPEND(MY_C_WARNING_FLAGS " -Werror")/s/^/#/' cantian-connector-mysql/mysql-source/cmake/maintainer.cmake +sed -i '/STRING_APPEND(MY_CXX_WARNING_FLAGS " -Werror")/s/^/#/' cantian-connector-mysql/mysql-source/cmake/maintainer.cmake +``` +编译容器镜像 +```shell +cd cantian/docker +# arm环境执行以下命令 +docker build -t cantian_dev -f Dockerfile_ARM64 . 
+# x86环境执行以下命令 +docker build -t cantian_dev -f Dockerfile . +``` ### 启动开发编译自验容器 diff --git "a/documents/\344\272\247\345\223\201\346\226\207\346\241\243/\345\256\211\350\243\205\346\214\207\345\215\227.md" "b/documents/\344\272\247\345\223\201\346\226\207\346\241\243/\345\256\211\350\243\205\346\214\207\345\215\227.md" index b15971969c3f1f6a025acf2b2a06396223729b13..a1431f876b453d2bad46179bcf1748806d81c916 100644 --- "a/documents/\344\272\247\345\223\201\346\226\207\346\241\243/\345\256\211\350\243\205\346\214\207\345\215\227.md" +++ "b/documents/\344\272\247\345\223\201\346\226\207\346\241\243/\345\256\211\350\243\205\346\214\207\345\215\227.md" @@ -116,9 +116,9 @@ -

storage_dbstore_fs

+

storage_dbstor_fs

-

ctdb_dbstore_fs

+

ctdb_dbstor_fs

10TB

@@ -1364,7 +1364,7 @@ Info: Save the configuration successfully. -

storage_dbstore_fs

+

storage_dbstor_fs

Cantian引擎使用的存储文件系统名称。该文件系统应在配置存储网络进行了创建,请填写创建该文件系统时使用的名称。

须知:

每个文件系统只能部署一个Cantian引擎。完成部署后,请勿修改文件系统的名称、以及NFS共享的名称。

@@ -1409,7 +1409,7 @@ Info: Save the configuration successfully.

storage_logic_ip

-

挂载storage_dbstore_fs文件系统的逻辑端口的IP。该逻辑IP应在配置存储网络进行了设置,请填写设置的逻辑IP值。

+

挂载storage_dbstor_fs文件系统的逻辑端口的IP。该逻辑IP应在配置存储网络进行了设置,请填写设置的逻辑IP值。

mes_ssl_switch

diff --git a/library/BUILD.bazel b/library/BUILD.bazel index b45329366e9167b476540a5ec1ab17b9b865b987..8156620410b820ae862953b6a2409c1f6773770e 100644 --- a/library/BUILD.bazel +++ b/library/BUILD.bazel @@ -63,8 +63,8 @@ cc_import ( ) cc_import ( - name = "dbstoreClient", - shared_library = "dbstor/lib/libdbstoreClient.so", + name = "dbstorClient", + shared_library = "dbstor/lib/libdbstorClient.so", ) cc_import ( diff --git a/pkg/CMakeLists.txt b/pkg/CMakeLists.txt index d78532fcadda54f42a3f031a0f059826963bac97..05630fd285e4e715d761609c0bfa9ae4a9c302f5 100644 --- a/pkg/CMakeLists.txt +++ b/pkg/CMakeLists.txt @@ -25,8 +25,12 @@ if (UNIX) add_compile_definitions(WSEC_COMPILE_MIP_OTHER) endif () +if(NOT USE_OSS_BUILD) + add_compile_options(-w) +endif() + # add subdir add_subdirectory(src) if(NOT USE_OSS_BUILD) - add_subdirectory(test) + add_subdirectory(test) endif() diff --git a/pkg/cfg/cantiand-ut.ini b/pkg/cfg/cantiand-ut.ini new file mode 100644 index 0000000000000000000000000000000000000000..0ae64ed5f9cf9d303afd246b4e133741e0a60a1a --- /dev/null +++ b/pkg/cfg/cantiand-ut.ini @@ -0,0 +1,14 @@ +TEMP_BUFFER_SIZE = 1G +DATA_BUFFER_SIZE = 2G +SHARED_POOL_SIZE = 1G +LOG_BUFFER_SIZE = 64M +DBWR_PROCESSES = 8 +LOG_BUFFER_COUNT = 8 +LSNR_ADDR = 127.0.0.1 +LSNR_PORT = 1511 +SESSIONS = 1500 +MYSQL_DEPLOY_GROUP_ID = 5000 +_SYS_PASSWORD = Ck311QUAECd2bdgppbA85VqPjjV/Wn/1jByyAWxktckKOjFf59olvzHvQWeKumrSJBjcV8RxupSQveBbc1i0J63n4kpk1+m43FyDL2XyBzQ50cVWsOFNXw== +SHM_MYSQL_CPU_GROUP_INFO = 0-3 +SHM_CPU_GROUP_INFO = 0-3 +CLUSTER_DATABASE = TRUE \ No newline at end of file diff --git a/pkg/cfg/cms-ut.ini b/pkg/cfg/cms-ut.ini new file mode 100644 index 0000000000000000000000000000000000000000..9a6240b5c4deed75898e9ef322f2cf750fd15fda --- /dev/null +++ b/pkg/cfg/cms-ut.ini @@ -0,0 +1,23 @@ +_LOG_BACKUP_FILE_COUNT = 10 +NODE_ID = 0 +GCC_HOME = /tmp/cantian_ut/data/gcc_home/gcc_file +GCC_TYPE = FILE +CMS_LOG = /tmp/cantian_ut/data/log +_PORT = 14587 +_IP = 127.0.0.1 +_LOG_LEVEL = 7 
+_SPLIT_BRAIN = TRUE +_LOG_MAX_FILE_SIZE = 100M +_DETECT_DISK_TIMEOUT = 6000 +_DISK_DETECT_FILE = gcc_file, +_EXIT_NUM_COUNT_FILE = /tmp/cantian_ut/data/exit_num.txt +_CMS_MES_THREAD_NUM = 5 +_CMS_MES_MAX_SESSION_NUM = 40 +_CMS_MES_MESSAGE_POOL_COUNT = 1 +_CMS_MES_MESSAGE_QUEUE_COUNT = 1 +_CMS_MES_MESSAGE_BUFF_COUNT = 4096 +_CMS_MES_MESSAGE_CHANNEL_NUM = 1 +_CMS_NODE_FAULT_THRESHOLD = 5 +_USE_DBSTOR = FALSE +_CMS_MES_PIPE_TYPE = TCP +_CMS_MES_CRC_CHECK_SWITCH = TRUE diff --git a/pkg/deploy/action/cantian/appctl.sh b/pkg/deploy/action/cantian/appctl.sh index 854d6953d8c76ada97907b88e3c34d935cf1571e..0f0d465710a7ee15bad4febe7c5a420f49b01445 100644 --- a/pkg/deploy/action/cantian/appctl.sh +++ b/pkg/deploy/action/cantian/appctl.sh @@ -709,13 +709,13 @@ function main_deploy() { pre_install) check_old_install chown_mod_scripts - init_cpu_config do_deploy ${PRE_INSTALL_NAME} ${INSTALL_TYPE} exit $? ;; install) copy_cantian_scripts create_mysql_dir + init_cpu_config do_deploy ${INSTALL_NAME} ${INSTALL_TYPE} exit $? 
;; diff --git a/pkg/deploy/action/cantian/bind_cpu_config.py b/pkg/deploy/action/cantian/bind_cpu_config.py index 8be67e047b96e9d9678ca3e01b9f60d419af817b..87458d73cfbd598dac473a917db99de8e3460160 100644 --- a/pkg/deploy/action/cantian/bind_cpu_config.py +++ b/pkg/deploy/action/cantian/bind_cpu_config.py @@ -11,10 +11,9 @@ import grp from log import LOGGER from get_config_info import get_value sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")) -from update_config import update_dbstore_conf +from update_config import update_dbstor_conf -# 需要的路径和配置 -CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "cpu_bind_config.json") +CONFIG_PATH = "/opt/cantian/action/cantian/cpu_bind_config.json" CPU_CONFIG_INFO = "/opt/cantian/cantian/cfg/cpu_config.json" CONFIG_DIR = "/mnt/dbdata/local/cantian/tmp/data" XNET_MODULE = "NETWORK_BIND_CPU" @@ -199,6 +198,36 @@ class NumaConfigBase: err_msg = "ERROR: cpu_bind_config.json does not exist" raise Exception(err_msg) + def get_module_bind_cpu_list(self, module_thread_num): + """ 获取模块绑核的 CPU 列表 """ + result_ranges = [] + count = module_thread_num + + numa_pointer = {numa_id: 0 for numa_id in self.available_cpu_for_binding_dict} + + # 过滤掉已经绑定的 CPU + if self.bind_cpu_list: + available_cpu_for_binding_filtered = { + numa_id: [cpu for cpu in available_cpu_list if cpu not in self.bind_cpu_list] + for numa_id, available_cpu_list in self.available_cpu_for_binding_dict.items() + } + else: + available_cpu_for_binding_filtered = self.available_cpu_for_binding_dict + + while count > 0: + for numa_id, available_cpu_list in available_cpu_for_binding_filtered.items(): + if numa_pointer[numa_id] < len(available_cpu_list): + result_ranges.append(available_cpu_list[numa_pointer[numa_id]]) + numa_pointer[numa_id] += 1 + count -= 1 + if count == 0: + break + + for numa_id, available_cpu_list in available_cpu_for_binding_filtered.items(): + self.available_cpu_for_binding_dict[numa_id] = 
available_cpu_list[numa_pointer[numa_id]:] + + return result_ranges + def update_dbstor_config_file(self, cpu_config_info): """ Modifies the dbstor configuration file based on the provided CPU configuration. @@ -210,9 +239,9 @@ class NumaConfigBase: if module_id_key in cpu_config_info and cpu_config_info[module_id_key]: cpu_info = cpu_list_to_cpu_info(cpu_config_info[module_id_key]) - update_dbstore_conf("add", dbstor_file_key, cpu_info) + update_dbstor_conf("add", dbstor_file_key, cpu_info) else: - update_dbstore_conf("remove", dbstor_file_key, None) + update_dbstor_conf("remove", dbstor_file_key, None) def update_cantian_config_file(self, cantiand_cpu_info): """ @@ -353,8 +382,17 @@ class PhysicalCpuConfig(NumaConfigBase): f"Cannot use CPUs outside the available range or in 0-5.") LOGGER.error(err_msg) raise Exception(err_msg) + + duplicate_cpus = set(manually_configured_cpus) & set(self.bind_cpu_list) + if duplicate_cpus: + err_msg = (f"Currently bound CPUs: {self.bind_cpu_list}. " + f"Conflict in CPU binding for {module_id_key}: CPUs {duplicate_cpus} are already bound.") + LOGGER.error(err_msg) + raise Exception(err_msg) LOGGER.info(f"{module_id_key} is manually configured, skipping CPU binding generation.") bind_cpu_list.extend(manually_configured_cpus) + self.bind_cpu_dict[module_id_key] = ",".join(map(str, manually_configured_cpus)) + self.bind_cpu_list = bind_cpu_list continue if not module_info: @@ -374,29 +412,7 @@ class PhysicalCpuConfig(NumaConfigBase): module_cpu_list = self.get_module_bind_cpu_list(module_info) bind_cpu_list.extend(module_cpu_list) self.bind_cpu_dict[module_id_key] = ",".join(map(str, module_cpu_list)) - - self.bind_cpu_list = bind_cpu_list - - def get_module_bind_cpu_list(self, module_thread_num): - """ 获取模块绑核的 CPU 列表 """ - result_ranges = [] - count = module_thread_num - - numa_pointer = {numa_id: 0 for numa_id in self.available_cpu_for_binding_dict} - - while count > 0: - for numa_id, available_cpu_list in 
self.available_cpu_for_binding_dict.items(): - if numa_pointer[numa_id] < len(available_cpu_list): - result_ranges.append(available_cpu_list[numa_pointer[numa_id]]) - numa_pointer[numa_id] += 1 - count -= 1 - if count == 0: - break - - for numa_id, available_cpu_list in self.available_cpu_for_binding_dict.items(): - self.available_cpu_for_binding_dict[numa_id] = available_cpu_list[numa_pointer[numa_id]:] - - return result_ranges + self.bind_cpu_list = bind_cpu_list def get_mysql_cpu_info(self): """ @@ -520,27 +536,6 @@ class ContainerCpuConfig(NumaConfigBase): return bind_cpu_list - def get_module_bind_cpu_list(self, module_thread_num): - """ 轮流选 CPU 进行绑核 """ - result_ranges = [] - count = module_thread_num - - numa_pointer = {numa_id: 0 for numa_id in self.available_cpu_for_binding_dict} - - while count > 0: - for numa_id, available_cpu_list in self.available_cpu_for_binding_dict.items(): - if numa_pointer[numa_id] < len(available_cpu_list): - result_ranges.append(available_cpu_list[numa_pointer[numa_id]]) - numa_pointer[numa_id] += 1 - count -= 1 - if count == 0: - break - - for numa_id, available_cpu_list in self.available_cpu_for_binding_dict.items(): - self.available_cpu_for_binding_dict[numa_id] = available_cpu_list[numa_pointer[numa_id]:] - - return result_ranges - def get_mysql_cpu_info(self): """ 获取 MYSQL_CPU_INFO:容器中的 NUMA 节点信息 diff --git a/pkg/deploy/action/cantian/cantian_install.py b/pkg/deploy/action/cantian/cantian_install.py index 9f44f30d6d6ea98ada66e7718b67ca7dd7b5c7cc..b8f854e2655305718d1b557d41c3659f9f0b7b45 100644 --- a/pkg/deploy/action/cantian/cantian_install.py +++ b/pkg/deploy/action/cantian/cantian_install.py @@ -301,9 +301,9 @@ def load_config_param(json_data): if json_data.get('cantian_in_container', 0) == '1': g_opts.cantian_in_container = True global DEPLOY_MODE - DEPLOY_MODE = json_data.get("deploy_mode", "").strip() + DEPLOY_MODE = get_value("deploy_mode") g_opts.db_type = json_data.get('db_type', '').strip() - 
g_opts.storage_dbstore_fs = json_data.get("storage_dbstore_fs", "").strip() + g_opts.storage_dbstor_fs = json_data.get("storage_dbstor_fs", "").strip() g_opts.storage_share_fs = json_data.get('storage_share_fs', "").strip() g_opts.namespace = json_data.get('cluster_name', 'test1').strip() g_opts.share_logic_ip = json_data.get('share_logic_ip', '127.0.0.1').strip() if DEPLOY_MODE == "file" else None @@ -1041,7 +1041,7 @@ class Installer: self.cantiand_configs["ARCHIVE_DEST_1"] = g_opts.archive_location self.cantiand_configs["MAX_ARCH_FILES_SIZE"] = g_opts.max_arch_files_size self.cantiand_configs["CLUSTER_ID"] = g_opts.cluster_id - self.add_config_for_dbstore() + self.add_config_for_dbstor() self.ssl_path = os.path.join(self.install_path, "sslkeys") self.show_parse_result() @@ -1063,7 +1063,7 @@ class Installer: LOGGER.info("Using set cantiand config parameters : " + str(conf_parameters)) LOGGER.info("End check parameters.") - def add_config_for_dbstore(self): + def add_config_for_dbstor(self): self.cantiand_configs["CONTROL_FILES"] = "{0}, {1}, {2}".format(os.path.join(self.data, "data/ctrl1"), os.path.join(self.data, "data/ctrl2"), os.path.join(self.data, "data/ctrl3")) @@ -1080,7 +1080,7 @@ class Installer: else: self.cantiand_configs["ENABLE_DBSTOR"] = "FALSE" self.cantiand_configs["SHARED_PATH"] = \ - '/mnt/dbdata/remote/storage_{}/data'.format(g_opts.storage_dbstore_fs) + '/mnt/dbdata/remote/storage_{}/data'.format(g_opts.storage_dbstor_fs) def parse_key_and_value(self): flags = os.O_RDONLY @@ -2410,7 +2410,7 @@ class Installer: # create data, cfg, log dir, trc data_dir = "%s/data" % self.data if not g_opts.use_dbstor: - mount_storage_data = f"/mnt/dbdata/remote/storage_{g_opts.storage_dbstore_fs}/data" + mount_storage_data = f"/mnt/dbdata/remote/storage_{g_opts.storage_dbstor_fs}/data" cmd = "ln -s %s %s;" % (mount_storage_data, self.data) ret_code, _, stderr = _exec_popen(cmd) if ret_code: @@ -2734,7 +2734,7 @@ class Installer: 
self.cantiand_configs["SHARED_PATH"] = "+vg1" else: self.cantiand_configs["SHARED_PATH"] = '/mnt/dbdata/remote/storage_{}/data'.format( - g_opts.storage_dbstore_fs) + g_opts.storage_dbstor_fs) # clean old backup log # backup log file before rm data @@ -3420,10 +3420,10 @@ def check_archive_dir(): raise Exception(err_msg) else: if any("arch" in line and (".arc" in line or "arch_file.tmp" in line) for line in output.splitlines()): - err_msg = "Archive files found in dbstore: %s" % output + err_msg = "Archive files found in dbstor: %s" % output LOGGER.error(err_msg) raise Exception(err_msg) - log("Checked the archive status in dbstore.") + log("Checked the archive status in dbstor.") class CanTian(object): @@ -3484,11 +3484,20 @@ class CanTian(object): if ret_code: LOGGER.error("can not get pid of cantiand or mysqld, command: %s, err: %s" % (cmd, stderr)) raise Exception("can not get pid of cantiand or mysqld, command: %s, err: %s" % (cmd, stderr)) - cantiand_pid = cantiand_pid.strip(" ") - if cantiand_pid is not None and len(cantiand_pid) > 0: - cmd = "echo 0x6f > " + sep_mark + "proc" + sep_mark + str(cantiand_pid) + \ - sep_mark + "coredump_filter" - ret_code, cantiand_pid, stderr = _exec_popen(cmd) + + cantiand_pids = cantiand_pid.strip().split() + if len(cantiand_pids) > 1: + error_message = (f"Detected multiple cantiand/mysqld processes ({', '.join(cantiand_pids)}). 
" + f"Please manually clean up the abnormal processes and retry.") + LOGGER.error(error_message) + raise Exception(error_message) + + cantiand_pid = cantiand_pids[0] + coredump_filter_path = os.path.join(sep_mark, "proc", str(cantiand_pid), "coredump_filter") + + if cantiand_pid: + cmd = f"echo 0x6f > {coredump_filter_path}" + ret_code, _, stderr = _exec_popen(cmd) if ret_code: LOGGER.error("can not set coredump_filter, command: %s, err: %s" % (cmd, stderr)) raise Exception("can not set coredump_filter, command: %s, err: %s" % (cmd, stderr)) diff --git a/pkg/deploy/action/cantian/get_config_info.py b/pkg/deploy/action/cantian/get_config_info.py index d2813870e3b0aadac0fe3676e928ad5aca81f51e..1f836d9be09463f2a5b57fb279270e3c5bd7f47b 100644 --- a/pkg/deploy/action/cantian/get_config_info.py +++ b/pkg/deploy/action/cantian/get_config_info.py @@ -142,7 +142,11 @@ def get_value(param): if param in kernel_params_list: return info_cantian_config.get(param, "") if param in mysql_kernel_params_list: - return info_cantian_config.get(param, "") + return info_cantian_config.get(param, "") + if param == "deploy_mode": + if info.get('deploy_mode', ""): + return info.get('deploy_mode') + return "dbstor" if info.get('deploy_policy', "") in ["ModeB", "ModeC"] else "file" return info.get(param, "") diff --git a/pkg/deploy/action/cantian/init_container.sh b/pkg/deploy/action/cantian/init_container.sh index f6e4de2f792853032d4e9bac3cd833b2949c0dca..affabc1bf32450db54319af60aba81a62fe71cfc 100644 --- a/pkg/deploy/action/cantian/init_container.sh +++ b/pkg/deploy/action/cantian/init_container.sh @@ -21,7 +21,7 @@ cantian_user=`python3 ${CURRENT_PATH}/get_config_info.py "deploy_user"` archive_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_archive_fs"` cluster_id=`python3 ${CURRENT_PATH}/get_config_info.py "cluster_id"` deploy_mode=`python3 ${CURRENT_PATH}/get_config_info.py "deploy_mode"` -storage_dbstore_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstore_fs"` 
+storage_dbstor_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstor_fs"` cluster_name=`python3 ${CURRENT_PATH}/get_config_info.py "cluster_name"` max_arch_files_size=`python3 ${CURRENT_PATH}/get_config_info.py "MAX_ARCH_FILES_SIZE"` cms_ip=`python3 ${CURRENT_PATH}/get_config_info.py "cms_ip"` @@ -88,7 +88,7 @@ function set_cantian_config() { elif [[ "$deploy_mode" == "file" ]]; then sed -i -r "s/(ENABLE_DBSTOR = ).*/\1FALSE/" ${CONFIG_PATH}/${CANTIAN_CONFIG_NAME} sed -i -r "s/(INTERCONNECT_TYPE = ).*/\1TCP/" ${CONFIG_PATH}/${CANTIAN_CONFIG_NAME} - sed -i "s|SHARED_PATH.*|SHARED_PATH = /mnt/dbdata/remote/storage_${storage_dbstore_fs}/data|g" ${CONFIG_PATH}/${CANTIAN_CONFIG_NAME} + sed -i "s|SHARED_PATH.*|SHARED_PATH = /mnt/dbdata/remote/storage_${storage_dbstor_fs}/data|g" ${CONFIG_PATH}/${CANTIAN_CONFIG_NAME} else echo "Unknown deployment mode: $deploy_mode" exit 1 diff --git a/pkg/deploy/action/cantian/install.sh b/pkg/deploy/action/cantian/install.sh index c0839c675e9b27ef1fce08e9d4e6a6b7654a0836..2747dc3c42f03bbabc9fe6d0f93b3624b0f5d553 100644 --- a/pkg/deploy/action/cantian/install.sh +++ b/pkg/deploy/action/cantian/install.sh @@ -66,6 +66,7 @@ function cantian_install() cp -rf ${RPM_UNPACK_PATH}/add-ons /opt/cantian/cantian/server/ cp -rf ${RPM_UNPACK_PATH}/bin /opt/cantian/cantian/server/ + rm -rf /opt/cantian/cantian/server/bin/cms cp -rf ${RPM_UNPACK_PATH}/lib /opt/cantian/cantian/server/ cp -rf ${RPM_UNPACK_PATH}/admin /opt/cantian/cantian/server/ cp -rf ${RPM_UNPACK_PATH}/cfg /opt/cantian/cantian/server/ diff --git a/pkg/deploy/action/cantian/install_config.json b/pkg/deploy/action/cantian/install_config.json index 55106b743593f984b1a4c52450a2eb96f41aedb9..232a91d7f5087af6f64efb3d87001b019afb05e9 100644 --- a/pkg/deploy/action/cantian/install_config.json +++ b/pkg/deploy/action/cantian/install_config.json @@ -3,7 +3,7 @@ "D_DATA_PATH": "/mnt/dbdata/local/cantian/tmp/data", "l_LOG_FILE": "/opt/cantian/log/cantian/cantian_deploy.log", 
"M_RUNING_MODE": "cantiand_in_cluster", - "USE_DBSTORE": "--dbstor", + "USE_DBSTOR": "--dbstor", "p_PACKAGE_AND_VERSION": "-P", "Z_KERNEL_PARAMETER1": "CHECKPOINT_PERIOD=1", "Z_KERNEL_PARAMETER2": "OPTIMIZED_WORKER_THREADS=2000", diff --git a/pkg/deploy/action/cantian/installdb.sh b/pkg/deploy/action/cantian/installdb.sh index f4356bab6dcbceb0e98d1ea8f35a053ffb626ee4..fbc7e75bf96126b38bf46825d7f7a3defe734bee 100644 --- a/pkg/deploy/action/cantian/installdb.sh +++ b/pkg/deploy/action/cantian/installdb.sh @@ -73,6 +73,12 @@ function wait_node0_online() { wait_for_success 5400 is_db0_online_by_cms } +function dss_reghl() { + log "start register node ${NODE_ID} by dss" + dsscmd reghl -D ${DSS_HOME} >> /dev/null 2>&1 + if [ $? != 0 ]; then err "failed to register node ${NODE_ID} by dss"; fi +} + function start_cantiand() { log "================ start cantiand ${NODE_ID} ================" ever_started=`python3 ${CURRENT_PATH}/get_config_info.py "CANTIAN_EVER_START"` @@ -92,14 +98,16 @@ function start_cantiand() { fi fi set -e - ever_started=`python3 ${CURRENT_PATH}/get_config_info.py "CANTIAN_EVER_START"` if [ "${NODE_ID}" != 0 ] && [ "${ever_started}" != "True" ]; then wait_node0_online || err "timeout waiting for node0" sleep 60 fi log "Start cantiand with mode=${START_MODE}, CTDB_HOME=${CTDB_HOME}, RUN_MODE=${RUN_MODE}" - + if [ ${deploy_mode} == "dss" ]; then + dss_reghl + fi + if [ "${RUN_MODE}" == "cantiand_with_mysql" ] || [ "${RUN_MODE}" == "cantiand_with_mysql_st" ] || [ "${RUN_MODE}" == "cantiand_with_mysql_in_cluster" ]; then if [ ! 
-f "${MYSQL_CONFIG_FILE}" ]; then err "Invalid mysql config file: ${MYSQL_CONFIG_FILE}" diff --git a/pkg/deploy/action/cantian/options.py b/pkg/deploy/action/cantian/options.py index a477ea3ef42a5f7cf2ce495103bf76a7ed02a1ea..66344a59ab4be22627ef894396464d7eed7c65b1 100644 --- a/pkg/deploy/action/cantian/options.py +++ b/pkg/deploy/action/cantian/options.py @@ -73,4 +73,4 @@ class Options(object): self.cert_encrypt_pwd = "" - self.storage_dbstore_fs = "" \ No newline at end of file + self.storage_dbstor_fs = "" \ No newline at end of file diff --git a/pkg/deploy/action/change_log_priority.sh b/pkg/deploy/action/change_log_priority.sh index fe63a51e7fc9d7d5b2cf6ac8dab60bb0a49391f5..ebc9c1dc7dd175b83288b805340b41f0b7f3242f 100644 --- a/pkg/deploy/action/change_log_priority.sh +++ b/pkg/deploy/action/change_log_priority.sh @@ -20,16 +20,10 @@ su - "${cantian_user}" -s /bin/bash -c "find /opt/cantian/log/ct_om -type f -pri su - "${cantian_user}" -s /bin/bash -c "find /opt/cantian/log/deploy -type f -print0 | xargs -0 chmod 660" > /dev/null 2>&1 su - "${cantian_user}" -s /bin/bash -c "find /opt/cantian/log/logicrep -type f -print0 | xargs -0 chmod 660" > /dev/null 2>&1 su - "${cantian_user}" -s /bin/bash -c "find /opt/cantian/log/cantian_exporter -type f -print0 | xargs -0 chmod 660" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "find /opt/cantian/log/mysql -type f -print0 | xargs -0 chmod 660" > /dev/null 2>&1 +su - "${deploy_user}" -s /bin/bash -c "find /opt/cantian/log/mysql -type f -print0 | xargs -0 chmod 660" > /dev/null 2>&1 su - "${cantian_user}" -s /bin/bash -c "find /opt/cantian/log/dbstor -type f -print0 | xargs -0 chmod 660" > /dev/null 2>&1 su - "${cantian_user}" -s /bin/bash -c "chgrp -R ${cantian_common_group} /mnt/dbdata/local/cantian/tmp/data/log/cantianstatus.log" > /dev/null 2>&1 su - "${cantian_user}" -s /bin/bash -c "chgrp -R ${cantian_common_group} /opt/cantian/log" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "chown -R 
:${cantian_common_group} /opt/cantian/log" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "chown -R :${cantian_common_group} /opt/cantian/dbstor/" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "chown -R :${cantian_common_group} /opt/cantian/cantian/" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "chown -R :${cantian_common_group} /opt/cantian/cms/" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "chown -R :${cantian_common_group} /opt/cantian/logicrep" > /dev/null 2>&1 -su - "${cantian_user}" -s /bin/bash -c "chown -R :${cantian_common_group} /opt/cantian/mysql" > /dev/null 2>&1 - +su - "${deploy_user}" -s /bin/bash -c "chgrp -R ${cantian_common_group} /opt/cantian/log/mysql" > /dev/null 2>&1 diff --git a/pkg/deploy/action/cms/cms_start2.sh b/pkg/deploy/action/cms/cms_start2.sh index 894079451831315bec34da7e183804d65b6c1b65..d539c517b70b5110f819685f612d00f9f4a3a4c3 100644 --- a/pkg/deploy/action/cms/cms_start2.sh +++ b/pkg/deploy/action/cms/cms_start2.sh @@ -66,6 +66,11 @@ start_cms() { cms_srv_pid=$! 
log "=========== wait for cms server start, pid[${cms_srv_pid}]================" wait_for_cms_start + cms_if_use_dss=`cms res -list | grep "dss" | wc -l` + if [ $cms_if_use_dss -ne 0 ]; then + log "=========== start dss ${NODE_ID} ================" + cms res -start dss -node ${NODE_ID} + fi log "=========== start cantian ${NODE_ID} ================" cms res -start db -node ${NODE_ID} } diff --git a/pkg/deploy/action/cms/cmsctl.py b/pkg/deploy/action/cms/cmsctl.py index 54050c606f5d1b464b82e4bc7baac88e050926db..5bafe0b2c7843c02debbe71c6479210c0c3ddd32 100644 --- a/pkg/deploy/action/cms/cmsctl.py +++ b/pkg/deploy/action/cms/cmsctl.py @@ -1475,7 +1475,7 @@ class CmsCtl(object): if ret_code == 0: LOGGER.info("clean gcc home cmd : %s" % str_cmd) ret_code, stdout, stderr = _exec_popen(str_cmd) - if ret_code and deploy_mode not in USE_DBSTOR and self.install_step < 2: + if ret_code and deploy_mode in USE_DBSTOR and self.install_step < 2: LOGGER.info("cms install failed, no need to clean gcc file") elif ret_code: output = stdout + stderr diff --git a/pkg/deploy/action/cms/get_config_info.py b/pkg/deploy/action/cms/get_config_info.py index 4edc4838112a746a955885b84374628125dcc0c1..3bc1a92218d442eeaa5e07558417b7c87337705a 100644 --- a/pkg/deploy/action/cms/get_config_info.py +++ b/pkg/deploy/action/cms/get_config_info.py @@ -33,6 +33,11 @@ def get_value(param): cms_conf = json.loads(file.read()) return cms_conf.get("install_step") + if param == "deploy_mode": + if info.get('deploy_mode', ""): + return info.get('deploy_mode') + return "dbstor" if info.get('deploy_policy', "") in ["ModeB", "ModeC"] else "file" + return info.get(param, "") diff --git a/pkg/deploy/action/config_params.json b/pkg/deploy/action/config_params.json index 96502db8edc983c328484bf7d43685eab23a3cd0..f3ef500c9f7ba956a732a6d1bead516672846255 100644 --- a/pkg/deploy/action/config_params.json +++ b/pkg/deploy/action/config_params.json @@ -8,11 +8,11 @@ "cantian_vlan_ip": "10.77.77.100;10.77.77.101", 
"storage_vlan_ip": "10.77.77.102", "cms_ip": "192.168.86.1;192.168.86.2", - "storage_dbstore_fs": "foo", - "storage_dbstore_page_fs": "foo_page", + "storage_dbstor_fs": "foo", + "storage_dbstor_page_fs": "foo_page", "storage_share_fs": "share", "storage_archive_fs": "archive", - "dbstore_fs_vstore_id": "0", + "dbstor_fs_vstore_id": "0", "mes_type": "UC", "mes_ssl_switch": true, "MAX_ARCH_FILES_SIZE": "300G", @@ -28,7 +28,7 @@ "dm_user": "", "esn": "", "pool_id": "", - "dbstore_fs_vstore_id": "" + "dbstor_fs_vstore_id": "" } } } \ No newline at end of file diff --git a/pkg/deploy/action/config_params_file.json b/pkg/deploy/action/config_params_file.json index 92a8adb44f798784273d1c6f9be98655b89cbd48..8064a59092dfd83db30a7894b1d1f95848118426 100644 --- a/pkg/deploy/action/config_params_file.json +++ b/pkg/deploy/action/config_params_file.json @@ -3,7 +3,7 @@ "deploy_user": "ctdba:ctdba", "node_id": "0", "cms_ip": "192.168.86.1;192.168.86.2", - "storage_dbstore_fs": "foo", + "storage_dbstor_fs": "foo", "storage_share_fs": "share", "storage_archive_fs": "archive", "storage_metadata_fs": "metadata", diff --git a/pkg/deploy/action/dbstor/check_usr_pwd.sh b/pkg/deploy/action/dbstor/check_usr_pwd.sh index 6c5256a88e6a98d5737a55e90f2ebeb64eb99192..e0a3cb8a4a3cd26afc2aa35ff52aeff04d50a2ce 100644 --- a/pkg/deploy/action/dbstor/check_usr_pwd.sh +++ b/pkg/deploy/action/dbstor/check_usr_pwd.sh @@ -46,9 +46,9 @@ function check_dr_status_in_container() function execute_dbstor_query_file() { local fs=$(python3 ${CURRENT_PATH}/../cantian/get_config_info.py "$1") - if [ $1 == "storage_dbstore_fs" ];then - local dbstore_fs_vstore_id=$(python3 ${CURRENT_PATH}/../cantian/get_config_info.py "dbstore_fs_vstore_id") - out=$(/opt/cantian/image/Cantian-RUN-CENTOS-64bit/bin/dbstor --query-fs-info --fs-name="${fs}" --vstore_id="${dbstore_fs_vstore_id}" ) + if [ $1 == "storage_dbstor_fs" ];then + local dbstor_fs_vstore_id=$(python3 ${CURRENT_PATH}/../cantian/get_config_info.py 
"dbstor_fs_vstore_id") + out=$(/opt/cantian/image/Cantian-RUN-CENTOS-64bit/bin/dbstor --query-fs-info --fs-name="${fs}" --vstore_id="${dbstor_fs_vstore_id}" ) result=$? else out=$(/opt/cantian/image/Cantian-RUN-CENTOS-64bit/bin/dbstor --query-fs-info --fs-name="${fs}" --vstore_id=0 ) @@ -66,8 +66,8 @@ function execute_dbstor_query_file() else fs_mode=$(echo ${out} | grep -c "fs_mode = 1") if [[ ${fs_mode} -gt 0 ]];then - if [ $1 == "storage_dbstore_fs" ];then - out=$(/opt/cantian/image/Cantian-RUN-CENTOS-64bit/bin/dbstor --query-file --fs-name="${fs}" --vstore_id="${dbstore_fs_vstore_id}" ) + if [ $1 == "storage_dbstor_fs" ];then + out=$(/opt/cantian/image/Cantian-RUN-CENTOS-64bit/bin/dbstor --query-file --fs-name="${fs}" --vstore_id="${dbstor_fs_vstore_id}" ) return_code=$? echo ${out} >> /opt/cantian/log/dbstor/install.log if [[ ${return_code} -ne 0 ]];then @@ -90,8 +90,8 @@ function check_file_system() check_dr_status_in_container dr_stat=$? if [[ ! -f "${CURRENT_PATH}/../../config/dr_deploy_param.json" ]] && [[ ${dr_stat} -ne 1 ]];then - execute_dbstor_query_file "storage_dbstore_fs" - execute_dbstor_query_file "storage_dbstore_page_fs" + execute_dbstor_query_file "storage_dbstor_fs" + execute_dbstor_query_file "storage_dbstor_page_fs" fi echo "File system check pass" >> /opt/cantian/log/dbstor/install.log } diff --git a/pkg/deploy/action/dbstor/dbstor_install.py b/pkg/deploy/action/dbstor/dbstor_install.py index a708f54faf1c0c1663b16de70e2c4e1137209e3f..43cbd1540e39164d8dfb6f800f6907da5e5a799e 100644 --- a/pkg/deploy/action/dbstor/dbstor_install.py +++ b/pkg/deploy/action/dbstor/dbstor_install.py @@ -28,7 +28,6 @@ try: from kmc_adapter import CApiWrapper from init_unify_config import ConfigTool - DBSTORE_LOG_PATH = "/opt/cantian/dbstor" LOG_PATH = "/opt/cantian/log/dbstor" LOG_FILE = "/opt/cantian/log/dbstor/install.log" JS_CONF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json") @@ -283,7 +282,7 @@ class DBStor: 
self.cluster_name = "" self.cluster_id = "" self.cantian_in_container = "" - self.dbstore_fs_vstore_id = "0" + self.dbstor_fs_vstore_id = "0" self.dbstor_page_fs_vstore_id = "0" self.dbstor_home="/opt/cantian/dbstor" self.dbstor_log="/opt/cantian/log/dbstor" @@ -325,13 +324,13 @@ class DBStor: def read_dbstor_para(self): with os.fdopen(os.open(JS_CONF_FILE, os.O_RDONLY | os.O_EXCL, stat.S_IWUSR | stat.S_IRUSR), "r") as file_obj: json_data = json.load(file_obj) - self.dbstor_config['NAMESPACE_FSNAME'] = json_data.get('storage_dbstore_fs', "").strip() - self.dbstor_config['NAMESPACE_PAGE_FSNAME'] = json_data.get('storage_dbstore_page_fs', "").strip() + self.dbstor_config['NAMESPACE_FSNAME'] = json_data.get('storage_dbstor_fs', "").strip() + self.dbstor_config['NAMESPACE_PAGE_FSNAME'] = json_data.get('storage_dbstor_page_fs', "").strip() self.dbstor_config['LOCAL_IP'] = json_data.get('cantian_vlan_ip', "").strip() self.dbstor_config['REMOTE_IP'] = json_data.get('storage_vlan_ip', "").strip() self.dbstor_config['NODE_ID'] = json_data.get('node_id', "").strip() self.dbstor_config['LINK_TYPE'] = json_data.get('link_type', "").strip() - self.dbstor_config['LOG_VSTOR'] = json_data.get('dbstore_fs_vstore_id', "0").strip() + self.dbstor_config['LOG_VSTOR'] = json_data.get('dbstor_fs_vstore_id', "0").strip() self.dbstor_config['PAGE_VSTOR'] = json_data.get('dbstor_page_fs_vstore_id', "0").strip() if json_data.get('link_type', "").strip() != '0': self.dbstor_config['LINK_TYPE'] = '1' @@ -345,11 +344,11 @@ class DBStor: def check_dbstor_para(self): logger.info("Checking parameters.") if len(self.dbstor_config.get('NAMESPACE_FSNAME', "").strip()) == 0: - message = "The storage_dbstore_fs parameter is not entered" + message = "The storage_dbstor_fs parameter is not entered" console_and_log(message) raise ValueError(message) if len(self.dbstor_config.get('NAMESPACE_PAGE_FSNAME', "").strip()) == 0: - message = "The storage_dbstore_page_fs parameter is not entered" + message = "The 
storage_dbstor_page_fs parameter is not entered" console_and_log(message) raise ValueError(message) if len(self.dbstor_config.get('LOCAL_IP', "").strip()) == 0: @@ -593,7 +592,7 @@ class DBStor: value = conf.get(SECTION, option) self.dbstor_config[option.strip().upper()] = value.strip() if "LOG_VSTOR" not in self.dbstor_config.keys(): - self.dbstor_config["LOG_VSTOR"] = self.dbstore_fs_vstore_id + self.dbstor_config["LOG_VSTOR"] = self.dbstor_fs_vstore_id if "PAGE_VSTOR" not in self.dbstor_config.keys(): self.dbstor_config["PAGE_VSTOR"] = self.dbstor_page_fs_vstore_id if "DBS_LOG_PATH" not in self.dbstor_config.keys(): @@ -636,7 +635,7 @@ class DBStor: self.node_id = json_data.get('node_id', "").strip() self.cluster_id = json_data.get('cluster_id', "").strip() self.cantian_in_container = json_data.get('cantian_in_container', "0").strip() - self.dbstore_fs_vstore_id = json_data.get('dbstore_fs_vstore_id', "0").strip() + self.dbstor_fs_vstore_id = json_data.get('dbstor_fs_vstore_id', "0").strip() self.conf_file_path = "/opt/cantian/dbstor/tools" self.backup_conf_file = os.path.join(BACKUP_CONF_FILE, "dbstor_config.ini") self.cluster_name = json_data.get("cluster_name", '') diff --git a/pkg/deploy/action/dbstor/init_container.sh b/pkg/deploy/action/dbstor/init_container.sh index f860013146d6530d6ffb0b011599b44e9478ec91..2f5451c5bd16252663f9f1409edd38bcac987676 100644 --- a/pkg/deploy/action/dbstor/init_container.sh +++ b/pkg/deploy/action/dbstor/init_container.sh @@ -10,12 +10,12 @@ DBSTOR_PWD="dbstorPwd" function set_dbstor_config() { deploy_mode=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "deploy_mode"` - storage_dbstore_fs=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "storage_dbstore_fs"` - storage_dbstore_page_fs=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "storage_dbstore_page_fs"` + storage_dbstor_fs=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "storage_dbstor_fs"` + storage_dbstor_page_fs=`python3 
${CURRENT_PATH}/../cantian/get_config_info.py "storage_dbstor_page_fs"` link_type=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "link_type"` node_id=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "node_id"` cluster_id=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "cluster_id"` - log_vstor=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "dbstore_fs_vstore_id"` + log_vstor=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "dbstor_fs_vstore_id"` cantian_vlan_name=`python3 ${CURRENT_PATH}/../cantian/get_config_info.py "cantian_vlan_ip"` cantian_vlan_ip="" @@ -60,8 +60,8 @@ function set_dbstor_config() { dbstor_user=`cat ${DORADO_CONF_PATH}/${DBSTOR_USER}` dbstor_pwd=`cat ${DORADO_CONF_PATH}/${DBSTOR_PWD}` - sed -i -r "s:(NAMESPACE_FSNAME = ).*:\1${storage_dbstore_fs}:g" ${DBSTOR_CONFIG_PATH} - sed -i -r "s:(NAMESPACE_PAGE_FSNAME = ).*:\1${storage_dbstore_page_fs}:g" ${DBSTOR_CONFIG_PATH} + sed -i -r "s:(NAMESPACE_FSNAME = ).*:\1${storage_dbstor_fs}:g" ${DBSTOR_CONFIG_PATH} + sed -i -r "s:(NAMESPACE_PAGE_FSNAME = ).*:\1${storage_dbstor_page_fs}:g" ${DBSTOR_CONFIG_PATH} sed -i -r "s:(DPU_UUID = ).*:\1${dpu_uuid}:g" ${DBSTOR_CONFIG_PATH} sed -i -r "s:(LINK_TYPE = ).*:\1${link_type}:g" ${DBSTOR_CONFIG_PATH} sed -i -r "s:(LOCAL_IP = ).*:\1${cantian_vlan_ip}:g" ${DBSTOR_CONFIG_PATH} diff --git a/pkg/deploy/action/dbstor/init_unify_config.py b/pkg/deploy/action/dbstor/init_unify_config.py index 81a7b4ff33ca876c40761ed6e73795354f467440..bb223578213d2ac62da159647a032e188a8444f9 100644 --- a/pkg/deploy/action/dbstor/init_unify_config.py +++ b/pkg/deploy/action/dbstor/init_unify_config.py @@ -7,7 +7,7 @@ import logging import subprocess from configparser import ConfigParser -DBSTORE_LOG_PATH = "/opt/cantian/dbstor" + LOG_PATH = "/opt/cantian/log/dbstor" LOG_FILE = "/opt/cantian/log/dbstor/install.log" JS_CONF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../config/deploy_param.json") @@ -76,7 +76,7 @@ class ConfigTool: 
self.cluster_name = "" self.cluster_id = "" self.cantian_in_container = "" - self.dbstore_fs_vstore_id = "0" + self.dbstor_fs_vstore_id = "0" self.dbstor_page_fs_vstore_id = "0" self.dbstor_home="/opt/cantian/dbstor" self.dbstor_log_path="/opt/cantian/log/dbstor" @@ -99,7 +99,7 @@ class ConfigTool: self.node_id = json_data.get('node_id', "").strip() self.cluster_id = json_data.get('cluster_id', "").strip() self.cantian_in_container = json_data.get('cantian_in_container', "0").strip() - self.dbstore_fs_vstore_id = json_data.get('dbstore_fs_vstore_id', "0").strip() + self.dbstor_fs_vstore_id = json_data.get('dbstor_fs_vstore_id', "0").strip() self.cluster_name = json_data.get("cluster_name", '') def create_unify_dbstor_config(self): diff --git a/pkg/deploy/action/dbstor/update_dbstor_config.py b/pkg/deploy/action/dbstor/update_dbstor_config.py index 17e2859ab656ce08909d91a29c172b02c6737411..eb3fcc72016025e29673f4f156000c0ba9660fd4 100644 --- a/pkg/deploy/action/dbstor/update_dbstor_config.py +++ b/pkg/deploy/action/dbstor/update_dbstor_config.py @@ -237,9 +237,9 @@ def parse_parameter(): def check_parameter(): console_and_log("Checking parameters.") if len(db_opts.dbstor_config.get("NAMESPACE_FSNAME", "").strip()) == 0: - log_exit("The storage_dbstore_fs parameter is not entered") + log_exit("The storage_dbstor_fs parameter is not entered") if len(db_opts.dbstor_config.get("NAMESPACE_PAGE_FSNAME", "").strip()) == 0: - log_exit("The storage_dbstore_page_fs parameter is not entered") + log_exit("The storage_dbstor_page_fs parameter is not entered") if len(db_opts.dbstor_config.get("DPU_UUID", "").strip()) == 0: log_exit("The uuid parameter is not exist") if len(db_opts.dbstor_config.get("LOCAL_IP", "").strip()) == 0: diff --git a/pkg/deploy/action/docker/cantian_initer.sh b/pkg/deploy/action/docker/cantian_initer.sh index 63d9e22f6a490e8346d92480a343c235ebfb959b..8cee9d1f00c142860c4639790e96f816f7707d73 100644 --- a/pkg/deploy/action/docker/cantian_initer.sh +++ 
b/pkg/deploy/action/docker/cantian_initer.sh @@ -175,8 +175,8 @@ function mount_fs() { chown ${deploy_user}:${cantian_common_group} /mnt/dbdata/remote/metadata_${storage_metadata_fs}/node1 chmod 755 /mnt/dbdata/remote # 多租会缺少这个标记文件,这里补上 - DEPLOY_MODE_DBSTORE_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag - touch "${DEPLOY_MODE_DBSTORE_UNIFY_FLAG}" + DEPLOY_MODE_DBSTOR_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag + touch "${DEPLOY_MODE_DBSTOR_UNIFY_FLAG}" return 0 fi logAndEchoInfo "Begin to mount file system. [Line:${LINENO}, File:${SCRIPT_NAME}]" diff --git a/pkg/deploy/action/docker/config/configMap-file.yaml b/pkg/deploy/action/docker/config/configMap-file.yaml index 1e49716c5c3c50ecda4f78319be434a505aa159d..92e07ed816dd6fb6b3a928c8109964c91381051e 100644 --- a/pkg/deploy/action/docker/config/configMap-file.yaml +++ b/pkg/deploy/action/docker/config/configMap-file.yaml @@ -10,7 +10,7 @@ data: "deploy_user": "ctdba:ctdba", "node_id": "0", "cms_ip": "192.168.86.1;192.168.86.2", - "storage_dbstore_fs": "foo", + "storage_dbstor_fs": "foo", "storage_share_fs": "share", "storage_archive_fs": "archive", "storage_metadata_fs": "metadata", @@ -40,7 +40,7 @@ data: "deploy_user": "ctdba:ctdba", "node_id": "1", "cms_ip": "192.168.86.1;192.168.86.2", - "storage_dbstore_fs": "foo", + "storage_dbstor_fs": "foo", "storage_share_fs": "share", "storage_archive_fs": "archive", "storage_metadata_fs": "metadata", diff --git a/pkg/deploy/action/docker/config/configMap.yaml b/pkg/deploy/action/docker/config/configMap.yaml index 6e6f8d4c3e2f9ca78acdc2af21022ff5c0076427..ddc4997df6bd4934620cee60cddb4ea5c6ea69c7 100644 --- a/pkg/deploy/action/docker/config/configMap.yaml +++ b/pkg/deploy/action/docker/config/configMap.yaml @@ -15,11 +15,11 @@ data: "cantian_vlan_ip": "net1|net2", "storage_vlan_ip": "66.66.66.10;66.66.66.11|77.77.77.10;77.77.77.11", "cms_ip": "cantian-1-node0.cantian-svc-1.cantian.svc.cluster.local;cantian-1-node1.cantian-svc-1.cantian.svc.cluster.local", 
- "storage_dbstore_fs": "foo", - "storage_dbstore_page_fs": "foo_page", + "storage_dbstor_fs": "foo", + "storage_dbstor_page_fs": "foo_page", "storage_share_fs": "share", "storage_archive_fs": "archive", - "dbstore_fs_vstore_id": "0", + "dbstor_fs_vstore_id": "0", "mes_type": "UC", "mes_ssl_switch": true, "MAX_ARCH_FILES_SIZE": "300G", @@ -38,7 +38,7 @@ data: "dm_user": "", "esn": "", "pool_id": "", - "dbstore_fs_vstore_id": "" + "dbstor_fs_vstore_id": "" } } } @@ -61,11 +61,11 @@ data: "cantian_vlan_ip": "net1|net2", "storage_vlan_ip": "66.66.66.10;66.66.66.11|77.77.77.10;77.77.77.11", "cms_ip": "cantian-1-node0.cantian-svc-1.cantian.svc.cluster.local;cantian-1-node1.cantian-svc-1.cantian.svc.cluster.local", - "storage_dbstore_fs": "foo", - "storage_dbstore_page_fs": "foo_page", + "storage_dbstor_fs": "foo", + "storage_dbstor_page_fs": "foo_page", "storage_share_fs": "share", "storage_archive_fs": "archive", - "dbstore_fs_vstore_id": "0", + "dbstor_fs_vstore_id": "0", "mes_type": "UC", "mes_ssl_switch": true, "MAX_ARCH_FILES_SIZE": "300G", @@ -84,7 +84,7 @@ data: "dm_user": "", "esn": "", "pool_id": "", - "dbstore_fs_vstore_id": "" + "dbstor_fs_vstore_id": "" } } } diff --git a/pkg/deploy/action/docker/dr_deploy.py b/pkg/deploy/action/docker/dr_deploy.py index 21219b04bfba70f9f4d0f64a72b355ada59faacc..2f697a483968c33883823e6da30cda0ad6a6d534 100644 --- a/pkg/deploy/action/docker/dr_deploy.py +++ b/pkg/deploy/action/docker/dr_deploy.py @@ -129,8 +129,8 @@ def execute_command(command, raise_flag=False, timeout=None): def get_dm_password(): password_file = os.path.join(DORADO_CONF_PATH, DM_PWD) if not os.path.exists(password_file): - LOG.error("DM password file not found.") - raise Exception("get dm password file not found.") + LOG.error("DM password file not found.") + raise Exception("get dm password file not found.") encode_dm_password = get_file_content(password_file) decode_dm_password = resolve_kmc_pwd(encode_dm_password) @@ -141,7 +141,7 @@ def get_dr_status(dm_password=None): 
 if not dm_password: dm_password = get_dm_password() if dm_password == "": - LOG.error("DM Password is empty.") + LOG.error("DM password is empty.") cmd = (f"echo -e '{dm_password}' | sh {SCRIPT_PATH}/appctl.sh dr_operate progress_query " f"--action=check --display=table 2>&1 | grep -E '^\-|^\|'") execute_command(cmd, raise_flag=True, timeout=30) @@ -200,8 +200,8 @@ def dr_deploy(role=None, dm_password=None, mysql_pwd='', delete_flag=False): if not dm_password: dm_password = get_dm_password() if dm_password == "": - LOG.error("DM Password is empty.") - raise Exception("get dm_password failed.") + LOG.error("DM Password is empty.") + raise Exception("get dm_password failed.") if not role: role = get_value("dr_deploy.role") cmd = (f"echo -e '{dm_password}' | sh {SCRIPT_PATH}/appctl.sh dr_operate pre_check {role} " @@ -328,7 +328,7 @@ def main(): mysql_pwd = '' delete_config = False if len(sys.argv) == 1: - mysql_pwd = getpass.getpass("Please input mysql login passwd:") + mysql_pwd = getpass.getpass("Please input mysql login password:") delete_config = True if len(sys.argv) > 1: diff --git a/pkg/deploy/action/docker/mount.sh b/pkg/deploy/action/docker/mount.sh index e6da29da4a5a8a65480c36a44d0fab55ace0ca3c..1b782529510f1d19c49b7e13cb5129551d233727 100644 --- a/pkg/deploy/action/docker/mount.sh +++ b/pkg/deploy/action/docker/mount.sh @@ -112,7 +112,7 @@ function mount_fs() { if [[ x"${deploy_mode}" == x"file" ]]; then share_logic_ip=`python3 ${CURRENT_PATH}/get_config_info.py "share_logic_ip"` - storage_dbstore_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstore_fs"` + storage_dbstor_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstor_fs"` storage_logic_ip=`python3 ${CURRENT_PATH}/get_config_info.py "storage_logic_ip"` # nas模式才挂载share nfs mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev ${share_logic_ip}:/${storage_share_fs} /mnt/dbdata/remote/share_${storage_share_fs} @@ -123,22 +123,22 @@ function mount_fs() { chown -hR 
"${cantian_user}":"${cantian_group}" /mnt/dbdata/remote/share_${storage_share_fs} > /dev/null 2>&1 checkMountNFS ${share_result} - mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" - mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev "${storage_logic_ip}":/"${storage_dbstore_fs}" /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" - dbstore_result=$? - if [ ${dbstore_result} -ne 0 ]; then - logAndEchoError "mount dbstore nfs failed" + mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" + mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev "${storage_logic_ip}":/"${storage_dbstor_fs}" /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" + dbstor_result=$? + if [ ${dbstor_result} -ne 0 ]; then + logAndEchoError "mount dbstor nfs failed" fi - chown "${cantian_user}":"${cantian_user}" /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" - checkMountNFS ${dbstore_result} + chown "${cantian_user}":"${cantian_user}" /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" + checkMountNFS ${dbstor_result} - mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/data - mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/share_data + mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/data + mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/share_data rm -rf /mnt/dbdata/local/cantian/tmp/data/data - ln -s /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/data/ /mnt/dbdata/local/cantian/tmp/data/data + ln -s /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/data/ /mnt/dbdata/local/cantian/tmp/data/data chown -h ${cantian_user}:${cantian_user} /mnt/dbdata/local/cantian/tmp/data/data - chown -h ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/data - chown -h ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/share_data + chown -h ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/data + chown -h 
${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/share_data fi # 检查nfs是否都挂载成功 diff --git a/pkg/deploy/action/dss/appctl.sh b/pkg/deploy/action/dss/appctl.sh index 68f5caded3436375421454e2f20b3fae84e9871e..98c5e062f3488f5b0fd52a5454f51500e5670097 100644 --- a/pkg/deploy/action/dss/appctl.sh +++ b/pkg/deploy/action/dss/appctl.sh @@ -65,8 +65,11 @@ function permission_opt() { chown -hR "${cantian_user}":"${cantian_group}" "${DSS_SOURCE}" chown "${cantian_user}":"${cantian_group}" "${CURRENT_PATH}"/* chown root:root "${CURRENT_PATH}"/appctl.sh - mkdir -p /opt/cantian/dss/log - touch /opt/cantian/dss/log/dss_deploy.log + mkdir -p /opt/cantian/log/dss + touch /opt/cantian/log/dss/dss_deploy.log + chmod -R 750 /opt/cantian/log/dss/ + chown -hR "${cantian_user}":"${cantian_group}" /opt/cantian/log/dss/ + mkdir -m 750 -p /opt/cantian/dss/ chown -hR "${cantian_user}":"${cantian_group}" /opt/cantian/dss/ } diff --git a/pkg/deploy/action/dss/dss_contrl.sh b/pkg/deploy/action/dss/dss_contrl.sh index 57d527664ee7381ba012d7e5fdb5623f6fda529f..250fc6ab88fd5c11cb880593d663c0fa559904aa 100644 --- a/pkg/deploy/action/dss/dss_contrl.sh +++ b/pkg/deploy/action/dss/dss_contrl.sh @@ -1,86 +1,41 @@ #!/bin/bash -export PATH=${GAUSSHOME}/bin:$PATH -export LD_LIBRARY_PATH=${GAUSSHOME}/lib:${GAUSSHOME}/add-ons:$LD_LIBRARY_PATH +source ~/.bashrc -curr_path=`dirname $(readlink -f $0)` -curr_filename=`basename $(readlink -f $0)` -os_user=`whoami` - -file_user=`ls -l ${curr_path}"/${curr_filename}" | awk '{print $3}'` - -if [ ${file_user} != ${os_user} ]; then - echo "Can't run ${curr_filename}, because it does not belong to the current user!" 
- exit 1 -fi +USER=`whoami` +if [ "${USER}" = "root" ] +then + USER=$(grep '"U_USERNAME_AND_GROUP"' /opt/cantian/action/cantian/install_config.json | cut -d '"' -f 4 | sed 's/:.*//') +fi -GSDB_BIN=gaussdb -GSDB_BIN_FULL=${GAUSSHOME}/bin/gaussdb DSS_BIN=dssserver -DSS_BIN_FULL=${GAUSSHOME}/bin/dssserver -BIN_PATH=${GAUSSHOME}/bin -SCRIPT_NAME=$0 +BIN_PATH=${DSS_HOME}/bin +SCRIPT_NAME=`basename $(readlink -f $0)` +CONN_PATH=UDS:${DSS_HOME}/.dss_unix_d_socket +PARM=$1 +NODE_ID=$2 usage() { - echo "Usage: $0 [cmd] [dssserver_id] [DSS_HOME] [GSDB_HOME]" + echo "Usage:" echo "cmd:" - echo " -start: start dssserver" - echo " -stop: stop dssserver&create dn_stop_flag_file" - echo " -check: check dssserver" - echo " -clean: clean dssserver&${GSDB_BIN}" - echo " -reg: register dssserver" - echo " -unreg: unregister dssserver" - echo " -isreg: check whether dssserver is registered" - echo "dssserver_id:" - echo " dssserver id" - echo "DSS_HOME:" - echo " dssserver data path" - echo "GSDB_HOME:" - echo " ${GSDB_BIN} data path" -} - -if [ $# -lt 4 ] -then - echo "parameter numbers not meet, num=$#." - usage - exit 1 -fi - -log() -{ - time=`date "+%Y-%m-%d %H:%M:%S"` - echo "[$time][DSS]$1" >> ${startdss_log} 2>&1 -} - -assert_empty() -{ - return -} - -assert_nonempty() -{ - if [[ -z ${2} ]] - then - log "[SCRIPT]The ${1} parameter is empty." 
- exit 1 - fi + echo " $0 -start node_id: start dssserver" + echo " $0 -stop node_id: kill dssserver" + echo " $0 -stop_force node_id: kill dssserver by force" + echo " $0 -check node_id: check dssserver" + echo " $0 -reg node_id: register dssserver" + echo " $0 -kick node_id: unregister dssserver" + echo " $0 -isreg node_id: check whether dssserver is registered" } program_pid() { - pid=`ps -f f -u \`whoami\` | grep -w ${1} | grep ${2} | grep -v grep | grep -v ${SCRIPT_NAME} | awk '{print $2}' | tail -1` - echo ${pid} -} - -program_pid2() -{ - pid=`ps -f f -u \`whoami\` | grep -w ${1} | grep -v grep | grep -v ${SCRIPT_NAME} | awk '{print $2}'` + pid=`ps -f f -u ${USER} | grep -w ${DSS_BIN} | grep ${DSS_HOME} | grep -v grep | grep -v ${SCRIPT_NAME} | awk '{print $2}' | tail -1` echo ${pid} } program_status() { - pid=`program_pid $1 $2` + pid=`program_pid` if [[ -z ${pid} ]]; then echo "" return @@ -98,138 +53,49 @@ program_status() echo "" } -kill_program() +function check_dss() { - assert_nonempty 1 ${1} - assert_nonempty 2 ${2} - pid=`program_pid $1 $2` - if [[ -z ${pid} ]] - then - log "[KILL]${1} is already dead." - return - fi - - kill -9 ${pid} - if [ $? -ne 0 ] + dss_status=$(program_status) + if [[ -z ${dss_status} ]] then - log "[KILL]ERROR! ${1} with pid:${pid} is not killed..." - exit 1 + echo "RES_FAILED" + return 1 fi - for ((i=0; i < 30; i++)) - do - ps -f -p "${pid}" | grep ${1} - if [ $? -eq 0 ] - then - sleep 0.1 - else - log "[KILL]SUCCESS!" - return - fi - done - - log "[KILL]ERROR! ${1} with pid:${pid} is not killed..." 
- exit 1 -} - -function clear_script_log -{ - local _log_dir=$1 - local _log_name=$2 - local _max_log_backup=$3 - - if [ -L ${_log_dir} ]; then - typeset log_num=`find -L "${_log_dir}" -maxdepth 1 -type f -name "${_log_name}*" | wc -l` - if [ ${log_num} -ge ${_max_log_backup} ];then - find -L "${_log_dir}" -maxdepth 1 -type f -name "${_log_name}*" | xargs ls -t {} 2>/dev/null | tail -n $(expr ${log_num} - ${_max_log_backup}) | xargs -i rm -f {} - fi - else - typeset log_num=`find -L "${_log_dir}" -maxdepth 1 -type f -name "${_log_name}*" | wc -l` - if [ ${log_num} -ge ${_max_log_backup} ];then - find "${_log_dir}" -maxdepth 1 -type f -name "${_log_name}*" | xargs ls -t {} 2>/dev/null | tail -n $(expr ${log_num} - ${_max_log_backup}) | xargs -i rm -f {} - fi - fi -} - -check_log_file() -{ - log_path=$1 - log_file=$2 - operation=$3 - # max log file size 16 * 1024 * 1024 - MAX_LOG_SIZE=16777216 - MAX_LOG_BACKUP=10 - log_file_size=$(ls -l ${log_file} |awk '{print $5}') - if [ -f ${log_file} ];then - if [ ${log_file_size} -ge ${MAX_LOG_SIZE} ];then - mv -f ${log_file} "${log_path}/${operation} - `date +%Y-%m-%d_%H%M%S`.log" 2>/dev/null - clear_script_log "${log_path}" "${operation}-" $MAX_LOG_BACKUP - fi - fi -} - -touch_logfile() -{ - log_file=$1 - if [ ! -f $log_file ] + if [[ "${dss_status}" == "D" || "${dss_status}" == "T" || "${dss_status}" == "Z" ]] then - touch $log_file - chmod 600 $log_file + echo "RES_EAGAIN" + return 3 fi + return 0 } -assert_nonempty 1 ${1} -assert_nonempty 2 ${2} -assert_nonempty 3 ${3} -assert_nonempty 4 ${4} - -CMD=${1} -INSTANCE_ID=${2} -export DSS_HOME=${3} -GSDB_HOME=${4} -CONN_PATH=UDS:${DSS_HOME}/.dss_unix_d_socket -startdss_log=${DSS_HOME}/startdss.log - function check_dss_config() { - log "[START]Checking dss_inst.ini before start dss..." if [[ ! 
-e ${DSS_HOME}/cfg/dss_inst.ini ]] then - log "[START]${DSS_HOME}/cfg/dss_inst.ini must exist" + echo "${DSS_HOME}/cfg/dss_inst.ini NOT exist" + echo "RES_FAILED" exit 1 fi - log "[START]Checking dss_vg_conf.ini before start dss..." if [[ ! -e ${DSS_HOME}/cfg/dss_vg_conf.ini ]] then - log "[START]${DSS_HOME}/cfg/dss_vg_conf.ini must exist" + echo "${DSS_HOME}/cfg/dss_vg_conf.ini NOT exist" + echo "RES_FAILED" exit 1 fi LSNR_PATH=`cat ${DSS_HOME}/cfg/dss_inst.ini | sed s/[[:space:]]//g |grep -Eo "^LSNR_PATH=.*" | awk -F '=' '{print $2}'` if [[ -z ${LSNR_PATH} ]] then - log "[START]can't find lsnr path. Aborting." + echo "CANNOT find lsnr path." + echo "RES_FAILED" exit 1 fi CONN_PATH=UDS:${LSNR_PATH}/.dss_unix_d_socket } -get_startdss_log() -{ - LOG_HOME=`cat ${DSS_HOME}/cfg/dss_inst.ini | sed s/[[:space:]]//g |grep -Eo "^LOG_HOME=.*" | awk -F '=' '{print $2}'` - if [[ ! -z ${LOG_HOME} ]] - then - startdss_log=${LOG_HOME}/startdss.log - fi - - if [[ -z ${DSS_HOME} ]] - then - startdss_log=/dev/null - else - touch_logfile $startdss_log - fi -} -function ScandCheck() +function scand_check() { groups=`groups` echo $groups @@ -237,230 +103,182 @@ function ScandCheck() for var in ${array[@]} do echo $var - nohup dsscmd scandisk -t block -p /dev/sd -u $os_user -g $var >> /dev/null 2>&1 + nohup dsscmd scandisk -t block -p /dev/sd -u $USER -g $var >> /dev/null 2>&1 & if [[ $? != 0 ]] then - log "[SCAND]dsscmd scandisk -t block -p /dev/sd -u $os_user -g $var fail." exit 1 fi - log "[SCAND]dsscmd scandisk." - done -} - -kill_dss_and_perctrl() -{ - pid=$(program_pid dssserver ${DSS_HOME}) - if [[ -z ${pid} ]] - then - log "[${1}]dssserver not exist." - fi - kill_program dssserver ${DSS_HOME} - log "[${1}]Success to kill dssserver." - - pid=$(program_pid2 perctrl) - for perctrl_pid in ${pid} - do - if [[ -z ${perctrl_pid} ]] - then - log "[${1}]perctrl not exist." - fi - kill_program ${perctrl_pid} perctrl - log "[${1}]kill perctrl ${perctrl_pid} success." 
done } -# 1st step: if database exists, kill it -# 2nd step: if dssserver no exists, start it -function Start() -{ - check_dss_config - check_log_file ${DSS_HOME} $startdss_log startdss - pid=`program_pid ${DSS_BIN_FULL} ${DSS_HOME}` - if [[ ! -z ${pid} ]] - then - log "[START]dssserver already started in dir ${DSS_HOME}..." - else - log "[START]Starting dssserver..." - pid=`program_pid ${GSDB_BIN_FULL} ${GSDB_HOME}` - if [[ ! -z ${pid} ]] - then - log "[START]kill ${GSDB_BIN} before start dssserver" - kill_program ${GSDB_BIN_FULL} ${GSDB_HOME} - else - log "[START]${GSDB_BIN} is offline in dir ${GSDB_HOME}..." - fi - log "[START]dssserver" - ScandCheck - nohup ${DSS_BIN_FULL} -D ${DSS_HOME} >> ${startdss_log} 2>&1 & - log "[START]start dssserver in ${DSS_HOME} is starting." - fi -} - -# 1st Whether there is a dn stop tag file -# 2st stop tag file need to be created when there is no dn stop tag file -# 3st step: kill database -# 4nd step: stop dssserver by using dsscmd -# 5rd step: if fail to stop dssserver in 2nd step, then kill dssserver -function Stop() -{ - check_log_file ${DSS_HOME} $startdss_log startdss - - log "[STOP]stop ${GSDB_BIN}..." - db_flag_file=instance_manual_start_$(expr $INSTANCE_ID + 6001) - log "[STOP]db_flag_file=$db_flag_file" - - if [[ -f $GAUSSHOME/bin/$db_flag_file ]]; - then - log "[STOP]$GAUSSHOME/bin/$db_flag_file is exist" - else - touch $GAUSSHOME/bin/$db_flag_file - fi - - pid=$(program_pid ${GSDB_BIN_FULL} ${GSDB_HOME}) - if [[ ! -z ${pid} ]] - then - log "[STOP] kill ${GSDB_BIN} before stop dssserver" - kill_program ${GSDB_BIN_FULL} ${GSDB_HOME} - fi - - kill_dss_and_perctrl "STOP" -} - -# 1st step: check dssserver if exists - - - -function Check() +function reg() { - dss_status=$(program_status dssserver ${DSS_HOME}) - if [[ -z ${dss_status} ]] - then - log "[CHECK]dssserver is offline." - exit 1 - fi - if [[ "${dss_status}" == "D" || "${dss_status}" == "T" || "${dss_status}" == "Z" ]] - then - log "[CHECK]dssserver is dead." 
- exit 3 - fi - - pid=$(program_pid2 perctrl) - for perctrl_pid in ${pid} - do - perctrl_status=$(program_status ${perctrl_pid} perctrl) - if [[ "${perctrl_status}" == "D" || "${perctrl_status}" == "T" || "${perctrl_status}" == "Z" ]] - then - log "[CHECK]perctrl is dead." - exit 3 - fi - done -} -# 1st step: kill database -# 2nd step: stop dssserver by using dsscmd -# 3rd step: if fail to stop dssserver in 2nd step, then kill dssserver -function Clean() -{ - check_log_file ${DSS_HOME} $startdss_log startdss - pid=$(program_pid ${GSDB_BIN_FULL} ${GSDB_HOME}) - if [[ ! -z ${pid} ]] - then - log "[CLEAN]kill ${GSDB_BIN} before kill dssserver" - kill_program ${GSDB_BIN_FULL} ${GSDB_HOME} - fi - kill_dss_and_perctrl "CLEAN" - dsscmd clean_vglock -D ${DSS_HOME} >> /dev/null 2>&1 -} - -function Reg() -{ - ScandCheck + scand_check LOCAL_INSTANCE_ID=`awk '/INST_ID/{print}' ${DSS_HOME}/cfg/dss_inst.ini | awk -F= '{print $2}' | xargs` if [[ -z ${LOCAL_INSTANCE_ID} ]] then - log "[REG]can't find inst id. Aborting." - exit 1 + echo "RES_FAILED" + return 1 fi dsscmd reghl -D ${DSS_HOME} >> /dev/null 2>&1 if [[ $? != 0 ]] then - log "[REG]dsscmd reghl -D ${DSS_HOME} fail." - exit 1 + echo "RES_EAGAIN" + return 3 fi - log "[REG]register success." + return 0 } -function Unreg() +function kick() { LOCAL_INSTANCE_ID=`awk '/INST_ID/{print}' ${DSS_HOME}/cfg/dss_inst.ini | awk -F= '{print $2}' | xargs` if [[ -z ${LOCAL_INSTANCE_ID} ]] then - log "[UNREG]can't find inst id. Aborting." + echo "RES_FAILED" exit 1 fi - if [[ ${LOCAL_INSTANCE_ID} == ${INSTANCE_ID} ]] + if [[ ${LOCAL_INSTANCE_ID} == ${NODE_ID} ]] then dsscmd unreghl -D ${DSS_HOME} >> /dev/null 2>&1 else - pid=$(program_pid dssserver ${DSS_HOME}) - if [[ -z ${pid} ]] - then - log "[UNREG]dssserver is not running." - exit 1 - fi - dsscmd kickh -i ${INSTANCE_ID} -D ${DSS_HOME} >> /dev/null 2>&1 + dsscmd kickh -i ${NODE_ID} -D ${DSS_HOME} >> /dev/null 2>&1 fi if [[ $? 
!= 0 ]] then - log "[UNREG]dsscmd kickh -i ${INSTANCE_ID} -D ${DSS_HOME} fail, or dsscmd unreghl -D ${DSS_HOME} fail." + echo "RES_FAILED" exit 1 fi - log "[UNREG]unregister ${INSTANCE_ID} success." + echo "RES_SUCCESS" + exit 0 } -function Isreg() +function is_reg() { - dsscmd inq_reg -i ${INSTANCE_ID} -D ${DSS_HOME} >> /dev/null 2>&1 + dsscmd inq_reg -i ${NODE_ID} -D ${DSS_HOME} >> /dev/null 2>&1 result=$? if [[ ${result} == 255 ]] then - log "[ISREG]dsscmd inq_reg -i ${INSTANCE_ID} -D ${DSS_HOME} fail." - exit 1 + echo "RES_EAGAIN" + exit 3 fi if [[ ${result} != 2 ]] then - log "[ISREG]result: ${result}" + echo "RES_FAILED" + exit 1 fi - exit ${result} + echo "RES_SUCCESS" + exit 0 } -function Main() +function start_dss() { - if [ "$CMD" == "-start" ]; then - Start - exit 0 - elif [ "$CMD" == "-stop" ]; then - Stop - exit 0 - elif [ "$CMD" == "-check" ]; then - Check - exit 0 - elif [ "$CMD" == "-clean" ]; then - Clean - exit 0 - elif [ "$CMD" == "-reg" ]; then - Reg - exit 0 - elif [ "$CMD" == "-unreg" ]; then - Unreg - exit 0 - elif [ "$CMD" == "-isreg" ]; then - Isreg - exit 0 - else - echo "[SCRIPT]Please confirm the input parameters." - exit 1 - fi + check_dss_config + reg + if [ $? 
-ne 0 ]; then + echo "RES_FAILED" + exit 1 + fi + nohup ${BIN_PATH}/${DSS_BIN} -D ${DSS_HOME} >> /dev/null 2>&1 & +} + +function stop_dss() { + res_count=`ps -u ${USER} | grep ${DSS_BIN} |grep -v grep |wc -l` + echo "res_count = ${res_count}" + if [ "$res_count" -eq "0" ]; then + echo "RES_FAILED" + exit 1 + elif [ "$res_count" -eq "1" ]; then + ps -u ${USER} | grep ${DSS_BIN}|grep -v grep | awk '{print "kill -9 " $1}' |sh + echo "RES_SUCCESS" + exit 0 + else + res_count=`ps -fu ${USER} | grep ${DSS_BIN} | grep ${process_path} | grep -v grep | wc -l` + echo "res_count is ${res_count}" + if [ "$res_count" -eq "0" ]; then + echo "RES_FAILED" + exit 1 + elif [ "$res_count" -eq "1" ]; then + ps -fu ${USER} | grep ${DSS_BIN} | grep ${process_path} | grep -v grep | awk '{print "kill -9 " $2}' |sh + echo "RES_SUCCESS" + exit 0 + else + echo "RES_EAGAIN" + exit 3 + fi + fi +} + +function stop_dss_by_force() { + res_count=`ps -u ${USER} | grep ${DSS_BIN}|grep -v grep |wc -l` + echo "res_count = ${res_count}" + if [ "$res_count" -eq "0" ]; then + echo "RES_SUCCESS" + exit 0 + elif [ "$res_count" -eq "1" ]; then + ps -u ${USER} | grep ${DSS_BIN}|grep -v grep | awk '{print "kill -9 " $1}' |sh + echo "RES_SUCCESS" + exit 0 + else + res_count=`ps -fu ${USER} | grep ${DSS_BIN} | grep ${process_path} | grep -v grep | wc -l` + echo "res_count is ${res_count}" + if [ "$res_count" -eq "0" ]; then + echo "RES_SUCCESS" + exit 0 + elif [ "$res_count" -eq "1" ]; then + ps -fu ${USER} | grep ${DSS_BIN} | grep ${process_path} | grep -v grep | awk '{print "kill -9 " $2}' |sh + echo "RES_SUCCESS" + exit 0 + else + echo "RES_FAILED" + exit 1 + fi + fi } -Main \ No newline at end of file +############################### main ############################### + +if [ $# -ne 2 ]; then + usage + exit 1 +fi + +case "${PARM}" in + -start) + start_dss + ;; + -stop) + stop_dss + ;; + -stop_force) + stop_dss_by_force + ;; + -check) + check_dss + if [ $? 
-ne 0 ]; then + echo "RES_FAILED" + exit 1 + fi + ;; + -reg) + reg + if [ $? -ne 0 ]; then + echo "RES_FAILED" + exit 1 + fi + ;; + -kick) + kick + ;; + -isreg) + is_reg + ;; + *) + echo "RES_FAILED" + usage + exit 1 + ;; +esac + +echo "RES_SUCCESS" +exit 0 diff --git a/pkg/deploy/action/dss/dssctl.py b/pkg/deploy/action/dss/dssctl.py index 1f54bda6b4027811a44420886acb16eabd9dff0e..6378e27d27df34b252972c6feea8cdc19c6ebefe 100644 --- a/pkg/deploy/action/dss/dssctl.py +++ b/pkg/deploy/action/dss/dssctl.py @@ -27,7 +27,7 @@ CMS_HOME = "/opt/cantian/cms/service" DSS_CFG = "/opt/cantian/dss/cfg" BACKUP_NAME = "/opt/cantian/backup/files/dss" SCRIPTS_DIR = "/opt/cantian/action/dss" -DSS_CTRL_SCRIPTS = "%s/bin/dss_contrl.sh" % DSS_HOME +DSS_CTRL_SCRIPTS = "%s/dss_contrl.sh" % SCRIPTS_DIR RETRY_TIMES = 20 @@ -86,7 +86,7 @@ class DssCtl(object): self.ca_path = get_value("ca_path") self.crt_path = get_value("crt_path") self.key_path = get_value("key_path") - self.log_file = os.path.join(DSS_HOME, "log/run/dssinstance.rlog") + self.log_file = os.path.join(DSS_LOG, "run/dssinstance.rlog") self.begin_time = None @staticmethod @@ -239,6 +239,10 @@ class DssCtl(object): add dss res for cms :return: """ + os.chmod(DSS_CTRL_SCRIPTS, 0o700) + dss_contrl_path = os.path.join(DSS_HOME, "dss_contrl.sh") + shutil.copyfile(DSS_CTRL_SCRIPTS, dss_contrl_path) + os.chmod(dss_contrl_path, 0o700) if self.node_id == "0": LOG.info("Start to add dss res.") cmd = ("source ~/.bashrc && %s/bin/cms res -add dss -type dss -attr \"script=%s\"" @@ -249,6 +253,7 @@ class DssCtl(object): err_msg = "Failed to add dss res, details: %s" % (str(output)) raise Exception(err_msg) LOG.info("Success to add dss res.") + LOG.info("Success to copy dss control script.") def config_perctrl_permission(self) -> None: """ @@ -319,8 +324,8 @@ class DssCtl(object): shutil.rmtree(os.path.join(DSS_HOME, "lib")) if os.path.exists(os.path.join(DSS_HOME, "bin")): shutil.rmtree(os.path.join(DSS_HOME, "bin")) - if 
os.path.exists(os.path.join(DSS_HOME, "cfg")): - shutil.rmtree(os.path.join(DSS_HOME, "cfg")) + if os.path.exists(DSS_CFG): + shutil.rmtree(DSS_CFG) LOG.info("Success to clean soft.") def pre_install(self, *args) -> None: @@ -347,7 +352,7 @@ class DssCtl(object): self.modify_env(action="add") self.prepare_cfg() self.prepare_source() - #self.cms_add_dss_res() + self.cms_add_dss_res() self.config_perctrl_permission() self.prepare_dss_dick() self.reghl_dss_disk() @@ -381,6 +386,7 @@ class DssCtl(object): """ start dss server: - check dss server status + - register dss disk - start dss server :param args: :return: @@ -388,6 +394,7 @@ class DssCtl(object): LOG.info("Start dss server.") if self.check_status(): return + self.reghl_dss_disk() self.begin_time = str(datetime.datetime.now()).split(".")[0] dssserver_cmd = "source ~/.bashrc && nohup dssserver -D %s &" % DSS_HOME subprocess.Popen(dssserver_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/pkg/deploy/action/get_config_info.py b/pkg/deploy/action/get_config_info.py index 32bf62228f26414a1567995094791be6a7e4adc9..751ed2551b558486d10f230d77e72337b6501e9b 100644 --- a/pkg/deploy/action/get_config_info.py +++ b/pkg/deploy/action/get_config_info.py @@ -24,6 +24,11 @@ def get_value(param): if param == "cluster_scale": return len(info.get("cms_ip").split(";")) + if param == "deploy_mode": + if info.get('deploy_mode', ""): + return info.get('deploy_mode') + return "dbstor" if info.get('deploy_policy', "") in ["ModeB", "ModeC"] else "file" + return info.get(param, "") diff --git a/pkg/deploy/action/implement/check_deploy_param.py b/pkg/deploy/action/implement/check_deploy_param.py index 1bbcb6aac9ccff1f14b6d0a62343c9a60f5fecd0..c2129363381ade8f61fa74a164d4267e5c215632 100644 --- a/pkg/deploy/action/implement/check_deploy_param.py +++ b/pkg/deploy/action/implement/check_deploy_param.py @@ -7,8 +7,8 @@ DEPLOY_PARAM_FILE = os.path.join(CUR_PATH, '../../config/deploy_param.json') CHECK_LIST = [ 
"cluster_id", "cluster_name", - "storage_dbstore_fs", - "storage_dbstore_page_fs", + "storage_dbstor_fs", + "storage_dbstor_page_fs", "storage_share_fs", "storage_archive_fs", "storage_metadata_fs", diff --git a/pkg/deploy/action/init_container.sh b/pkg/deploy/action/init_container.sh index 30d56f9435d912e4fe33a6ff2005d6797c7cea14..e210ff283bdf997c05a360e91e77ed62d5a466f6 100644 --- a/pkg/deploy/action/init_container.sh +++ b/pkg/deploy/action/init_container.sh @@ -2,7 +2,7 @@ set +x CURRENT_PATH=$(dirname $(readlink -f $0)) SCRIPT_NAME=${PARENT_DIR_NAME}/$(basename $0) -DBSTORE_CHECK_FILE=${CURRENT_PATH}/dbstor/check_dbstor_compat.sh +DBSTOR_CHECK_FILE=${CURRENT_PATH}/dbstor/check_dbstor_compat.sh cantian_user=`python3 ${CURRENT_PATH}/cantian/get_config_info.py "deploy_user"` cantian_group=`python3 ${CURRENT_PATH}/cantian/get_config_info.py "deploy_group"` deploy_mode=`python3 ${CURRENT_PATH}/cantian/get_config_info.py "deploy_mode"` @@ -10,7 +10,7 @@ storage_share_fs=`python3 ${CURRENT_PATH}/cantian/get_config_info.py "storage_sh cluster_name=`python3 ${CURRENT_PATH}/cantian/get_config_info.py "cluster_name"` node_id=`python3 ${CURRENT_PATH}/cantian/get_config_info.py "node_id"` storage_metadata_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_metadata_fs"` -storage_dbstore_page_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstore_page_fs"` +storage_dbstor_page_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstor_page_fs"` metadata_path="/mnt/dbdata/remote/metadata_${storage_metadata_fs}" dr_setup=`python3 ${CURRENT_PATH}/docker/get_config_info.py "dr_deploy.dr_setup"` @@ -31,18 +31,18 @@ function check_dbstor_usr_passwd() { fi } -function check_dbstore_client_compatibility() { - logAndEchoInfo "begin to check dbstore client compatibility." - if [ ! -f "${DBSTORE_CHECK_FILE}" ];then - logAndEchoError "${DBSTORE_CHECK_FILE} file is not exists." 
+function check_dbstor_client_compatibility() { + logAndEchoInfo "begin to check dbstor client compatibility." + if [ ! -f "${DBSTOR_CHECK_FILE}" ];then + logAndEchoError "${DBSTOR_CHECK_FILE} file is not exists." exit 1 fi - su -s /bin/bash - "${cantian_user}" -c "sh ${DBSTORE_CHECK_FILE}" + su -s /bin/bash - "${cantian_user}" -c "sh ${DBSTOR_CHECK_FILE}" if [[ $? -ne 0 ]];then - logAndEchoError "dbstore client compatibility check failed." + logAndEchoError "dbstor client compatibility check failed." exit 1 fi - logAndEchoInfo "dbstore client compatibility check success." + logAndEchoInfo "dbstor client compatibility check success." } function check_gcc_file() { @@ -66,7 +66,7 @@ function check_gcc_file() { logAndEchoInfo "begin to check cluster name." is_cluster_name_exist=$(su -s /bin/bash - "${cantian_user}" \ - -c 'dbstor --query-file --fs-name='"${storage_dbstore_page_fs}"' --file-dir=/' | grep "${cluster_name}$" | wc -l) + -c 'dbstor --query-file --fs-name='"${storage_dbstor_page_fs}"' --file-dir=/' | grep "${cluster_name}$" | wc -l) if [[ ${is_cluster_name_exist} -eq 0 ]];then logAndEchoError "query cluster name failed, please check whether cluster config parameters conflict." 
exit 1 @@ -120,8 +120,8 @@ function init_module() { if [[ ${lib_name} = 'dbstor' ]]; then check_dbstor_usr_passwd - # 检查dbstore client 与server端是否兼容 - check_dbstore_client_compatibility + # 检查dbstor client 与server端是否兼容 + check_dbstor_client_compatibility check_gcc_file fi done diff --git a/pkg/deploy/action/install.sh b/pkg/deploy/action/install.sh index 4add28d7307f2e3ab950e25a3c887ec06a2c57b5..fbcbe1c8cc33ff31d7702cb3b637d028cdfe9e2e 100644 --- a/pkg/deploy/action/install.sh +++ b/pkg/deploy/action/install.sh @@ -11,8 +11,8 @@ CONFIG_PATH=${CURRENT_PATH}/../config ENV_FILE=${CURRENT_PATH}/env.sh MYSQL_MOUNT_PATH=/opt/cantian/image/cantian_connector/for_mysql_official/mf_connector_mount_dir UPDATE_CONFIG_FILE_PATH="${CURRENT_PATH}"/update_config.py -DBSTORE_CHECK_FILE=${CURRENT_PATH}/dbstor/check_dbstor_compat.sh -DEPLOY_MODE_DBSTORE_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag +DBSTOR_CHECK_FILE=${CURRENT_PATH}/dbstor/check_dbstor_compat.sh +DEPLOY_MODE_DBSTOR_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag config_install_type="override" pass_check='true' add_group_user_ceck='true' @@ -432,20 +432,20 @@ function check_dbstor_usr_passwd() { fi } -function check_dbstore_client_compatibility() { - logAndEchoInfo "begin to check dbstore client compatibility." - if [ ! -f "${DBSTORE_CHECK_FILE}" ];then - logAndEchoError "${DBSTORE_CHECK_FILE} file is not exists." +function check_dbstor_client_compatibility() { + logAndEchoInfo "begin to check dbstor client compatibility." + if [ ! -f "${DBSTOR_CHECK_FILE}" ];then + logAndEchoError "${DBSTOR_CHECK_FILE} file is not exists." uninstall exit 1 fi - su -s /bin/bash - "${cantian_user}" -c "sh ${DBSTORE_CHECK_FILE}" + su -s /bin/bash - "${cantian_user}" -c "sh ${DBSTOR_CHECK_FILE}" if [[ $? -ne 0 ]];then - logAndEchoError "dbstore client compatibility check failed." + logAndEchoError "dbstor client compatibility check failed." uninstall exit 1 fi - logAndEchoInfo "dbstore client compatibility check success." 
+ logAndEchoInfo "dbstor client compatibility check success." } function mount_fs() { @@ -509,17 +509,17 @@ function mount_fs() { checkMountNFS ${metadata_result} if [[ x"${deploy_mode}" == x"file" ]]; then - storage_dbstore_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstore_fs"` + storage_dbstor_fs=`python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstor_fs"` storage_logic_ip=`python3 ${CURRENT_PATH}/get_config_info.py "storage_logic_ip"` - mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" - chown "${cantian_user}":"${cantian_user}" /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" - mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev "${storage_logic_ip}":/"${storage_dbstore_fs}" /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" + mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" + chown "${cantian_user}":"${cantian_user}" /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" + mount -t nfs -o vers=4.0,timeo=${NFS_TIMEO},nosuid,nodev "${storage_logic_ip}":/"${storage_dbstor_fs}" /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" checkMountNFS $? 
- chown "${cantian_user}":"${cantian_user}" /mnt/dbdata/remote/storage_"${storage_dbstore_fs}" - mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/data - mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/share_data - chown ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/data - chown ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstore_fs}"/share_data + chown "${cantian_user}":"${cantian_user}" /mnt/dbdata/remote/storage_"${storage_dbstor_fs}" + mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/data + mkdir -m 750 -p /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/share_data + chown ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/data + chown ${cantian_user}:${cantian_user} /mnt/dbdata/remote/storage_"${storage_dbstor_fs}"/share_data fi if [[ x"${deploy_mode}" == x"file" ]] || [[ -f /opt/cantian/youmai_demo ]];then # nas模式才挂载share nfs @@ -602,8 +602,8 @@ fi # 单进程场景使用deploy_user is_single=$(cat "${CURRENT_PATH}"/cantian/options.py | grep -oP 'self\.running_mode = "\K[^"]+') if [[ x"${is_single}" == x"cantiand_with_mysql_in_cluster" ]];then - sed -i "s/cantian_user=\"cantian\"/cantian_user=\"${deploy_user}\"/g" "${CURRENT_PATH}"/env.sh - sed -i "s/cantian_group=\"cantian\"/cantian_group=\"${deploy_group}\"/g" "${CURRENT_PATH}"/env.sh + sed -i "/^cantian_user=/ c\cantian_user=\"${deploy_user}"\" "${CURRENT_PATH}"/env.sh + sed -i "/^cantian_group=/ c\cantian_group=\"${deploy_group}"\" "${CURRENT_PATH}"/env.sh fi if [[ ${use_dorado["${deploy_mode}"]} -ne 1 ]];then @@ -825,7 +825,7 @@ cp -rfp ${CURRENT_PATH}/../common /opt/cantian/ cp -rfp ${CURRENT_PATH}/wsr_report /opt/cantian/action cp -rfp ${CURRENT_PATH}/dbstor /opt/cantian/action -# 适配开源场景,使用file,不使用dbstore,提前安装参天rpm包 +# 适配开源场景,使用file,不使用dbstor,提前安装参天rpm包 install_rpm if [[ x"${deploy_mode}" == x"dss" ]];then @@ -863,8 +863,8 @@ do fi if [[ "${cantian_in_container}" == "0" 
]] && [[ ${use_dorado["${deploy_mode}"]} ]]; then check_dbstor_usr_passwd - # 检查dbstore client 与server端是否兼容 - check_dbstore_client_compatibility + # 检查dbstor client 与server端是否兼容 + check_dbstor_client_compatibility fi else sh ${CURRENT_PATH}/${lib_name}/appctl.sh install >> ${OM_DEPLOY_LOG_FILE} 2>&1 @@ -882,8 +882,10 @@ done # 把升级备份相关路径拷贝到/opt/cantian cp -rfp ${CURRENT_PATH}/../repo /opt/cantian/ cp -rfp ${CURRENT_PATH}/../versions.yml /opt/cantian/ -source ${CURRENT_PATH}/docker/dbstor_tool_opt_common.sh -update_version_yml_by_dbstor +if [[ "${cantian_in_container}" == "0" ]]; then + source ${CURRENT_PATH}/docker/dbstor_tool_opt_common.sh + update_version_yml_by_dbstor +fi config_security_limits > /dev/null 2>&1 diff --git a/pkg/deploy/action/logic/storage_operate.py b/pkg/deploy/action/logic/storage_operate.py index 0aa3369a3a7e918f051b77ab3ca6baffb9ffa070..d64680bfc2b5653b9b209b08f12ee0c07d781f64 100644 --- a/pkg/deploy/action/logic/storage_operate.py +++ b/pkg/deploy/action/logic/storage_operate.py @@ -23,7 +23,7 @@ class StorageInf(object): @staticmethod def umount_file_system(fs_name): """ - 挂载dbstore文件系统 + 挂载dbstor文件系统 :param fs_name: 文件系统名称 :return: """ @@ -53,7 +53,7 @@ class StorageInf(object): @staticmethod def mount_file_system(fs_name, logic_ip, prefix=None, params=""): """ - 取消dbstore文件系统挂载 + 取消dbstor文件系统挂载 :param logic_ip: 挂载ip :param prefix: 挂载路径前缀 :param params: 挂载参数 diff --git a/pkg/deploy/action/modify_env.py b/pkg/deploy/action/modify_env.py index bb7de36929436c38b4073095161ef034a493e34b..19e89853c1982cd160de3c19fee13420b26a581e 100644 --- a/pkg/deploy/action/modify_env.py +++ b/pkg/deploy/action/modify_env.py @@ -42,8 +42,8 @@ def modify_env(): def modify_cantian_config(): data = json.loads(read_file(CANTIAN_CONFIG)) - if "USE_DBSTORE" in data.keys(): - del data["USE_DBSTORE"] + if "USE_DBSTOR" in data.keys(): + del data["USE_DBSTOR"] data = json.dumps(data, indent=4) write_file(data, CANTIAN_CONFIG) diff --git 
a/pkg/deploy/action/mysql/mysqlctl.py b/pkg/deploy/action/mysql/mysqlctl.py index aa5a5b8d6e416775ba8fc13fda16361ac0efdec4..f9a34a173139847a31ab48326b222f2991a3912b 100644 --- a/pkg/deploy/action/mysql/mysqlctl.py +++ b/pkg/deploy/action/mysql/mysqlctl.py @@ -13,7 +13,7 @@ from get_config_info import get_value INSTALL_PATH = CUR_PATH.parent.parent INFO_SRC = "/opt/cantian/mysql/install/mysql/docs/INFO_SRC" CANTIAN_INSTALL_CONFIG = os.path.join(CUR_PATH.parent, "cantian", "install_config.json") -TARGET_VERSION = "1.0.0" +TARGET_VERSION = "1.0.2" class MysqlCtl(object): diff --git a/pkg/deploy/action/obtains_lsid.py b/pkg/deploy/action/obtains_lsid.py index 1365788ae0f15701a635bb64c1327115e35d79a2..c3a258244a228eb3618caeb9c879b75f40ecf55e 100644 --- a/pkg/deploy/action/obtains_lsid.py +++ b/pkg/deploy/action/obtains_lsid.py @@ -8,7 +8,7 @@ from pathlib import Path CUR_PATH = os.path.dirname(os.path.realpath(__file__)) INSTALL_FILE = str(Path(os.path.join(CUR_PATH, "../config/deploy_param.json"))) -DBSTORE_UNIFY_FLAG = os.path.exists("/opt/cantian/log/deploy/.dbstor_unify_flag") +DBSTOR_UNIFY_FLAG = os.path.exists("/opt/cantian/log/deploy/.dbstor_unify_flag") # 适配LLT if os.path.exists(INSTALL_FILE): diff --git a/pkg/deploy/action/pre_install.py b/pkg/deploy/action/pre_install.py index 5a03cc5c32b6a4f9eb46daf13914d796e2045238..fbf70c922ef5a121ba9f7ac679a6f06251dcdd68 100644 --- a/pkg/deploy/action/pre_install.py +++ b/pkg/deploy/action/pre_install.py @@ -52,7 +52,7 @@ kernel_element = { 'VARIANT_MEMORY_AREA_SIZE', '_INDEX_BUFFER_SIZE' } -use_dbstor = ["dbstor", "combine"] +use_dbstor = ["dbstor", "combined"] UnitConversionInfo = collections.namedtuple('UnitConversionInfo', ['tmp_gb', 'tmp_mb', 'tmp_kb', 'key', 'value', 'sga_buff_size', 'temp_buffer_size', 'data_buffer_size', 'shared_pool_size', @@ -215,12 +215,12 @@ class ConfigChecker: return True @staticmethod - def dbstore_fs_vstore_id(value): + def dbstor_fs_vstore_id(value): try: value = int(value) except 
Exception as error: - LOG.error('dbstore_fs_vstore id type must be int : %s', str(error)) + LOG.error('dbstor_fs_vstore id type must be int : %s', str(error)) return False return True @@ -323,7 +323,7 @@ class CheckInstallConfig(CheckBase): self.config_path = config_path self.value_checker = ConfigChecker self.config_key = { - 'deploy_user', 'node_id', 'cms_ip', 'storage_dbstore_fs', 'storage_share_fs', 'storage_archive_fs', + 'deploy_user', 'node_id', 'cms_ip', 'storage_dbstor_fs', 'storage_share_fs', 'storage_archive_fs', 'storage_metadata_fs', 'share_logic_ip', 'archive_logic_ip', 'metadata_logic_ip', 'db_type', 'MAX_ARCH_FILES_SIZE', 'mysql_in_container', 'mysql_metadata_in_cantian', 'storage_logic_ip', 'deploy_mode', 'mes_ssl_switch', 'cantian_in_container', 'deploy_policy', 'link_type', 'ca_path', 'crt_path', 'key_path' @@ -332,9 +332,9 @@ class CheckInstallConfig(CheckBase): 'deploy_user', 'node_id', 'cms_ip', 'db_type', 'cantian_in_container', 'MAX_ARCH_FILES_SIZE', 'mysql_in_container', 'mysql_metadata_in_cantian', 'deploy_mode', 'mes_ssl_switch', "redo_num", "redo_size"} - self.dbstore_config_key = { - 'cluster_name', 'cantian_vlan_ip', 'storage_vlan_ip', 'link_type', 'storage_dbstore_page_fs', - 'kerberos_key', 'cluster_id', 'mes_type', "vstore_id", "dbstore_fs_vstore_id" + self.dbstor_config_key = { + 'cluster_name', 'cantian_vlan_ip', 'storage_vlan_ip', 'link_type', 'storage_dbstor_page_fs', + 'kerberos_key', 'cluster_id', 'mes_type', "vstore_id", "dbstor_fs_vstore_id" } self.file_config_key = { "redo_num", "redo_size" @@ -615,7 +615,7 @@ class CheckInstallConfig(CheckBase): if install_config_params['deploy_mode'] in use_dbstor: self.config_key.remove("storage_logic_ip") - self.config_key.update(self.dbstore_config_key) + self.config_key.update(self.dbstor_config_key) ping_check_element.remove("storage_logic_ip") if install_config_params['deploy_mode'] == "dbstor": if not install_config_params['mysql_metadata_in_cantian']: @@ -714,8 +714,8 @@ class 
CheckInstallConfig(CheckBase): install_config_params['mes_ssl_switch'] = False if 'deploy_mode' not in install_config_params.keys(): install_config_params['deploy_mode'] = "combined" - if 'dbstore_fs_vstore_id' not in install_config_params.keys(): - install_config_params['dbstore_fs_vstore_id'] = "0" + if 'dbstor_fs_vstore_id' not in install_config_params.keys(): + install_config_params['dbstor_fs_vstore_id'] = "0" if (install_config_params.get("mes_ssl_switch") and install_config_params.get("cantian_in_container", "-1") == "0"): self.config_key.update(self.mes_type_key) diff --git a/pkg/deploy/action/pre_upgrade.sh b/pkg/deploy/action/pre_upgrade.sh index 685e8884e834b3772455c41ccbb555385f3974c8..0233fc211fa82a3a889b2e7da315654f0326d547 100644 --- a/pkg/deploy/action/pre_upgrade.sh +++ b/pkg/deploy/action/pre_upgrade.sh @@ -6,7 +6,7 @@ UPGRADE_MODE=$1 CONFIG_FILE_PATH=$2 CONFIG_PATH=${CURRENT_PATH}/../config CMS_CHECK_FILE=/opt/cantian/action/fetch_cls_stat.py -DBSTORE_CHECK_FILE=/opt/cantian/dbstor/tools/cs_check_version.sh +DBSTOR_CHECK_FILE=/opt/cantian/dbstor/tools/cs_check_version.sh CANTIAN_PATH=/opt/cantian MEM_REQUIRED=5 # 单位G SIZE_UPPER=1024 @@ -263,22 +263,22 @@ function gen_upgrade_plan() { return 0 } -function check_dbstore_client_compatibility() { +function check_dbstor_client_compatibility() { deploy_mode=$(python3 ${CURRENT_PATH}/get_config_info.py "deploy_mode") if [[ x"${deploy_mode}" == x"file" || "${source_version}" == "2.0.0"* ]]; then return 0 fi - logAndEchoInfo "begin to check dbstore client compatibility." - if [ ! -f "${DBSTORE_CHECK_FILE}" ];then - logAndEchoError "${DBSTORE_CHECK_FILE} file is not exists." + logAndEchoInfo "begin to check dbstor client compatibility." + if [ ! -f "${DBSTOR_CHECK_FILE}" ];then + logAndEchoError "${DBSTOR_CHECK_FILE} file is not exists." exit 1 fi - su -s /bin/bash - "${cantian_user}" -c "sh ${DBSTORE_CHECK_FILE}" + su -s /bin/bash - "${cantian_user}" -c "sh ${DBSTOR_CHECK_FILE}" if [[ $? 
-ne 0 ]];then - logAndEchoError "dbstore client compatibility check failed." + logAndEchoError "dbstor client compatibility check failed." exit 1 fi - logAndEchoInfo "dbstore client compatibility check success." + logAndEchoInfo "dbstor client compatibility check success." } function check_file_system_exist() { @@ -302,9 +302,9 @@ function check_file_system_exist() { exit 1 fi echo "" - echo -e "${dm_login_ip}\n${dm_login_user}\n${dm_login_pwd}\n" | python3 "${CURRENT_PATH}"/storage_operate/split_dbstore_fs.py "pre_upgrade" "${CURRENT_PATH}"/../config/deploy_param.json + echo -e "${dm_login_ip}\n${dm_login_user}\n${dm_login_pwd}\n" | python3 "${CURRENT_PATH}"/storage_operate/split_dbstor_fs.py "pre_upgrade" "${CURRENT_PATH}"/../config/deploy_param.json if [ $? -ne 0 ];then - logAndEchoError "Check dbstore page file system failed, /opt/cantian/log/deploy/om_deploy" + logAndEchoError "Check dbstor page file system failed, /opt/cantian/log/deploy/om_deploy" exit 1 fi echo -e "${dm_login_ip}\n${dm_login_user}\n${dm_login_pwd}\n" | python3 "${CURRENT_PATH}"/storage_operate/migrate_file_system.py "pre_upgrade" "${CURRENT_PATH}"/../config/deploy_param.json "/opt/cantian/config/deploy_param.json" @@ -321,7 +321,7 @@ function offline_upgrade() { check_cms_stat check_mem_avail check_upgrade_version - check_dbstore_client_compatibility + check_dbstor_client_compatibility call_each_pre_upgrade gen_upgrade_plan } @@ -332,7 +332,7 @@ function rollup_upgrade() { local_node_health_check cantian_database_health_status_check component_version_dependency_check - check_dbstore_client_compatibility + check_dbstor_client_compatibility call_each_pre_upgrade gen_upgrade_plan } diff --git a/pkg/deploy/action/rollback.sh b/pkg/deploy/action/rollback.sh index a9a08c929dad37fb682bd2220ad52f88c8c61699..a7a69b7dbdc6cee78e4a804c2b96f2d311386dab 100644 --- a/pkg/deploy/action/rollback.sh +++ b/pkg/deploy/action/rollback.sh @@ -76,6 +76,20 @@ function input_params_check() { fi fi fi + + # 
若使用入湖,需校验so依赖文件路径进行文件拷贝 + if [[ -f /opt/software/tools/logicrep/start.success ]] || [[ -f /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success ]]; then + read -p "please input the so rely path of logicrep: " SO_PATH + if [ ! -d "${SO_PATH}" ]; then + logAndEchoInfo "pass upgrade mode check, current upgrade mode: ${UPGRADE_MODE}" + exit 1 + else + if [ -z "$(ls -A "${SO_PATH}")" ]; then + logAndEchoInfo "pass upgrade mode check, current upgrade mode: ${UPGRADE_MODE}" + exit 1 + fi + fi + fi logAndEchoInfo ">>>>> pass check input params <<<<<" } @@ -173,7 +187,7 @@ function stop_cantian() { logAndEchoInfo "stop cantian success" } -function install_dbstore(){ +function install_dbstor(){ local arrch=$(uname -p) local dbstor_path="${CURRENT_PATH}"/../repo local dbstor_package_file=$(ls "${dbstor_path}"/DBStor_Client*_"${arrch}"*.tgz) @@ -244,7 +258,7 @@ function install_rpm() tar -zxf ${RPM_UNPACK_PATH_FILE}/Cantian-RUN-CENTOS-64bit.tar.gz -C ${RPM_PACK_ORG_PATH} if [ x"${deploy_mode}" != x"file" ];then echo "start rollback rpm package" - install_dbstore + install_dbstor if [ $? -ne 0 ];then sh ${CURRENT_PATH}/uninstall.sh ${config_install_type} exit 1 @@ -437,6 +451,9 @@ function clear_tag_file() { rm -rf "${_file}" fi done + if [[ -f /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success ]]; then + rm -rf /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success + fi delete_fs_upgrade_file_or_path_by_dbstor call_ctback_tool.success delete_fs_upgrade_file_or_path_by_dbstor cantian_offline_upgrade_commit_${source_version}.success # 滚动升级场景进行离线回退,需要清理滚动升级相关文件 @@ -762,6 +779,10 @@ function offline_rollback() { start_cantian check_local_nodes fi + if [[ -f /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success ]]; then + echo "begin to start logicrep." 
+ sh "/opt/cantian/action/logicrep/appctrl.sh startup" ${SO_PATH} + fi if [[ -f ${DR_DEPLOY_FLAG} ]];then local warning_msg="\tThe standby side needs to perform recover operation, otherwise there may be data inconsistency situations" echo -e "\033[5;31mWarning:\033[0m" diff --git a/pkg/deploy/action/storage_operate/create_file_system.py b/pkg/deploy/action/storage_operate/create_file_system.py index 8b2e3c82b5cc8fcfdbbcf1a9c350653dd9b55934..aadfca6afff68e4a92e87507716d5dff23c27f4f 100644 --- a/pkg/deploy/action/storage_operate/create_file_system.py +++ b/pkg/deploy/action/storage_operate/create_file_system.py @@ -23,13 +23,13 @@ DEPLOY_PARAM_PATH = str(pathlib.Path(CUR_PATH, "../../config/deploy_param.json") DEPLOY_PARAM = json.loads(read_helper(DEPLOY_PARAM_PATH)) DEPLOY_MODE = DEPLOY_PARAM.get("deploy_mode") FS_TYPE_LIST = [ - "storage_dbstore_fs", "storage_dbstore_page_fs", + "storage_dbstor_fs", "storage_dbstor_page_fs", "storage_share_fs", "storage_archive_fs", "storage_metadata_fs" ] if DEPLOY_MODE == "dbstor": FS_TYPE_LIST = [ - "storage_dbstore_fs", "storage_dbstore_page_fs", + "storage_dbstor_fs", "storage_dbstor_page_fs", "storage_share_fs", "storage_archive_fs" ] SHARE_FS_TYPE_LIST = [ @@ -38,14 +38,14 @@ SHARE_FS_TYPE_LIST = [ ] if DEPLOY_MODE == "file": FS_TYPE_LIST = [ - "storage_dbstore_fs","storage_share_fs", + "storage_dbstor_fs","storage_share_fs", "storage_archive_fs", "storage_metadata_fs" ] SHARE_FS_TYPE_LIST = [ - "storage_dbstore_fs", "storage_share_fs", + "storage_dbstor_fs", "storage_share_fs", "storage_archive_fs", "storage_metadata_fs" ] -ID_NAS_DBSTORE = 1038 +ID_NAS_DBSTOR = 1038 ID_NAS_DEFAULT = 11 @@ -191,7 +191,7 @@ class CreateFS(object): nfs_share_id = self._create_nfs_share(fs_id, fs_type) nfs_share_client_id = self._add_nfs_client(nfs_share_id, fs_type) else: - fs_id = self._create_fs(fs_type, work_load_type=ID_NAS_DBSTORE) + fs_id = self._create_fs(fs_type, work_load_type=ID_NAS_DBSTOR) fs_info[fs_type] = { "fs_id": fs_id, 
"nfs_share_id": nfs_share_id, @@ -245,13 +245,13 @@ class CreateFS(object): def _check_vstore_id(self): deploy_info = json.loads(read_helper(DEPLOY_PARAM_PATH)) - deploy_info_dbstore_fs_vstore_id = deploy_info.get("dbstore_fs_vstore_id") - fs_info_dbstore_fs_vstore_id = self.fs_info.get("storage_dbstore_fs").get("vstoreId") - if int(deploy_info_dbstore_fs_vstore_id) != int(fs_info_dbstore_fs_vstore_id): - err_msg = "dbstore_fs_vstore_id of config_params.json is " \ + deploy_info_dbstor_fs_vstore_id = deploy_info.get("dbstor_fs_vstore_id") + fs_info_dbstor_fs_vstore_id = self.fs_info.get("storage_dbstor_fs").get("vstoreId") + if int(deploy_info_dbstor_fs_vstore_id) != int(fs_info_dbstor_fs_vstore_id): + err_msg = "dbstor_fs_vstore_id of config_params.json is " \ "different from file_system_info.json,details:" \ - " dbstore_fs_vstore_id:(%s, %s)" % (fs_info_dbstore_fs_vstore_id, - deploy_info_dbstore_fs_vstore_id) + " dbstor_fs_vstore_id:(%s, %s)" % (fs_info_dbstor_fs_vstore_id, + deploy_info_dbstor_fs_vstore_id) LOG.error(err_msg) raise Exception(err_msg) @@ -388,7 +388,7 @@ class CreateFS(object): def _create_fs(self, fs_type, work_load_type): """ - param fs_type: storage_dbstore_fs、storage_share_fs、storage_archive_fs、storage_metadata_fs + param fs_type: storage_dbstor_fs、storage_share_fs、storage_archive_fs、storage_metadata_fs :return:the file system id """ data = self._get_fs_info(fs_type, work_load_type) @@ -398,7 +398,7 @@ class CreateFS(object): """ add nfs share :param fs_id: the file system id - :param fs_type: storage_dbstore_fs、storage_share_fs、storage_archive_fs、storage_metadata_fs + :param fs_type: storage_dbstor_fs、storage_share_fs、storage_archive_fs、storage_metadata_fs :return: nfs share id """ data = self._get_nfs_share_info(fs_type, fs_id) diff --git a/pkg/deploy/action/storage_operate/do_snapshot.py b/pkg/deploy/action/storage_operate/do_snapshot.py index cb9cbaa8bedc82c691c0628a9b0c8576207a537d..5560d1329110fe64943d90a46c27b7e40f7c11a2 100644 --- 
a/pkg/deploy/action/storage_operate/do_snapshot.py +++ b/pkg/deploy/action/storage_operate/do_snapshot.py @@ -46,8 +46,8 @@ class SnapShotRestClient(object): if not self.storage_operate.rest_client.token: self.storage_operate.login() config_params = json.loads(read_helper(DEPLOY_PARAM_PATH)) - storage_dbstore_page_fs = config_params.get("storage_dbstore_page_fs") - page_fs_info = self.storage_operate.query_filesystem_info(storage_dbstore_page_fs) + storage_dbstor_page_fs = config_params.get("storage_dbstor_page_fs") + page_fs_info = self.storage_operate.query_filesystem_info(storage_dbstor_page_fs) page_fs_id = page_fs_info.get("ID") dr_deploy_opt = DRDeployCommon(self.storage_operate) page_pair_info = dr_deploy_opt.query_remote_replication_pair_info(page_fs_id) @@ -157,13 +157,13 @@ def main(mode, ip_address, main_path): config_params = json.loads(read_helper(DEPLOY_PARAM_PATH)) vstore_id = config_params.get("vstore_id", 0) - dbstore_fs_vstore_id = config_params.get("dbstore_fs_vstore_id", 0) + dbstor_fs_vstore_id = config_params.get("dbstor_fs_vstore_id", 0) fs_names_type = [] for fs_type, fs_name in config_params.items(): if fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_type == "storage_share_fs" and fs_name: fs_names_type.append((fs_name, fs_type, vstore_id)) - elif fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_type == "storage_dbstore_fs" and fs_name: - fs_names_type.append((fs_name, fs_type, dbstore_fs_vstore_id)) + elif fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_type == "storage_dbstor_fs" and fs_name: + fs_names_type.append((fs_name, fs_type, dbstor_fs_vstore_id)) elif fs_type.endswith('_fs') and fs_type.startswith("storage") and fs_name: fs_names_type.append((fs_name, fs_type, 0)) fs_names = [ diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py index 
1feb94542954a57dc40dba01ae8dcdece3b3aed3..8f98ee285100a177a75f5e0db91829f4478aa3f4 100644 --- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py +++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy.py @@ -408,8 +408,8 @@ class DRDeploy(object): health_status = None running_status = None - remote_vstore_id = self.dr_deploy_info.get("remote_dbstore_fs_vstore_id") - local_vstore_id = self.dr_deploy_info.get("dbstore_fs_vstore_id") + remote_vstore_id = self.dr_deploy_info.get("remote_dbstor_fs_vstore_id") + local_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id") vstore_pair_id = self.dr_deploy_info.get("vstore_pair_id") domain_id = domain_info.get("ID") @@ -470,17 +470,17 @@ class DRDeploy(object): vstore_pair_id = vstore_pair_info.get("ID") hyper_domain_id = self.dr_deploy_info.get("hyper_domain_id") remote_pool_id = self.dr_deploy_info.get("remote_pool_id") - dbstore_fs_vstore_id = self.dr_deploy_info.get("dbstore_fs_vstore_id") + dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id") filesystem_pair_id = self.dr_deploy_info.get("ulog_fs_pair_id") - storage_dbstore_fs = self.dr_deploy_info.get("storage_dbstore_fs") - dbstore_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(storage_dbstore_fs, - dbstore_fs_vstore_id) - dbstore_fs_id = dbstore_fs_info.get("ID") + storage_dbstor_fs = self.dr_deploy_info.get("storage_dbstor_fs") + dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(storage_dbstor_fs, + dbstor_fs_vstore_id) + dbstor_fs_id = dbstor_fs_info.get("ID") if filesystem_pair_id is None: - filesystem_pair_infos = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstore_fs_id) + filesystem_pair_infos = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id) if filesystem_pair_infos is None: filesystem_pair_info = self.dr_deploy_opt.create_hyper_metro_filesystem_pair( - filesystem_id=dbstore_fs_id, pool_id=remote_pool_id, vstore_pair_id=vstore_pair_id) + 
filesystem_id=dbstor_fs_id, pool_id=remote_pool_id, vstore_pair_id=vstore_pair_id) task_id = filesystem_pair_info.get("taskId") self.record_deploy_process("create_metro_fs_pair", "running") self.dr_deploy_opt.query_omtask_process(task_id, timeout=120) @@ -489,7 +489,7 @@ class DRDeploy(object): pair_id=filesystem_pair_id) if filesystem_pair_info is None: filesystem_pair_info = self.dr_deploy_opt.create_hyper_metro_filesystem_pair( - filesystem_id=dbstore_fs_id, pool_id=remote_pool_id, vstore_pair_id=vstore_pair_id) + filesystem_id=dbstor_fs_id, pool_id=remote_pool_id, vstore_pair_id=vstore_pair_id) task_id = filesystem_pair_info.get("taskId") self.record_deploy_process("create_metro_fs_pair", "running") self.dr_deploy_opt.query_omtask_process(task_id, timeout=120) @@ -501,7 +501,7 @@ class DRDeploy(object): (exist_domain_id, hyper_domain_id, filesystem_pair_info) LOG.error(err_msg) raise Exception(err_msg) - filesystem_pair_infos = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstore_fs_id) + filesystem_pair_infos = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id) if len(filesystem_pair_infos) != 1: err_msg = "The metro filesystem pair create failed, Details: %s" % filesystem_pair_infos @@ -900,15 +900,15 @@ class DRDeploy(object): :param ulog_fs_pair_ready_flag: :return: """ - dbstore_fs_name = self.dr_deploy_info.get("storage_dbstore_fs") - dbstore_fs_vstore_id = self.dr_deploy_info.get("dbstore_fs_vstore_id") - dbstore_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info( - dbstore_fs_name, vstore_id=dbstore_fs_vstore_id) + dbstor_fs_name = self.dr_deploy_info.get("storage_dbstor_fs") + dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id") + dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info( + dbstor_fs_name, vstore_id=dbstor_fs_vstore_id) ulog_fs_pair_info = None - if dbstore_fs_info and not ulog_fs_pair_ready_flag: - dbstore_fs_id = dbstore_fs_info.get("ID") + if dbstor_fs_info 
and not ulog_fs_pair_ready_flag: + dbstor_fs_id = dbstor_fs_info.get("ID") try: - ulog_fs_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstore_fs_id) + ulog_fs_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id) except Exception as err: self.record_deploy_process("create_metro_fs_pair", "failed", code=-1, description=str(err)) @@ -934,7 +934,7 @@ class DRDeploy(object): if running_status == FilesystemPairRunningStatus.Normal \ and health_status == HealthStatus.Normal \ and sync_progress == "100": - LOG.info("Hyper metro filesystem[%s] pair ready", dbstore_fs_name) + LOG.info("Hyper metro filesystem[%s] pair ready", dbstor_fs_name) self.record_deploy_process("sync_metro_fs_pair", "success") ulog_fs_pair_ready_flag = True return ulog_fs_pair_info, ulog_fs_pair_ready_flag @@ -945,14 +945,14 @@ class DRDeploy(object): :param page_fs_pair_ready_flag: :return: """ - dbstore_page_fs_name = self.dr_deploy_info.get("storage_dbstore_page_fs") - dbstore_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info( - dbstore_page_fs_name) + dbstor_page_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs") + dbstor_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info( + dbstor_page_fs_name) page_fs_pair_info = None - if dbstore_page_fs_info and not page_fs_pair_ready_flag: + if dbstor_page_fs_info and not page_fs_pair_ready_flag: self.record_deploy_process("create_rep_page_fs_pair", "success") - dbstore_page_fs_id = dbstore_page_fs_info.get("ID") - page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(dbstore_page_fs_id) + dbstor_page_fs_id = dbstor_page_fs_info.get("ID") + page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(dbstor_page_fs_id) if page_fs_pair_info: # 当已经设置从端可读写状态,且为分裂状态时,表示当前同步完成 page_fs_pair_id = page_fs_pair_info[0].get("ID") @@ -964,7 +964,7 @@ class DRDeploy(object): replication_progress = 
remote_replication_pair_info.get("REPLICATIONPROGRESS") self.record_deploy_process("sync_rep_page_fs_pair", str(replication_progress) + "%") if secres_access == SecresAccess.ReadAndWrite and running_status == ReplicationRunningStatus.Split: - LOG.info("Remote replication pair[%s] ready.", dbstore_page_fs_name) + LOG.info("Remote replication pair[%s] ready.", dbstor_page_fs_name) self.record_deploy_process("sync_rep_page_fs_pair", "success") page_fs_pair_ready_flag = True return page_fs_pair_info, page_fs_pair_ready_flag @@ -1225,12 +1225,12 @@ class DRDeploy(object): self.do_lock_instance_for_backup() self.do_full_check_point() self.do_flush_table_with_read_lock() - dbstore_page_fs_name = self.dr_deploy_info.get("storage_dbstore_page_fs") + dbstor_page_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs") metadata_fs_name = self.dr_deploy_info.get("storage_metadata_fs") self.sync_speed = int(SPEED.get(self.dr_deploy_info.get("sync_speed", "medium"))) self.deploy_hyper_metro_pair() try: - self.page_fs_pair_id = self.deploy_remote_replication_pair(dbstore_page_fs_name, True) + self.page_fs_pair_id = self.deploy_remote_replication_pair(dbstor_page_fs_name, True) except Exception as err: self.record_deploy_process("create_rep_page_fs_pair", "failed", code=-1, description=str(err)) raise err diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py index 6b51a891e52dc0edd2a04348330d307a53daf7e3..f7129d0527ce6ef3fc46f894bae9a25e95ce83ed 100644 --- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py +++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_pre_check.py @@ -248,7 +248,7 @@ class DRDeployPreCheck(object): def check_standby_filesystem(self) -> list: """ 检查备端ulog文件系统所在租户下文件系统个数,要求为空。当前如果是主端直接返回 - 检查备站点dbstore page文件系统是否存在 + 检查备站点dbstor page文件系统是否存在 元数据非归一场景检查元数据文件系统是否存在 :return: """ @@ -261,35 +261,35 @@ class 
DRDeployPreCheck(object): LOG.info("Check standby filesystem nums start.") cantian_in_container = self.deploy_params.get("cantian_in_container") metadata_fs_name = self.local_conf_params.get("storage_metadata_fs") - dbstore_page_fs = self.local_conf_params.get("storage_dbstore_page_fs") + dbstor_page_fs = self.local_conf_params.get("storage_dbstor_page_fs") metadata_in_cantian = self.local_conf_params.get("mysql_metadata_in_cantian") name_suffix = self.local_conf_params.get("name_suffix", "") if name_suffix: - dbstore_page_fs = RepFileSystemNameRule.NamePrefix + dbstore_page_fs + name_suffix + dbstor_page_fs = RepFileSystemNameRule.NamePrefix + dbstor_page_fs + name_suffix if name_suffix and not metadata_in_cantian: metadata_fs_name = RepFileSystemNameRule.NamePrefix + metadata_fs_name + name_suffix - remote_fs_vstore_id = self.remote_conf_params.get("dbstore_fs_vstore_id") + remote_fs_vstore_id = self.remote_conf_params.get("dbstor_fs_vstore_id") if self.ulog_fs_pair_id is None: - dbstore_fs = self.local_conf_params.get("storage_dbstore_fs") + dbstor_fs = self.local_conf_params.get("storage_dbstor_fs") remote_ulog_fs_info = self.remote_operate.\ - query_remote_filesystem_info(fs_name=dbstore_fs, vstore_id=remote_fs_vstore_id) + query_remote_filesystem_info(fs_name=dbstor_fs, vstore_id=remote_fs_vstore_id) if remote_ulog_fs_info: - err_msg.append("Standby dbstore filesystem[%s] exist, filesystem id[%s]." % - (dbstore_fs, remote_ulog_fs_info.get("ID"))) + err_msg.append("Standby dbstor filesystem[%s] exist, filesystem id[%s]." % + (dbstor_fs, remote_ulog_fs_info.get("ID"))) else: if cantian_in_container == "0": err_msg.append("Standby vstore[%s] exist filesystems." % remote_fs_vstore_id) if self.page_fs_pair_id is None: - remote_dbstore_page_fs_info = self.remote_operate.\ - query_remote_filesystem_info(fs_name=dbstore_page_fs, vstore_id="0") - if remote_dbstore_page_fs_info: - err_msg.append("Standby dbstore page filesystem[%s] exist, filesystem id[%s]." 
% - (dbstore_page_fs, remote_dbstore_page_fs_info.get("ID"))) + remote_dbstor_page_fs_info = self.remote_operate.\ + query_remote_filesystem_info(fs_name=dbstor_page_fs, vstore_id="0") + if remote_dbstor_page_fs_info: + err_msg.append("Standby dbstor page filesystem[%s] exist, filesystem id[%s]." % + (dbstor_page_fs, remote_dbstor_page_fs_info.get("ID"))) else: if cantian_in_container == "0": - err_msg.append("Standby dbstore page filesystem[%s] exist." % dbstore_page_fs) + err_msg.append("Standby dbstor page filesystem[%s] exist." % dbstor_page_fs) if self.meta_fs_pair_id is None: remote_metadata_fs_info = self.remote_operate.\ @@ -336,18 +336,18 @@ class DRDeployPreCheck(object): if self.site == "standby": return err_msg LOG.info("Check disaster status start.") - dbstore_fs = self.local_conf_params.get("storage_dbstore_fs") - dbstore_fs_vstore_id = self.local_conf_params.get("dbstore_fs_vstore_id") - remote_dbstore_fs_vstore_id = self.local_conf_params.get("remote_dbstore_fs_vstore_id") - dbstore_page_fs = self.local_conf_params.get("storage_dbstore_page_fs") + dbstor_fs = self.local_conf_params.get("storage_dbstor_fs") + dbstor_fs_vstore_id = self.local_conf_params.get("dbstor_fs_vstore_id") + remote_dbstor_fs_vstore_id = self.local_conf_params.get("remote_dbstor_fs_vstore_id") + dbstor_page_fs = self.local_conf_params.get("storage_dbstor_page_fs") metadata_fs = self.local_conf_params.get("storage_metadata_fs") metadata_in_cantian = self.local_conf_params.get("mysql_metadata_in_cantian") - dbstore_fs_info = self.storage_opt.query_filesystem_info(dbstore_fs, dbstore_fs_vstore_id) - dbstore_fs_id = dbstore_fs_info.get("ID") - dbstore_page_fs_info = self.storage_opt.query_filesystem_info(dbstore_page_fs) + dbstor_fs_info = self.storage_opt.query_filesystem_info(dbstor_fs, dbstor_fs_vstore_id) + dbstor_fs_id = dbstor_fs_info.get("ID") + dbstor_page_fs_info = self.storage_opt.query_filesystem_info(dbstor_page_fs) metadata_fs_info = 
self.storage_opt.query_filesystem_info(metadata_fs) metadata_fs_id = metadata_fs_info.get("ID") - page_fs_id = dbstore_page_fs_info.get("ID") + page_fs_id = dbstor_page_fs_info.get("ID") domain_infos = self.deploy_operate.query_hyper_metro_domain_info() cantian_in_container = self.deploy_params.get("cantian_in_container") if cantian_in_container == "0": @@ -384,53 +384,53 @@ class DRDeployPreCheck(object): page_pair_info = self.deploy_operate.query_remote_replication_pair_info(page_fs_id) if page_pair_info: if cantian_in_container == "0": - err_msg.append("Filesystem[%s] replication pair is exist." % dbstore_page_fs) + err_msg.append("Filesystem[%s] replication pair is exist." % dbstor_page_fs) else: if len(page_pair_info) == 1 and page_pair_info[0].get("REMOTEDEVICEID") == self.remote_device_id: self.page_fs_pair_id = page_pair_info[0].get("ID") else: _err_msg = ("Filesystem[%s] replication pair is exist, but match failed, " - "details: %s") % (dbstore_page_fs, page_pair_info) + "details: %s") % (dbstor_page_fs, page_pair_info) err_msg.append(_err_msg) vstore_pair_infos = self.deploy_operate.query_hyper_metro_vstore_pair_info() for vstore_pair_info in vstore_pair_infos: exist_remote_vstoreid = vstore_pair_info.get("REMOTEVSTOREID") exist_local_vstoreid = vstore_pair_info.get("LOCALVSTOREID") - if exist_local_vstoreid == dbstore_fs_vstore_id and remote_dbstore_fs_vstore_id == exist_remote_vstoreid: + if exist_local_vstoreid == dbstor_fs_vstore_id and remote_dbstor_fs_vstore_id == exist_remote_vstoreid: if cantian_in_container == "0": - err_msg.append("Vstore[%s] metro pair is exist." % dbstore_fs_vstore_id) + err_msg.append("Vstore[%s] metro pair is exist." % dbstor_fs_vstore_id) else: if vstore_pair_info.get("DOMAINNAME") == domain_name: domain_id = vstore_pair_info.get("DOMAINID") if domain_id != self.hyper_domain_id: _err_msg = "Vstore[%s] metro pair is exist, " \ - "but domain id[%s] matching failed." 
% (dbstore_fs_vstore_id, domain_id) + "but domain id[%s] matching failed." % (dbstor_fs_vstore_id, domain_id) err_msg.append(_err_msg) self.vstore_pair_id = vstore_pair_info.get("ID") - LOG.info("Vstore[%s] metro pair is exist." % dbstore_fs_vstore_id) + LOG.info("Vstore[%s] metro pair is exist." % dbstor_fs_vstore_id) else: _err_msg = "Vstore[%s] metro pair is exist, " \ - "but domain name[%s] matching failed." % (dbstore_fs_vstore_id, domain_name) + "but domain name[%s] matching failed." % (dbstor_fs_vstore_id, domain_name) err_msg.append(_err_msg) break else: - system_count = self.remote_operate.query_remote_storage_vstore_filesystem_num(remote_dbstore_fs_vstore_id) + system_count = self.remote_operate.query_remote_storage_vstore_filesystem_num(remote_dbstor_fs_vstore_id) if system_count and system_count.get("COUNT") != "0": err_msg.append("Standby vstore[%s] exist filesystems, count[%s]" - % (remote_dbstore_fs_vstore_id, system_count.get("COUNT"))) + % (remote_dbstor_fs_vstore_id, system_count.get("COUNT"))) - ulog_pair_info = self.deploy_operate.query_hyper_metro_filesystem_pair_info(dbstore_fs_id) + ulog_pair_info = self.deploy_operate.query_hyper_metro_filesystem_pair_info(dbstor_fs_id) if ulog_pair_info: if cantian_in_container == "0": - err_msg.append("Filesystem[%s] metro pair is exist." % dbstore_fs) + err_msg.append("Filesystem[%s] metro pair is exist." % dbstor_fs) else: pair_info = ulog_pair_info[0] if pair_info.get("DOMAINNAME") == domain_name: self.ulog_fs_pair_id = pair_info.get("ID") - LOG.info("Filesystem[%s] metro pair is exist." % dbstore_fs) + LOG.info("Filesystem[%s] metro pair is exist." % dbstor_fs) else: _err_msg = "Filesystem[%s] metro pair is exist, " \ - "but domain name[%s] matching failed." % (dbstore_fs, domain_name) + "but domain name[%s] matching failed." 
% (dbstor_fs, domain_name) err_msg.append(_err_msg) deploy_mode = self.local_conf_params.get("deploy_mode") @@ -457,10 +457,10 @@ class DRDeployPreCheck(object): check_list = [ "cluster_id", "cluster_name", - "storage_dbstore_fs", - "storage_dbstore_page_fs", + "storage_dbstor_fs", + "storage_dbstor_page_fs", "mysql_metadata_in_cantian", - "dbstore_fs_vstore_id", + "dbstor_fs_vstore_id", "deploy_mode", ] if not os.path.exists(DEPLOY_PARAM_FILE): @@ -499,7 +499,7 @@ class DRDeployPreCheck(object): remote_dr_deploy_param = conf_params.get("dr_deploy").get(remote_site) remote_dr_deploy_param["domain_name"] = conf_params.get("dr_deploy").get("domain_name", "") remote_pool_id = conf_params.get("dr_deploy").get("standby").get("pool_id") - remote_dbstore_fs_vstore_id = conf_params.get("dr_deploy").get("standby").get("dbstore_fs_vstore_id") + remote_dbstor_fs_vstore_id = conf_params.get("dr_deploy").get("standby").get("dbstor_fs_vstore_id") name_suffix = conf_params.get("dr_deploy").get("standby").get("name_suffix", "") del conf_params["dr_deploy"] self.local_conf_params = copy.deepcopy(conf_params) @@ -507,7 +507,7 @@ class DRDeployPreCheck(object): if self.site == "active": self.local_conf_params.update({ "remote_pool_id": remote_pool_id, - "remote_dbstore_fs_vstore_id": remote_dbstore_fs_vstore_id, + "remote_dbstor_fs_vstore_id": remote_dbstor_fs_vstore_id, "name_suffix": name_suffix }) self.remote_conf_params = copy.deepcopy(conf_params) @@ -530,7 +530,7 @@ class DRDeployPreCheck(object): "remote_pool_id": self.local_conf_params.get("remote_pool_id"), "remote_cluster_name": self.local_conf_params.get("remote_cluster_name"), "remote_device_id": self.remote_device_id, - "remote_dbstore_fs_vstore_id": self.local_conf_params.get("remote_dbstore_fs_vstore_id"), + "remote_dbstor_fs_vstore_id": self.local_conf_params.get("remote_dbstor_fs_vstore_id"), "domain_name": self.local_conf_params.get("domain_name"), "hyper_domain_id": self.hyper_domain_id, "vstore_pair_id": 
self.vstore_pair_id, @@ -538,9 +538,9 @@ class DRDeployPreCheck(object): } name_suffix = self.local_conf_params.get("name_suffix") if name_suffix and self.site == "standby": - self.local_conf_params["storage_dbstore_page_fs"] = RepFileSystemNameRule.NamePrefix + \ + self.local_conf_params["storage_dbstor_page_fs"] = RepFileSystemNameRule.NamePrefix + \ self.local_conf_params[ - "storage_dbstore_page_fs"] + name_suffix + "storage_dbstor_page_fs"] + name_suffix if name_suffix and self.site == "standby" and not self.local_conf_params.get("mysql_metadata_in_cantian"): self.local_conf_params["mysql_metadata_in_cantian"] = RepFileSystemNameRule.NamePrefix + \ self.local_conf_params[ @@ -578,11 +578,11 @@ class DRDeployPreCheck(object): self.deploy_params = read_json_config(DEPLOY_PARAM_FILE) check_result.extend(self.check_master_cantian_status()) check_result.extend(self.check_file_system_status( - fs_name=self.local_conf_params.get("storage_dbstore_page_fs"), + fs_name=self.local_conf_params.get("storage_dbstor_page_fs"), vstore_id="0")) check_result.extend(self.check_file_system_status( - fs_name=self.local_conf_params.get("storage_dbstore_fs"), - vstore_id=self.local_conf_params.get("dbstore_fs_vstore_id"))) + fs_name=self.local_conf_params.get("storage_dbstor_fs"), + vstore_id=self.local_conf_params.get("dbstor_fs_vstore_id"))) check_result.extend(self.check_active_exist_params()) if not self.local_conf_params.get("mysql_metadata_in_cantian"): check_result.extend(self.check_file_system_status( @@ -647,16 +647,16 @@ class DRDeployPreCheck(object): if pre_install.check_main() == 1: check_result.append("Params check failed") conf_params = read_json_config(self.conf) - dbstore_fs_vstore_id = conf_params.get("dbstore_fs_vstore_id") - remote_dbstore_fs_vstore_id = conf_params.get("dr_deploy").get("standby").get("dbstore_fs_vstore_id") - if dbstore_fs_vstore_id != remote_dbstore_fs_vstore_id: - check_result.append("Inconsistent dbstor fs vstore id, %s and %s" % 
(dbstore_fs_vstore_id, - remote_dbstore_fs_vstore_id)) + dbstor_fs_vstore_id = conf_params.get("dbstor_fs_vstore_id") + remote_dbstor_fs_vstore_id = conf_params.get("dr_deploy").get("standby").get("dbstor_fs_vstore_id") + if dbstor_fs_vstore_id != remote_dbstor_fs_vstore_id: + check_result.append("Inconsistent dbstor fs vstore id, %s and %s" % (dbstor_fs_vstore_id, + remote_dbstor_fs_vstore_id)) return check_result try: - self.deploy_operate.storage_opt.query_vstore_info(dbstore_fs_vstore_id) + self.deploy_operate.storage_opt.query_vstore_info(dbstor_fs_vstore_id) except Exception as err: - check_result.append("Vstore[%s] is not exist, details: %s" % (dbstore_fs_vstore_id, str(err))) + check_result.append("Vstore[%s] is not exist, details: %s" % (dbstor_fs_vstore_id, str(err))) return check_result check_result.extend(self.check_nfs_lif_info()) LOG.info("Param check success") diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py index b9d47a31b40671fabb099c40ec7de26134fd5ea0..3d1b8547e83c8d37bd65220ad6b61c61decc9429 100644 --- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py +++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_progress_query.py @@ -164,11 +164,11 @@ class DrStatusCheck(object): return "Unknown" def query_page_fs_pair_status(self) -> str: - dbstore_page_fs_name = self.dr_deploy_info.get("storage_dbstore_page_fs") + dbstor_page_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs") try: - dbstore_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(dbstore_page_fs_name) - dbstore_page_fs_id = dbstore_page_fs_info.get("ID") - page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(dbstore_page_fs_id) + dbstor_page_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(dbstor_page_fs_name) + dbstor_page_fs_id = dbstor_page_fs_info.get("ID") + 
page_fs_pair_info = self.dr_deploy_opt.query_remote_replication_pair_info(dbstor_page_fs_id) if page_fs_pair_info: if page_fs_pair_info[0].get("HEALTHSTATUS") == HealthStatus.Normal: return "Normal" diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py index d54a007e4b4a9b73f33acf047ebfec90d27e4969..4b1ec60625aee9b4f9976fe955787ac545a46e84 100644 --- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py +++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_deploy_switchover.py @@ -23,7 +23,7 @@ CANTIAN_DISASTER_RECOVERY_STATUS_CHECK = 'echo -e "select DATABASE_ROLE from DV_ 'su -s /bin/bash - %s -c \'source ~/.bashrc && '\ 'export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && '\ 'python3 -B %s\'' % (RUN_USER, EXEC_SQL) -DBSTORE_CHECK_VERSION_FILE = "/opt/cantian/dbstor/tools/cs_baseline.sh" +DBSTOR_CHECK_VERSION_FILE = "/opt/cantian/dbstor/tools/cs_baseline.sh" def load_json_file(file_path): @@ -391,8 +391,8 @@ class DRRecover(SwitchOver): return LOG.info("Standby purge backup by cms command success.") - def do_dbstore_baseline(self): - cmd = "sh %s getbase %s" % (DBSTORE_CHECK_VERSION_FILE, self.cluster_name) + def do_dbstor_baseline(self): + cmd = "sh %s getbase %s" % (DBSTOR_CHECK_VERSION_FILE, self.cluster_name) LOG.info("begin to execute command[%s].", cmd) return_code, output, stderr = exec_popen(cmd, timeout=600) if return_code: @@ -492,7 +492,7 @@ class DRRecover(SwitchOver): err_msg = "standby cms res stop cantian error." 
LOG.error(err_msg) raise Exception(err_msg) - self.single_write = self.do_dbstore_baseline() + self.single_write = self.do_dbstor_baseline() self.dr_deploy_opt.change_fs_hyper_metro_domain_second_access( self.hyper_domain_id, DomainAccess.ReadOnly) try: diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py index 192416a902591359041afb63d9f5371157092685..1705c1ad1731075689684195e6515b05b6acc0ca 100644 --- a/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py +++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/dr_undeploy.py @@ -88,28 +88,28 @@ class UNDeploy(object): 删除双活pair 文件系统的pair id """ # 双活文件系统租户id - dbstore_fs_vstore_id = self.dr_deploy_info.get("dbstore_fs_vstore_id") - if not dbstore_fs_vstore_id: + dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id") + if not dbstor_fs_vstore_id: return # 双活文件系统名字 - storage_dbstore_fs = self.dr_deploy_info.get("storage_dbstore_fs") - if not storage_dbstore_fs: + storage_dbstor_fs = self.dr_deploy_info.get("storage_dbstor_fs") + if not storage_dbstor_fs: return - dbstore_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(storage_dbstore_fs, - dbstore_fs_vstore_id) - if not dbstore_fs_info: - LOG.info("Filesystem[%s] is not exist.", storage_dbstore_fs) + dbstor_fs_info = self.dr_deploy_opt.storage_opt.query_filesystem_info(storage_dbstor_fs, + dbstor_fs_vstore_id) + if not dbstor_fs_info: + LOG.info("Filesystem[%s] is not exist.", storage_dbstor_fs) return # 双活文件系统id - dbstore_fs_id = dbstore_fs_info.get("ID") + dbstor_fs_id = dbstor_fs_info.get("ID") # 通过双活文件系统id查询双活文件系统pair id - hyper_filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstore_fs_id) + hyper_filesystem_pair_info = self.dr_deploy_opt.query_hyper_metro_filesystem_pair_info(dbstor_fs_id) if hyper_filesystem_pair_info: hyper_filesystem_pair_id = hyper_filesystem_pair_info[0].get("ID") try: 
- self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(hyper_filesystem_pair_id, dbstore_fs_vstore_id) + self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(hyper_filesystem_pair_id, dbstor_fs_vstore_id) except Exception as err: - self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(hyper_filesystem_pair_id, dbstore_fs_vstore_id, + self.dr_deploy_opt.delete_hyper_metro_filesystem_pair(hyper_filesystem_pair_id, dbstor_fs_vstore_id, is_local_del=True) LOG.info("Delete Hyper Metro filesystem pair id[%s] success", hyper_filesystem_pair_id) LOG.info("Delete Hyper Metro filesystem pair id success") @@ -129,11 +129,11 @@ class UNDeploy(object): return False else: raise err - dbstore_fs_vstore_id = self.dr_deploy_info.get("dbstore_fs_vstore_id") - file_system_count = self.dr_deploy_opt.query_hyper_metro_filesystem_count_info(dbstore_fs_vstore_id) + dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id") + file_system_count = self.dr_deploy_opt.query_hyper_metro_filesystem_count_info(dbstor_fs_vstore_id) if file_system_count and file_system_count.get("COUNT") != "0": msg = "Delete Hyper Metro pair id[id:%s], " \ - "but there are also other pair file systems" % dbstore_fs_vstore_id + "but there are also other pair file systems" % dbstor_fs_vstore_id LOG.info(msg) return False try: @@ -286,15 +286,15 @@ class UNDeploy(object): LOG.info("Stop Cantian engine success.") if node_id == "0": LOG.info("Start to delete dr deploy!") - rep_fs_name = self.dr_deploy_info.get("storage_dbstore_page_fs") + rep_fs_name = self.dr_deploy_info.get("storage_dbstor_page_fs") mysql_metadata_in_cantian = self.dr_deploy_info.get("mysql_metadata_in_cantian") metadata_fs = self.dr_deploy_info.get("storage_metadata_fs") self.delete_replication() self.delete_filesystem(vstore_id="0", fs_name=rep_fs_name) if not mysql_metadata_in_cantian: self.delete_filesystem(vstore_id="0", fs_name=metadata_fs) - fs_name = self.dr_deploy_info.get("storage_dbstore_fs") - dbstor_fs_vstore_id = 
self.dr_deploy_info.get("dbstore_fs_vstore_id") + fs_name = self.dr_deploy_info.get("storage_dbstor_fs") + dbstor_fs_vstore_id = self.dr_deploy_info.get("dbstor_fs_vstore_id") self.delete_hyper() try: self.delete_filesystem(dbstor_fs_vstore_id, fs_name) diff --git a/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py b/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py index 6d0fea3dc05f0b4c8d46fe3e5f3f149588afe767..e4a9473f03891c8157eb34dda1f02e74e6f45dbc 100644 --- a/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py +++ b/pkg/deploy/action/storage_operate/dr_deploy_operate/update_dr_params.py @@ -20,13 +20,13 @@ class UpdateDRParams(object): def __init__(self): self.deploy_params = read_json_config(DEPLOY_PARAM_FILE) - self.storage_dbstore_page_fs = self.deploy_params.get("storage_dbstore_page_fs") - self.storage_dbstore_fs = self.deploy_params.get("storage_dbstore_fs") + self.storage_dbstor_page_fs = self.deploy_params.get("storage_dbstor_page_fs") + self.storage_dbstor_fs = self.deploy_params.get("storage_dbstor_fs") self.storage_metadata_fs = self.deploy_params.get("storage_metadata_fs") self.storage_share_fs = self.deploy_params.get("storage_share_fs") self.cluster_name = self.deploy_params.get("cluster_name") self.mysql_metadata_in_cantian = self.deploy_params.get("mysql_metadata_in_cantian") - self.dbstore_fs_vstore_id = self.deploy_params.get("dbstore_fs_vstore_id") + self.dbstor_fs_vstore_id = self.deploy_params.get("dbstor_fs_vstore_id") self.deploy_mode = self.deploy_params.get("deploy_mode") @staticmethod @@ -41,8 +41,8 @@ class UpdateDRParams(object): def copy_dr_deploy_param_file(self): """ - 处理 dbstore_unify 模式下的 dr_deploy_param.json 文件读取逻辑, - 如果不是 dbstore_unify 模式,则从共享路径中读取文件。 + 处理 dbstor 模式下的 dr_deploy_param.json 文件读取逻辑, + 如果不是 dbstor 模式,则从共享路径中读取文件。 :return: dr_deploy_param_file 的路径 """ if self.deploy_mode == "dbstor": @@ -80,7 +80,7 @@ class UpdateDRParams(object): LOG.error(err_msg) 
raise Exception(err_msg) else: - # 处理非 dbstore_unify 模式的逻辑 + # 处理非 dbstor 模式的逻辑 share_path = f"/mnt/dbdata/remote/metadata_{self.storage_metadata_fs}" dr_deploy_param_file = os.path.join(share_path, "dr_deploy_param.json") diff --git a/pkg/deploy/action/storage_operate/dr_k8s_switch.py b/pkg/deploy/action/storage_operate/dr_k8s_switch.py index cea762859bcac03e824ac36d0a939cf7d8ac8126..5a22cdb4328c06d0caa0e91a05c7ac182ea5111a 100644 --- a/pkg/deploy/action/storage_operate/dr_k8s_switch.py +++ b/pkg/deploy/action/storage_operate/dr_k8s_switch.py @@ -9,6 +9,7 @@ import sys import time import logging import traceback +import collections from datetime import datetime import yaml @@ -31,9 +32,12 @@ CANTIAN_DATABASE_ROLE_CHECK = ("echo -e 'select DATABASE_ROLE from DV_LRPL_DETAI "export LD_LIBRARY_PATH=/opt/cantian/dbstor/lib:${LD_LIBRARY_PATH} && " "python3 -B %s'") -DBSTORE_CHECK_VERSION_FILE = "/opt/cantian/dbstor/tools/cs_baseline.sh" +DBSTOR_CHECK_VERSION_FILE = "/opt/cantian/dbstor/tools/cs_baseline.sh" DEPLOY_LOG_FILE = "/opt/cantian/log/deploy/deploy.log" +KUBECTL_TRY_COUNT = 5 +CHECK_DEL_TRY_COUNT = 30 + class LogGer: def __init__(self, name, file_name): @@ -188,20 +192,21 @@ def check_cantian_yaml_config(value, cantian_path): with open(cantian_path, 'r') as f: configs = yaml.safe_load_all(f) for config in configs: - if config.get("kind") == "Deployment": - cantian_flag = True - if "namespace" not in value: - value["namespace"] = config.get("metadata").get("namespace") - value["pod_name"].append(config.get("metadata").get("name").strip()) - volumes = config.get("spec").get("template").get("spec").get("volumes") - for volume in volumes: - if volume.get("name") == "config-volume": - data = { - "name": volume.get("configMap").get("name").strip(), - "pod_name": config.get("metadata").get("name").strip() - } - value["config_map"].append(data) - break + if config.get("kind") != "Deployment": + continue + cantian_flag = True + if "namespace" not in value: + 
value["namespace"] = config.get("metadata").get("namespace") + value["pod_name"].append(config.get("metadata").get("name").strip()) + volumes = config.get("spec").get("template").get("spec").get("volumes") + for volume in volumes: + if volume.get("name") == "config-volume": + data = { + "name": volume.get("configMap").get("name").strip(), + "pod_name": config.get("metadata").get("name").strip() + } + value["config_map"].append(data) + break return cantian_flag @@ -222,16 +227,20 @@ class K8sDRContainer: self.ulog_pair_list = [] self.ssh_cmd_end = " ; echo last_cmd=$?" self.vstore_pair_list = [] - self.single_pod = {} + self.single_pod = collections.defaultdict(list) self.abnormal_pod = {} self.check_flag = True - self.action_list = ["delete", "switch_over", "fail_over", "recover"] + self.action_list = ["delete", "switch_over", "fail_over", "recover", "unconnected_switch_delete", + "unconnected_fail_over", "unconnected_switch", "unconnected_recover", + "unconnected_check","unconnected_delete"] self.ip_info = "" self.config_count = 0 self.ssh_expect = "]# " self.change_apply = False self.dm_logic_ip = [] self.skip_login = False + self.unconnected_ip = [] + self.is_unconnected_flag = False def warning_tip(self): warning_msgs = { @@ -255,7 +264,24 @@ class K8sDRContainer: "\tAfter this operation,\n" "\tplease ensure that the original active cluster is not accessed for write operations,\n" "\totherwise it will cause data inconsistency.\n", - "delete": "\tDeletion operation will delete the all Cantian nodes under hyper metro domain.\n" + "delete": "\tDeletion operation will delete the all Cantian nodes under hyper metro domain.\n", + "unconnected_switch_delete": "\tDeletion operation will delete the all Cantian nodes under hyper " + "metro domain.\n", + "unconnected_delete": "\tDeletion operation will delete the all Cantian nodes under hyper metro domain.\n", + "unconnected_switch": "\tSwitchover operation will be performed.\n" + "\tThe current operation will cause the 
active-standby switch,\n" + "\tplease make sure the standby data is consistent with the main data,\n" + "\tif the data is not consistent, " + "the execution of the switch operation may cause data loss,\n" + "\tplease make sure the standby and DeviceManager are in good condition, " + "if not, the new active will hang after switch over.\n" + "\tAfter the command is executed, check the replay status on the standby " + "side to determine if the active-standby switch was successful.\n", + "unconnected_recover": "\tRecover operation will downgrade current station to standby,\n" + "\tsynchronize data from remote to local, and cover local data.\n" + "\tEnsure remote data consistency to avoid data loss.\n", + "unconnected_check": "\tThis operation will check query single_even write tags.\n", + "unconnected_fail_over": "\tThis operation will check pods' stat\n" } if self.action in warning_msgs: warning("Warning:") @@ -301,7 +327,7 @@ class K8sDRContainer: def get_self_ip_info(self): cmd = "hostname -I" - ret_code, ret, stderr = exec_popen(cmd, 20) + ret_code, ret, _ = exec_popen(cmd, 20) if ret_code: err_msg = f"Failed to get ip info for {cmd}" LOG.error(err_msg) @@ -316,32 +342,32 @@ class K8sDRContainer: with open(config_path, 'r') as f: configs = yaml.safe_load_all(f) for config in configs: - if config.get("kind") == "ConfigMap": - config_flag = True - deploy_param = json.loads(config.get("data").get("deploy_param.json")) - domain_name = deploy_param.get("dr_deploy").get("domain_name") - if domain_name != self.domain_name: - err_msg = f"Domain name is not match, server ip[{ip}], config path[{config_yaml}]" - LOG.error(err_msg) - return False - if not self.check_info_contain_logic_ip(deploy_param.get("storage_vlan_ip")): - LOG.error(f"This configuration file[{config_yaml}] " - f"does not belong to the current DM IP address") - return False - if "config_map" in value: - for configMap in value["config_map"]: - if configMap.get("name") == 
config.get("metadata").get("name").strip(): - configMap["node_id"] = deploy_param.get("node_id") - break - if "run_user" in value: - continue - value["storage_dbstore_fs"] = deploy_param.get("storage_dbstore_fs") - value["run_user"] = deploy_param.get("deploy_user").strip().split(":")[0] - value["cluster_name"] = deploy_param.get("cluster_name").strip() - value["storage_dbstore_page_fs"] = deploy_param.get("storage_dbstore_page_fs") - value["dbstore_fs_vstore_id"] = deploy_param.get("dbstore_fs_vstore_id") - value["storage_metadata_fs"] = deploy_param.get("storage_metadata_fs", "") - value["cluster_id"] = deploy_param.get("cluster_id") + if config.get("kind") != "ConfigMap": + continue + config_flag = True + deploy_param = json.loads(config.get("data").get("deploy_param.json")) + domain_name = deploy_param.get("dr_deploy").get("domain_name") + if domain_name != self.domain_name: + LOG.error(f"Domain name is not match, server ip[{ip}], config path[{config_yaml}]") + return False + if not self.check_info_contain_logic_ip(deploy_param.get("storage_vlan_ip")): + LOG.error(f"This configuration file[{config_yaml}] " + f"does not belong to the current DM IP address") + return False + configMaps = value.get("config_map", []) + for configMap in configMaps: + if configMap.get("name") == config.get("metadata").get("name").strip(): + configMap["node_id"] = deploy_param.get("node_id") + break + if "run_user" in value: + continue + value["storage_dbstor_fs"] = deploy_param.get("storage_dbstor_fs") + value["run_user"] = deploy_param.get("deploy_user").strip().split(":")[0] + value["cluster_name"] = deploy_param.get("cluster_name").strip() + value["storage_dbstor_page_fs"] = deploy_param.get("storage_dbstor_page_fs") + value["dbstor_fs_vstore_id"] = deploy_param.get("dbstor_fs_vstore_id") + value["storage_metadata_fs"] = deploy_param.get("storage_metadata_fs", "") + value["cluster_id"] = deploy_param.get("cluster_id") return config_flag def check_k8s_config(self, ip, index, 
dir_path): @@ -409,7 +435,7 @@ class K8sDRContainer: return False return True - def pre_check_link(self): + def pre_check_dir(self): if not os.path.exists(self.server_key_file): err_msg = f"Server key file {self.server_key_file} does not exist" LOG.error(err_msg) @@ -418,20 +444,28 @@ class K8sDRContainer: server_path = os.path.join(CURRENT_PATH, "server") if not os.path.exists(server_path): - os.makedirs(server_path) - config_index = 0 + os.makedirs(server_path) if not remove_dir(server_path): err_msg = f"Failed to remove {server_path}." LOG.error(err_msg) raise Exception(err_msg) + + def pre_check_link(self): + config_index = 0 + server_path = os.path.join(CURRENT_PATH, "server") for ip in self.server_info: if ip in self.ip_info: ssh_client = None islocal = True else: - ssh_client = SshClient(ip, self.server_user, private_key_file=self.server_key_file) - ssh_client.create_client() + ssh_client = SshClient(ip, self.server_user, private_key_file=self.server_key_file) islocal = False + try: + ssh_client.create_client() + except: + LOG.error(f"Failed to create ssh client for {ip}") + self.unconnected_ip.append(ip) + continue self.config_count += len(self.server_info[ip]) for index, value in enumerate(self.server_info[ip]): dir_path = os.path.join(server_path, str(config_index)) @@ -473,13 +507,48 @@ class K8sDRContainer: ssh_client.close_client() LOG.info("pre_check_link finish") + def user_confirm_twice(self, warning_info): + if self.unconnected_ip: + warning(f"Existing unconnected ip: {self.unconnected_ip}, are you sure continue manually?") + if confirm(): + warning(f"Please upload the same package to the node {self.unconnected_ip} and decompress it.\n" + "Then modify the configuration file k8s_dr_config.json.\n" + "Input {warning_info}.") + self.is_unconnected_flag = False + if not confirm(): + raise Exception("User abort the program.") + else: + raise Exception("User abort the program.") + + def pre_check_unconnected_confirm(self): + if self.unconnected_ip: + 
self.is_unconnected_flag = True + for ip in self.unconnected_ip: + del self.server_info[ip] + if self.action in ["delete", "switch_over", "unconnected_switch_delete","unconnected_delete" + "fail_over", "unconnected_fail_over"]: + warning(f"Existing unconnected ip: {self.unconnected_ip}, are you sure continue manually?") + if confirm(): + warning(f"Please upload the same package to the node {self.unconnected_ip} and decompress it.\n" + "Then modify the configuration file k8s_dr_config.json.\n") + if self.action in ["switch_over", "unconnected_switch_delete"]: + warning("Input 'python3 dr_k8s_switch.py unconnected_switch_delete'.") + if self.action in ["delete", "unconnected_delete"]: + warning("Input 'python3 dr_k8s_switch.py unconnected_delete'.") + if self.action in ["fail_over", "unconnected_fail_over"]: + warning("Input 'python3 dr_k8s_switch.py unconnected_fail_over'.") + if not confirm(): + self.check_flag = False + else: + self.check_flag = False + def init_dr_option(self): ping_cmd = f"ping -c 1 -i 1 {self.dm_ip}" - code, out, err = exec_popen(ping_cmd, timeout=20) + code, _, _ = exec_popen(ping_cmd, timeout=20) if code: err_msg = f"Fail to ping DM ip[{self.dm_ip}], maybe DM is fault." 
warning(err_msg) - if self.action != "delete": + if self.action not in ["delete", "unconnected_delete"]: raise Exception(err_msg) if confirm(): self.skip_login = True @@ -490,7 +559,7 @@ class K8sDRContainer: try: self.storage_opt.login() except Exception as e: - if self.action != "delete": + if self.action not in ["delete", "unconnected_delete"]: raise e if "user name or password is incorrect" in str(e) or "verification code is not entered" in str(e): raise e @@ -518,7 +587,8 @@ class K8sDRContainer: return False def check_hyper_metro_domain_role(self, role): - primary_action = ["switch_over", "delete"] + primary_action = ["switch_over", "delete", "unconnected_delete", "unconnected_switch", + "unconnected_switch_delete"] if self.action in primary_action and role != ConfigRole.Primary: LOG.error(f"Current action[{self.action}] not supporting operations on the standby side") self.check_flag = False @@ -548,11 +618,10 @@ class K8sDRContainer: self.ulog_pair_list.append(pair) LOG.info("get_ulog_pair_info_list finish") - def match_config_and_pair_with_fs_name(self, ip, index, ulog_pair_info): + def match_config_and_pair_with_fs_name(self, value, ulog_pair_info): log_fs_name = ulog_pair_info.get("LOCALOBJNAME").strip() vstore_id = ulog_pair_info.get("vstoreId").strip() - value = self.server_info[ip][index] - if value.get("storage_dbstore_fs") == log_fs_name and value.get("dbstore_fs_vstore_id") == vstore_id: + if value.get("storage_dbstor_fs") == log_fs_name and value.get("dbstor_fs_vstore_id") == vstore_id: value["log_fs_id"] = ulog_pair_info.get("LOCALOBJID") value["log_pair_id"] = ulog_pair_info.get("ID") return True @@ -560,18 +629,19 @@ class K8sDRContainer: def check_and_match_ulog_page_info(self): LOG.info("begin to check_and_match_ulog_page_info") - filter_server_info = {} + filter_server_info = collections.defaultdict(list) + ulog_pair_list = [] for ulog_pair_info in self.ulog_pair_list: for ip in self.server_info: for index, value in 
enumerate(self.server_info[ip]): if ip in filter_server_info and index in filter_server_info[ip]: continue - if self.match_config_and_pair_with_fs_name(ip, index, ulog_pair_info): - if ip in filter_server_info: - filter_server_info[ip].append(index) - else: - filter_server_info[ip] = [index] + if self.match_config_and_pair_with_fs_name(value, ulog_pair_info): + filter_server_info[ip].append(index) + ulog_pair_list.append(ulog_pair_info) break + if self.is_unconnected_flag == True: + self.ulog_pair_list = ulog_pair_list LOG.info("check_and_match_ulog_page_info finish") def check_hyper_metro_stat(self): @@ -589,7 +659,9 @@ class K8sDRContainer: self.get_self_ip_info() self.init_dr_option() self.get_dm_logic_ip() + self.pre_check_dir() self.pre_check_link() + self.pre_check_unconnected_confirm() self.check_flag_stat() if self.skip_login: return @@ -631,16 +703,10 @@ class K8sDRContainer: def get_pod_list(self, ssh_client, namespace, islocal=False): count = 0 - while True: + while count <= KUBECTL_TRY_COUNT: cmd = f"kubectl get pod -n {namespace}" res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) - if not flag: - if count == 3: - err_msg = f"Failed to get the pod list more than 3 times." - if not islocal: - err_msg = f"Failed to get the pod list more than 3 times, server ip[{ssh_client.ip}]." - LOG.error(err_msg) - return [] + if not flag: time.sleep(5) count += 1 continue @@ -649,6 +715,12 @@ class K8sDRContainer: if "NAME" in i: get_flag = True return res if get_flag else [] + if count == KUBECTL_TRY_COUNT: + err_msg = f"Failed to get the pod list more than 3 times." + if not islocal: + err_msg = f"Failed to get the pod list more than 3 times, server ip[{ssh_client.ip}]." 
+ LOG.error(err_msg) + return [] def get_pod_name_list_by_stat(self, ssh_client, namespace, pod_list, stat="default", islocal=False): pod_name_list = [] @@ -685,7 +757,7 @@ class K8sDRContainer: cantian_yaml = value.get("cantian_yaml") config_yaml = value.get("config_yaml") cmd = f"kubectl delete -f {cantian_yaml} -f {config_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to delete pod, IP[{ip}] cantian path[{cantian_yaml}] config path[{config_yaml}]." LOG.error(err_msg) @@ -711,7 +783,7 @@ class K8sDRContainer: cantian_yaml = value.get("cantian_yaml", "") config_yaml = value.get("config_yaml", "") cmd = f"kubectl apply -f {config_yaml} -f {cantian_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod, IP[{ip}] config path[{config_yaml}] cantian path[{cantian_yaml}]" LOG.error(err_msg) @@ -725,12 +797,12 @@ class K8sDRContainer: ssh_client.create_client() islocal = False cmd = f"kubectl apply -f {config_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod ,path[{config_yaml}]" LOG.error(err_msg) cmd = f"kubectl apply -f {cantian_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod ,path[{cantian_yaml}]" LOG.error(err_msg) @@ -746,7 +818,7 @@ class K8sDRContainer: while True: if not config_del: cmd = f"kubectl delete -f {config_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, 
islocal=islocal) if not flag: err_msg = f"Failed to delete pod, path[{config_yaml}]" LOG.error(err_msg) @@ -754,7 +826,7 @@ class K8sDRContainer: config_del = True if not cantian_del: cmd = f"kubectl delete -f {cantian_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to delete pod, path[{cantian_yaml}]" LOG.error(err_msg) @@ -762,7 +834,7 @@ class K8sDRContainer: cantian_del = True if config_del and cantian_del: break - if count == 5: + if count == KUBECTL_TRY_COUNT: LOG.error(f"ip[{ip}] delete pod err, please check.") count += 1 @@ -784,7 +856,7 @@ class K8sDRContainer: value["dst_config_yaml"] = source_config_yml if not config_apply: cmd = f"kubectl apply -f {source_config_yml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod, path[{source_config_yml}]" LOG.error(err_msg) @@ -792,7 +864,7 @@ class K8sDRContainer: config_apply = True if not cantian_apply: cmd = f"kubectl apply -f {source_cantian_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod, path[{source_cantian_yaml}]" LOG.error(err_msg) @@ -805,7 +877,7 @@ class K8sDRContainer: config_flag = True if not config_apply: cmd = f"kubectl apply -f {dst_config_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod, path[{dst_config_yaml}]" LOG.error(err_msg) @@ -817,7 +889,7 @@ class K8sDRContainer: config_flag = True if not cantian_apply: cmd = f"kubectl apply -f {dst_cantian_yaml}" - res, flag = 
self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = f"Failed to apply pod, path[{dst_cantian_yaml}]" LOG.error(err_msg) @@ -825,7 +897,7 @@ class K8sDRContainer: cantian_apply = True if config_apply and cantian_apply: break - if count == 5: + if count == KUBECTL_TRY_COUNT: LOG.error(f"ip[{ip}] copy file and apply err, please check.") raise Exception("copy_file or apply error") count += 1 @@ -856,11 +928,10 @@ class K8sDRContainer: pod_name_list, islocal=islocal) if len(pod_list) == 0: break - else: - if count == 20: - exist_pod = True - break - count += 1 + if count == CHECK_DEL_TRY_COUNT: + exist_pod = True + break + count += 1 time.sleep(3) run_time += 3 if ssh_client is not None: @@ -953,9 +1024,9 @@ class K8sDRContainer: warning_flag = False for ip in self.server_info: for value in self.server_info[ip]: - pod_name_list = value.get("pod_name") + pod_name_list = value.get("pod_name", []) namespace = value.get("namespace") - abnormal_pod_list = value.get("abnormal_pods") + abnormal_pod_list = value.get("abnormal_pods", []) if len(abnormal_pod_list) == len(pod_name_list): err_msg = (f"IP[{ip}], namespace[{namespace}], " f"pod name[{pod_name_list}], all pods Abnormal status") @@ -1004,7 +1075,7 @@ class K8sDRContainer: def check_replication_filesystem_pair_stat(self): for ip in self.server_info: for value in self.server_info[ip]: - page_fs_info = self.storage_opt.query_filesystem_info(value.get("storage_dbstore_page_fs")) + page_fs_info = self.storage_opt.query_filesystem_info(value.get("storage_dbstor_page_fs")) page_pair_info = self.dr_option.query_remote_replication_pair_info(page_fs_info.get("ID"))[0] page_pair_id = page_pair_info.get("ID") value["page_pair_id"] = page_pair_id @@ -1071,7 +1142,7 @@ class K8sDRContainer: def switch_replication_pair_role(self): for ip in self.server_info: for value in self.server_info[ip]: - page_fs_info = 
self.storage_opt.query_filesystem_info(value.get("storage_dbstore_page_fs")) + page_fs_info = self.storage_opt.query_filesystem_info(value.get("storage_dbstor_page_fs")) page_pair_info = self.dr_option.query_remote_replication_pair_info(page_fs_info.get("ID"))[0] page_role = page_pair_info.get("ISPRIMARY") pair_id = page_pair_info.get("ID") @@ -1169,7 +1240,7 @@ class K8sDRContainer: raise Exception(err_msg) def get_single_write_flag(self, ssh_client, pod_name, namespace, cluster_name, islocal=False, timeout=60): - get_cmd = "sh %s getbase %s" % (DBSTORE_CHECK_VERSION_FILE, cluster_name) + get_cmd = "sh %s getbase %s" % (DBSTOR_CHECK_VERSION_FILE, cluster_name) cmd = f"single=$(kubectl exec -it {pod_name} -n {namespace} -- {get_cmd}); echo single=$single" try: ret_err = "" @@ -1218,7 +1289,7 @@ class K8sDRContainer: time.sleep(5) for pod_name in self.get_pod_name_list_by_stat(ssh_client, namespace, pod_name_list, stat="running", islocal=islocal): - ret, check_flag = self.pod_exe_cmd(pod_name, namespace, check_cmd, ssh_client, islocal=islocal) + _, check_flag = self.pod_exe_cmd(pod_name, namespace, check_cmd, ssh_client, islocal=islocal) if not check_flag: continue else: @@ -1236,7 +1307,7 @@ class K8sDRContainer: ssh_client = SshClient(ip, self.server_user, private_key_file=self.server_key_file) ssh_client.create_client() islocal = False - for index, value in enumerate(self.server_info[ip]): + for index, _ in enumerate(self.server_info[ip]): self.do_check_pod_dbstor_init(ip, ssh_client, index, islocal=islocal) if ssh_client is not None: ssh_client.close_client() @@ -1268,6 +1339,7 @@ class K8sDRContainer: finally: self.exe_func(self.del_pods_with_change_file) LOG.info("delete pods with change config finish") + self.user_confirm_twice("'python3 dr_k8s_switch.py unconnected_check' and wait for the indication") self.dr_option.change_fs_hyper_metro_domain_second_access(self.domain_id, DomainAccess.ReadOnly) try: self.dr_option.join_fs_hyper_metro_domain(self.domain_id) 
@@ -1279,6 +1351,7 @@ class K8sDRContainer: LOG.info("The current hyper_metro_status running_status is not Split.") return self.query_sync_status() + self.user_confirm_twice("'python3 dr_k8s_switch.py unconnected_recover'") LOG.info("switch hyper metro domain with recover finish.") def wait_remote_replication_pair_sync(self, pair_id): @@ -1310,20 +1383,15 @@ class K8sDRContainer: single_config = get_json_config(self.single_file_path) for ip in single_config: for value in single_config[ip]: - page_fs_info = self.storage_opt.query_filesystem_info(value.get("storage_dbstore_page_fs")) + page_fs_info = self.storage_opt.query_filesystem_info(value.get("storage_dbstor_page_fs")) page_pair_info = self.dr_option.query_remote_replication_pair_info(page_fs_info.get("ID"))[0] page_role = page_pair_info.get("ISPRIMARY") running_status = page_pair_info.get("RUNNINGSTATUS") pair_id = page_pair_info.get("ID") if page_role == "true": - if ip not in self.single_pod: - if value["single"] == "1": - self.single_pod[ip] = [value] - self.del_pod(ip, value["cantian_yaml"], ) - else: - if value["single"] == "1": - self.single_pod[ip].append(value) - self.del_pod(ip, value["cantian_yaml"], ) + if value["single"] == "1": + self.single_pod[ip].append(value) + self.del_pod(ip, value["cantian_yaml"], ) self.dr_option.swap_role_replication_pair(pair_id) self.dr_option.remote_replication_filesystem_pair_set_secondary_write_lock(pair_id) self.execute_replication_steps(running_status, value, pair_id=pair_id) @@ -1353,7 +1421,7 @@ class K8sDRContainer: os.remove(config_yaml) else: cmd = f"rm -rf {cantian_yaml} {config_yaml}" - res, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) + _, flag = self.ssh_exec_cmd(ssh_client, cmd, timeout=10, islocal=islocal) if not flag: err_msg = (f"Failed to delete config file, IP[{ip}] cantian path[{cantian_yaml}] " f"config path[{config_yaml}].]") @@ -1396,12 +1464,21 @@ class K8sDRContainer: LOG.info(f"Ip[{ip}] namespace[{namespace}] 
pod[{pod_name}] begin to backup pod logs.") cmd = (f"sh /ctdb/cantian_install/cantian_connector/action/docker/log_backup.sh " f"{cluster_name} {cluster_id} {node_id} {run_user} {storage_metadata_fs}") - ret, flag = self.pod_exe_cmd(pod_name, namespace, cmd, ssh_client, islocal=islocal) + _, flag = self.pod_exe_cmd(pod_name, namespace, cmd, ssh_client, islocal=islocal) if not flag: LOG.error(f"IP[{ip}] namespace[{namespace}] " f"pod_name[{pod_name}] back pod logs failed.") LOG.info(f"Ip[{ip}] namespace[{namespace}] backup_pod_log finish.") + def delete_pods(self): + self.exe_func(self.backup_pod_log) + self.exe_func(self.del_all_pod) + LOG.info("active pods delete finish.") + if not self.check_pod_del(): + LOG.error("active pods delete failed.") + return False + return True + def exe_func(self, func): ssh_client = None try: @@ -1427,16 +1504,13 @@ class K8sDRContainer: if not self.check_abnormal_pod_stat(): LOG.info("active pods stat abnormal, exit.") return - self.exe_func(self.backup_pod_log) - self.exe_func(self.del_all_pod) - LOG.info("active pods delete finish.") - if not self.check_pod_del(): - LOG.error("active pods delete failed.") - return + self.delete_pods() self.switch_hyper_metro_domain_role() self.switch_replication_pair_role() self.exe_func(self.apply_pods) LOG.info("apply pods finish") + if self.unconnected_ip: + warning(f"Go to the node {self.unconnected_ip} and input 'python3 dr_k8s_switch.py unconnected_switch'.") self.check_pod_start() def fail_over(self): @@ -1448,6 +1522,11 @@ class K8sDRContainer: self.exe_func(self.check_database_role) LOG.info("check database role finish.") + def unconnected_fail_over(self): + self.exe_func(self.check_pod_stat) + if not self.check_abnormal_pod_stat(): + warning("pod stat is abnormal, back to the orginal node and input 'no'.") + def recover(self): try: self.switch_hyper_metro_domain_role_recover() @@ -1464,13 +1543,60 @@ class K8sDRContainer: self.exe_func(self.delete_config_file) def delete(self): - 
self.exe_func(self.backup_pod_log) - self.exe_func(self.del_all_pod) - LOG.info("active pods delete finish.") - if not self.check_pod_del(): - LOG.error("active pods delete failed.") + if self.delete_pods(): + LOG.info("active pods delete success.") + + def unconnected_delete(self): + if self.delete_pods(): + LOG.info("active pods delete success.") + + def unconnected_switch_delete(self): + if not self.check_abnormal_pod_stat(): + LOG.info("active pods stat abnormal, exit.") return - LOG.info("active pods delete success.") + self.delete_pods() + warning("Script complete, back to the original node and wait for the next indication.") + + def unconnected_switch(self): + self.switch_replication_pair_role() + self.exe_func(self.apply_pods) + LOG.info("apply pods finish") + if self.unconnected_ip: + warning(f"Go to the node {self.unconnected_ip} and input 'python3 dr_k8s_switch.py unconnected_switch'.") + self.check_pod_start() + warning("Script complete, succeed to switch_over in this unconnected node.") + + def unconnected_check(self): + self.exe_func(self.change_config_and_apply_pod) + LOG.info("apply pods with change config finish") + self.change_apply = True + try: + self.check_dbstor_init() + except Exception as e: + LOG.error(f"Check dbstor init fialed, detail: {e}") + raise e + finally: + self.exe_func(self.del_pods_with_change_file) + LOG.info("delete pods with change config finish") + self.user_confirm_twice("'python3 dr_k8s_switch.py unconnected_check' and wait for the indication") + warning("Script complete, back to the original node and wait for the next indication.") + + def unconnected_recover(self): + self.user_confirm_twice("'python3 dr_k8s_switch.py unconnected_recover'") + try: + self.switch_replication_pair_role_recover() + self.exe_func(self.apply_pods) + LOG.info("apply pods finish") + self.check_pod_start() + self.exe_func(self.ctbackup_purge_log) + LOG.info("ctbackup_purge_log finish.") + except Exception as e: + LOG.error(f"Cannot apply pods and 
ctbackup purge log, detail: {e}") + raise e + finally: + if self.change_apply: + self.exe_func(self.delete_config_file) + warning("Script complete, succeed to recover in the unconnected node.") def check_flag_stat(self): if not self.check_flag: @@ -1487,14 +1613,18 @@ class K8sDRContainer: return if not self.warning_tip(): return + if "unconnected" in self.action: + self.is_unconnected_flag = True self.init_k8s_config() self.check_flag_stat() try: self.pre_check() - try: + try: getattr(self, self.action) except AttributeError as _err: - err_msg = "The supported types of operations include[fail_over, recover, switch_over, delete]" + err_msg = "The supported types of operations include [delete, switch_over, fail_over, recover,\n" + "unconnected_switch_delete, unconnected_fail_over, unconnected_switch, unconnected_recover, + "unconnected_check, unconnected_delete]\n" raise Exception(err_msg) from _err getattr(self, self.action)() except Exception as e: diff --git a/pkg/deploy/action/storage_operate/split_dbstore_fs.py b/pkg/deploy/action/storage_operate/split_dbstore_fs.py index ee13a24d25c1de0d66e5a607d22e947484b85fdb..1a32c4435bcc5d6bbd5f43fa8604b779ef9ebf6c 100644 --- a/pkg/deploy/action/storage_operate/split_dbstore_fs.py +++ b/pkg/deploy/action/storage_operate/split_dbstore_fs.py @@ -20,8 +20,8 @@ class StorageFileSystemSplit(StorageInf): def __init__(self, ip, user_name, passwd, config): super(StorageFileSystemSplit, self).__init__((ip, user_name, passwd)) self.config_info = json.loads(read_helper(config)) - self.storage_dbstore_fs = self.config_info.get("storage_dbstore_fs") - self.storage_dbstore_page_fs = self.config_info.get("storage_dbstore_page_fs") + self.storage_dbstor_fs = self.config_info.get("storage_dbstor_fs") + self.storage_dbstor_page_fs = self.config_info.get("storage_dbstor_page_fs") self.metadata_logic_ip = self.config_info.get("metadata_logic_ip") self.namespace = self.config_info.get("cluster_name") self.vstore_id = 0 @@ -42,7 +42,7 @@ class 
StorageFileSystemSplit(StorageInf): @staticmethod def _remove_mount_file_path(fs_name): """ - 删除dbstore文件系统挂载目录 + 删除dbstor文件系统挂载目录 :param fs_name: 文件系统名称 :return: """ @@ -63,23 +63,23 @@ class StorageFileSystemSplit(StorageInf): 4、取消挂载 :return: """ - LOGGER.info("Start to tailor dbstore fs.") + LOGGER.info("Start to tailor dbstor fs.") # sleep 避免nfs server 繁忙报错 time.sleep(2) - self.mount_file_system(self.storage_dbstore_fs, self.metadata_logic_ip) + self.mount_file_system(self.storage_dbstor_fs, self.metadata_logic_ip) # sleep 避免nfs server 繁忙报错 time.sleep(2) - self.mount_file_system(self.storage_dbstore_page_fs, self.metadata_logic_ip) + self.mount_file_system(self.storage_dbstor_page_fs, self.metadata_logic_ip) self.tailor_log_file_system() self.tailor_page_file_system() time.sleep(2) self._change_group_recursive() - self.umount_file_system(self.storage_dbstore_fs) + self.umount_file_system(self.storage_dbstor_fs) time.sleep(2) - self.umount_file_system(self.storage_dbstore_page_fs) - self._remove_mount_file_path(self.storage_dbstore_fs) - self._remove_mount_file_path(self.storage_dbstore_page_fs) - LOGGER.info("Success to tailor dbstore fs.") + self.umount_file_system(self.storage_dbstor_page_fs) + self._remove_mount_file_path(self.storage_dbstor_fs) + self._remove_mount_file_path(self.storage_dbstor_page_fs) + LOGGER.info("Success to tailor dbstor fs.") def tailor_log_file_system(self): """ @@ -89,7 +89,7 @@ class StorageFileSystemSplit(StorageInf): 2、将ulog_root_dir内部目录上移一层,然后删除ulog_root_di :return: """ - log_namespace_path = f"/mnt/dbdata/remote/{self.storage_dbstore_fs}/{self.namespace}" + log_namespace_path = f"/mnt/dbdata/remote/{self.storage_dbstor_fs}/{self.namespace}" page_pool_root_dir = os.path.join(log_namespace_path, "page_pool_root_dir") ulog_root_dir = os.path.join(log_namespace_path, "ulog_root_dir") if os.path.exists(page_pool_root_dir): @@ -107,7 +107,7 @@ class StorageFileSystemSplit(StorageInf): 3、将page_pool_root_dir内的目录上移后,删除page_pool_root_dir 
:return: """ - page_namespace_path = f"/mnt/dbdata/remote/{self.storage_dbstore_page_fs}/{self.namespace}" + page_namespace_path = f"/mnt/dbdata/remote/{self.storage_dbstor_page_fs}/{self.namespace}" namespace_file = os.path.join(page_namespace_path, self.namespace) page_pool_root_dir = os.path.join(page_namespace_path, "page_pool_root_dir") ulog_root_dir = os.path.join(page_namespace_path, "ulog_root_dir") @@ -149,38 +149,38 @@ class StorageFileSystemSplit(StorageInf): share_id = self.create_nfs_share(data) return share_id - def clear_dbstore_nfs_share(self, fs_id, clone_fs_id): + def clear_dbstor_nfs_share(self, fs_id, clone_fs_id): """ 删除文件系统共享 :param fs_id:文件系统id :param clone_fs_id:克隆文件系统ID :return: """ - LOGGER.info("Begin to clear dbstore nfs share.fs_name:[%s], clone_fs_name:[%s]", self.storage_dbstore_fs, - self.storage_dbstore_page_fs) + LOGGER.info("Begin to clear dbstor nfs share.fs_name:[%s], clone_fs_name:[%s]", self.storage_dbstor_fs, + self.storage_dbstor_page_fs) for _id in [fs_id, clone_fs_id]: share_info = self.query_nfs_info(_id, vstore_id=self.vstore_id) if share_info: _share_id = share_info[0].get("ID") self.delete_nfs_share(_share_id, vstore_id=self.vstore_id) - LOGGER.info("Success to clear dbstore nfs share.") + LOGGER.info("Success to clear dbstor nfs share.") def pre_upgrade(self): - LOGGER.info("Begin to check dbstore page fs info") - page_file_system_info = self.query_filesystem_info(self.storage_dbstore_page_fs, vstore_id=self.vstore_id) + LOGGER.info("Begin to check dbstor page fs info") + page_file_system_info = self.query_filesystem_info(self.storage_dbstor_page_fs, vstore_id=self.vstore_id) if page_file_system_info: - err_msg = "File system [%s] is exist." % self.storage_dbstore_page_fs + err_msg = "File system [%s] is exist." 
% self.storage_dbstor_page_fs LOGGER.error(err_msg) raise Exception(err_msg) - LOGGER.info("Success to check dbstore page fs info") + LOGGER.info("Success to check dbstor page fs info") def upgrade(self): """ - 分裂dbstore文件系统 + 分裂dbstor文件系统 steps: 1、登录DM - 2、查询storage_dbstore_fs信息 - 3、查询storage_dbstore_page_fs信息,不存在执行步骤4,存在判断:正在分裂执行步骤6,没有分裂执行步骤5 + 2、查询storage_dbstor_fs信息 + 3、查询storage_dbstor_page_fs信息,不存在执行步骤4,存在判断:正在分裂执行步骤6,没有分裂执行步骤5 4、克隆文件系统 5、分裂文件系统 6、查询分裂进度 @@ -190,12 +190,12 @@ class StorageFileSystemSplit(StorageInf): 10、删除共享 :return: """ - fs_info = self.query_filesystem_info(self.storage_dbstore_fs, vstore_id=self.vstore_id) + fs_info = self.query_filesystem_info(self.storage_dbstor_fs, vstore_id=self.vstore_id) fs_id = fs_info.get("ID") - clone_fs_info = self.query_filesystem_info(self.storage_dbstore_page_fs, vstore_id=self.vstore_id) + clone_fs_info = self.query_filesystem_info(self.storage_dbstor_page_fs, vstore_id=self.vstore_id) if not clone_fs_info: - clone_fs_info = self.create_clone_file_system(fs_id, self.storage_dbstore_page_fs, vstore_id=self.vstore_id) + clone_fs_info = self.create_clone_file_system(fs_id, self.storage_dbstor_page_fs, vstore_id=self.vstore_id) clone_fs_id = clone_fs_info.get("ID") split_status = clone_fs_info.get("SPLITSTATUS") @@ -204,19 +204,19 @@ class StorageFileSystemSplit(StorageInf): if int(split_status) == 1 and split_enable == "false": self.split_clone_file_system(clone_fs_id) - self.query_split_clone_file_system_process(self.storage_dbstore_page_fs, vstore_id=self.vstore_id) + self.query_split_clone_file_system_process(self.storage_dbstor_page_fs, vstore_id=self.vstore_id) - for _fs_id, _fs_name in [(fs_id, self.storage_dbstore_fs), - (clone_fs_id, self.storage_dbstore_page_fs)]: + for _fs_id, _fs_name in [(fs_id, self.storage_dbstor_fs), + (clone_fs_id, self.storage_dbstor_page_fs)]: _share_id = self.create_clone_share(_fs_id, _fs_name) self.add_clone_share_client(_share_id) self.tailor_file_system() - 
self.clear_dbstore_nfs_share(_fs_id, clone_fs_id) + self.clear_dbstor_nfs_share(_fs_id, clone_fs_id) def rollback(self): - page_fs_info = self.query_filesystem_info(self.storage_dbstore_page_fs, vstore_id=self.vstore_id) + page_fs_info = self.query_filesystem_info(self.storage_dbstor_page_fs, vstore_id=self.vstore_id) if not page_fs_info: return file_system_id = page_fs_info.get("ID") @@ -230,14 +230,14 @@ class StorageFileSystemSplit(StorageInf): for i in env_conf: cantian_user = i.split("=")[1] if "cantian_user" in i else cantian_user cantian_group = i.split("=")[1] if "cantian_group" in i else cantian_group - dbstore_fs_path = f"/mnt/dbdata/remote/{self.storage_dbstore_fs}/{self.namespace}" - dbstore_page_fs_path = f"/mnt/dbdata/remote/{self.storage_dbstore_page_fs}/{self.namespace}" - LOGGER.info("Start change owner of %s and %s", dbstore_fs_path, dbstore_page_fs_path) - cmd = f"chown -hR {cantian_user}:{cantian_group} {dbstore_fs_path} &&" \ - f" chown -hR {cantian_user}:{cantian_group} {dbstore_page_fs_path}" + dbstor_fs_path = f"/mnt/dbdata/remote/{self.storage_dbstor_fs}/{self.namespace}" + dbstor_page_fs_path = f"/mnt/dbdata/remote/{self.storage_dbstor_page_fs}/{self.namespace}" + LOGGER.info("Start change owner of %s and %s", dbstor_fs_path, dbstor_page_fs_path) + cmd = f"chown -hR {cantian_user}:{cantian_group} {dbstor_fs_path} &&" \ + f" chown -hR {cantian_user}:{cantian_group} {dbstor_page_fs_path}" return_code, _, stderr = exec_popen(cmd) if return_code: - err_msg = f"Failed chown {dbstore_fs_path} {dbstore_page_fs_path}, details:{stderr}" + err_msg = f"Failed chown {dbstor_fs_path} {dbstor_page_fs_path}, details:{stderr}" LOGGER.info(err_msg) diff --git a/pkg/deploy/action/uninstall.sh b/pkg/deploy/action/uninstall.sh index 2b34f0d2571a72429da5b21243014fa109f10fd0..7437c0697e5ded39c898d579793d70b4474d15eb 100644 --- a/pkg/deploy/action/uninstall.sh +++ b/pkg/deploy/action/uninstall.sh @@ -13,7 +13,7 @@ deploy_group=$(python3 
${CURRENT_PATH}/get_config_info.py "deploy_group") cantian_in_container=$(python3 ${CURRENT_PATH}/get_config_info.py "cantian_in_container") # 获取已创建路径的路径名 -storage_dbstore_fs=$(python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstore_fs") +storage_dbstor_fs=$(python3 ${CURRENT_PATH}/get_config_info.py "storage_dbstor_fs") storage_share_fs=$(python3 ${CURRENT_PATH}/get_config_info.py "storage_share_fs") storage_archive_fs=$(python3 ${CURRENT_PATH}/get_config_info.py "storage_archive_fs") storage_metadata_fs=$(python3 ${CURRENT_PATH}/get_config_info.py "storage_metadata_fs") @@ -104,6 +104,9 @@ function umount_fs() { if [[ ${storage_archive_fs} != '' ]] && [[ -d /mnt/dbdata/remote/archive_"${storage_archive_fs}"/binlog ]] && [[ "${node_id}" == "0" ]]; then rm -rf /mnt/dbdata/remote/archive_"${storage_archive_fs}"/binlog + if [[ -f /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success ]]; then + rm -rf /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success + fi fi if [[ ${storage_archive_fs} != '' ]] && [[ -d /mnt/dbdata/remote/archive_"${storage_archive_fs}"/logicrep_conf ]] && [[ "${node_id}" == "0" ]]; then rm -rf /mnt/dbdata/remote/archive_"${storage_archive_fs}"/logicrep_conf @@ -115,10 +118,10 @@ function umount_fs() { umount -f -l /mnt/dbdata/remote/share_${storage_share_fs} > /dev/null 2>&1 umount -f -l /mnt/dbdata/remote/archive_${storage_archive_fs} > /dev/null 2>&1 umount -f -l /mnt/dbdata/remote/metadata_${storage_metadata_fs} > /dev/null 2>&1 - umount -f -l /mnt/dbdata/remote/storage_${storage_dbstore_fs} > /dev/null 2>&1 + umount -f -l /mnt/dbdata/remote/storage_${storage_dbstor_fs} > /dev/null 2>&1 rm -rf /mnt/dbdata/remote/archive_${storage_archive_fs} > /dev/null 2>&1 - rm -rf /mnt/dbdata/remote/storage_${storage_dbstore_fs}/data > /dev/null 2>&1 + rm -rf /mnt/dbdata/remote/storage_${storage_dbstor_fs}/data > /dev/null 2>&1 rm -rf /mnt/dbdata/remote/share_${storage_share_fs} > /dev/null 2>&1 rm -rf 
/mnt/dbdata/remote/metadata_${storage_metadata_fs} > /dev/null 2>&1 } diff --git a/pkg/deploy/action/update_config.py b/pkg/deploy/action/update_config.py index 832e70c5c74a29d30573fa57ecb0dc7e7e26eca3..4672d1c375be7181a4cf9c6c965be1d34f939e4a 100644 --- a/pkg/deploy/action/update_config.py +++ b/pkg/deploy/action/update_config.py @@ -116,7 +116,7 @@ def write_config_file(file_path, content): file_obj.write(json.dumps(content)) -def update_dbstore_conf(action, key, value=None): +def update_dbstor_conf(action, key, value=None): file_list = [ "/mnt/dbdata/local/cantian/tmp/data/dbstor/conf/dbs/dbstor_config.ini", "/opt/cantian/dbstor/conf/dbs/dbstor_config.ini", @@ -129,8 +129,8 @@ def update_dbstore_conf(action, key, value=None): "/opt/cantian/cms/dbstor/conf/dbs/dbstor_config_tool_3.ini", "/opt/cantian/cms/dbstor/conf/dbs/dbstor_config.ini" ] - opt_dbstore_config = "/opt/cantian/dbstor/tools/dbstor_config.ini" - file_list.append(opt_dbstore_config) + opt_dbstor_config = "/opt/cantian/dbstor/tools/dbstor_config.ini" + file_list.append(opt_dbstor_config) for file_path in file_list: if not os.path.exists(file_path): continue @@ -255,7 +255,7 @@ def main(): key = args.key value = args.value func_dict = { - "dbstor": update_dbstore_conf, + "dbstor": update_dbstor_conf, "cantian": update_cantian_conf, "cantian_ini": update_cantian_ini_conf, "cms": update_cms_conf, diff --git a/pkg/deploy/action/upgrade.sh b/pkg/deploy/action/upgrade.sh index 1076cbb5f3b7ca4bac3c54b12ae40c00dfd0fd97..a4507cc2748ab4a1d5c94a33105437634c592f34 100644 --- a/pkg/deploy/action/upgrade.sh +++ b/pkg/deploy/action/upgrade.sh @@ -98,7 +98,8 @@ function input_params_check() { fi # 若使用入湖,需校验so依赖文件路径进行文件拷贝 - if [[ -f /opt/software/tools/logicrep/start.success ]]; then + if [[ -f /opt/software/tools/logicrep/start.success ]] || [[ -f /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success ]]; then + touch /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success read -p "please input 
the so rely path of logicrep: " SO_PATH if [ ! -d "${SO_PATH}" ]; then logAndEchoInfo "pass upgrade mode check, current upgrade mode: ${UPGRADE_MODE}" @@ -259,15 +260,15 @@ function creat_snapshot() { ####################################################################################### ## 文件系统拆分 ####################################################################################### -function split_dbstore_file_system() { - logAndEchoInfo "Begin to split dbstore file system." - echo -e "${DORADO_IP}\n${dorado_user}\n${dorado_pwd}\n" | python3 "${CURRENT_PATH}"/storage_operate/split_dbstore_fs.py "upgrade" "${CURRENT_PATH}"/../config/deploy_param.json +function check_dbstor_client_compatibility() { + logAndEchoInfo "Begin to split dbstor file system." + echo -e "${DORADO_IP}\n${dorado_user}\n${dorado_pwd}\n" | python3 "${CURRENT_PATH}"/storage_operate/split_dbstor_fs.py "upgrade" "${CURRENT_PATH}"/../config/deploy_param.json if [ $? -ne 0 ]; then - logAndEchoError "Split dbstore file system failed, details see /opt/cantian/ct_om/log/om_deploy.log" + logAndEchoError "Split dbstor file system failed, details see /opt/cantian/ct_om/log/om_deploy.log" exit 1 fi - logAndEchoInfo "Split dbstore file system success" + logAndEchoInfo "Split dbstor file system success" } function migrate_file_system() { @@ -334,7 +335,7 @@ function update_config() { } -function install_dbstore(){ +function install_dbstor(){ local arrch=$(uname -p) local dbstor_path="${CURRENT_PATH}"/../repo local dbstor_package_file=$(ls "${dbstor_path}"/DBStor_Client*_"${arrch}"*.tgz) @@ -404,7 +405,7 @@ function install_rpm() tar -zxf ${RPM_UNPACK_PATH_FILE}/Cantian-RUN-CENTOS-64bit.tar.gz -C ${RPM_PACK_ORG_PATH} if [ x"${deploy_mode}" != x"file" ];then echo "start replace rpm package" - install_dbstore + install_dbstor if [ $? 
-ne 0 ];then sh ${CURRENT_PATH}/uninstall.sh ${config_install_type} exit 1 @@ -1055,8 +1056,8 @@ function show_cantian_version() { function main() { logAndEchoInfo ">>>>> begin to upgrade, current upgrade mode: ${UPGRADE_MODE} <<<<<" - input_params_check get_mnt_dir_name + input_params_check get_config_info rpm_check diff --git a/pkg/deploy/action/upgrade_commit.sh b/pkg/deploy/action/upgrade_commit.sh index be9d0bebef7f212b13e67d93e8d00dc6666d82ed..17f7e27ac9acca17dba55db7eeb8fee8f52b0a59 100644 --- a/pkg/deploy/action/upgrade_commit.sh +++ b/pkg/deploy/action/upgrade_commit.sh @@ -14,14 +14,14 @@ cluster_status_flag="" business_code_backup_path="" cluster_commit_flag="" modify_sys_table_success_flag="" -DEPLOY_MODE_DBSTORE_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag +DEPLOY_MODE_DBSTOR_UNIFY_FLAG=/opt/cantian/log/deploy/.dbstor_unify_flag source "${CURRENT_PATH}"/log4sh.sh source ${CURRENT_PATH}/docker/dbstor_tool_opt_common.sh source "${CURRENT_PATH}"/env.sh -if [ -f DEPLOY_MODE_DBSTORE_UNIFY_FLAG ]; then +if [ -f DEPLOY_MODE_DBSTOR_UNIFY_FLAG ]; then CLUSTER_COMMIT_STATUS=("prepared" "commit") else CLUSTER_COMMIT_STATUS=("rollup" "prepared" "commit") @@ -225,6 +225,9 @@ function clear_upgrade_residual_data() { if [[ -n $(ls "${upgrade_path}"/upgrade_node*."${target_version}") ]];then rm -f "${upgrade_path}"/upgrade_node*."${target_version}" fi + if [[ -f /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success ]]; then + rm -rf /mnt/dbdata/remote/archive_"${storage_archive_fs}"/start.success + fi delete_fs_upgrade_file_or_path_by_dbstor call_ctback_tool.success delete_fs_upgrade_file_or_path_by_dbstor cluster_and_node_status delete_fs_upgrade_file_or_path_by_dbstor updatesys.* diff --git a/pkg/deploy/action/utils/config/file_system_info.json b/pkg/deploy/action/utils/config/file_system_info.json index 3f74ec61ec1264cd2ef9426f73de1bb4a0590ad2..140493f78ad8d4d8ef5e905702e636de2bff47ff 100644 --- 
a/pkg/deploy/action/utils/config/file_system_info.json +++ b/pkg/deploy/action/utils/config/file_system_info.json @@ -1,13 +1,13 @@ { "PARENTID": 0, "client_ip": "127.0.0.1/24", - "storage_dbstore_fs": { + "storage_dbstor_fs": { "vstoreId": 0, "SNAPSHOTRESERVEPER": 20, "CAPACITYTHRESHOLD": 90, "CAPACITY": "300GB" }, - "storage_dbstore_page_fs": { + "storage_dbstor_page_fs": { "vstoreId": 0, "SNAPSHOTRESERVEPER": 20, "CAPACITYTHRESHOLD": 90, diff --git a/pkg/deploy/single_options/install_config.json b/pkg/deploy/single_options/install_config.json index b2978cf161f55031debd337d318293d5fbe870bf..a1f484b63e634eda3d0d997b1f8e906f97765121 100644 --- a/pkg/deploy/single_options/install_config.json +++ b/pkg/deploy/single_options/install_config.json @@ -4,7 +4,7 @@ "D_DATA_PATH": "/mnt/dbdata/local/cantian/tmp/data", "l_LOG_FILE": "/opt/cantian/log/cantian/cantian_deploy.log", "M_RUNING_MODE": "cantiand_with_mysql_in_cluster", - "USE_DBSTORE": "--dbstor", + "USE_DBSTOR": "--dbstor", "p_PACKAGE_AND_VERSION": "-P", "g_INSTALL_USER_PRIVILEGE": "", "Z_KERNEL_PARAMETER1": "CHECKPOINT_PERIOD=1", diff --git a/pkg/install/funclib.py b/pkg/install/funclib.py index 6cefd50c10d4d45bd7a040ae88beb4ca897382eb..2dfde827b6cb60fcd49db9d72ec4865691bffe11 100644 --- a/pkg/install/funclib.py +++ b/pkg/install/funclib.py @@ -200,7 +200,7 @@ class DefaultConfigValue(object): "CLUSTER_DATABASE": "TRUE", "_DOUBLEWRITE": "FALSE", "TEMP_BUFFER_SIZE": "1G", - "DATA_BUFFER_SIZE": "8G", + "DATA_BUFFER_SIZE": "1G", "SHARED_POOL_SIZE": "1G", "LOG_BUFFER_COUNT": 16, "LOG_BUFFER_SIZE": "64M", diff --git a/pkg/install/install.py b/pkg/install/install.py index 69c455de68caec36bfb9992d7ed5289c373810ab..3dbe6c14e44af684d99a0e745b763052b23388d8 100644 --- a/pkg/install/install.py +++ b/pkg/install/install.py @@ -2521,7 +2521,7 @@ class Installer: raise ValueError("The content got from pipe not find passwd.") self.verify_new_passwd(g_opts.db_passwd, 8) - def copy_dbstore_path(self): + def 
copy_dbstor_path(self): str_cmd = "" if g_opts.use_dbstor: os.makedirs("%s/dbstor/conf/dbs" % self.data, CommonValue.KEY_DIRECTORY_PERMISSION) @@ -2570,7 +2570,7 @@ class Installer: cantian_pkg_file, cantian_pkg_file, cantian_pkg_file, self.installPath)) - str_cmd += self.copy_dbstore_path() + str_cmd += self.copy_dbstor_path() str_cmd += " && rm -rf %s" % cantian_pkg_file log("Decompress cmd: " + str_cmd) ret_code, _, stderr = _exec_popen(str_cmd) @@ -2724,7 +2724,7 @@ class Installer: if g_opts.install_user_privilege == "withoutroot": cmd = "%s/bin/ctencrypt -e PBKDF2" % self.installPath else: - cmd = (""" su - '%s' -c "%s/bin/ctencrypt -e PBKDF2" """ + cmd = (""" su - '%s' -c "source ~/.bashrc && %s/bin/ctencrypt -e PBKDF2" """ % (self.user, self.installPath)) g_opts.db_passwd = g_opts.db_passwd if len(plain_passwd.strip()) == 0 else plain_passwd.strip() values = [g_opts.db_passwd, g_opts.db_passwd] @@ -2943,7 +2943,7 @@ class Installer: cmd = "%s/bin/ctencrypt -r -d '%s'" % (self.installPath, self.data) else: - cmd = (""" su - '%s' -c "%s/bin/ctencrypt -r -d '%s'" """ + cmd = (""" su - '%s' -c "source ~/.bashrc && %s/bin/ctencrypt -r -d '%s'" """ % (self.user, self.installPath, self.data)) ret_code, stdout, stderr = _exec_popen(cmd, [passwd, passwd]) if ret_code: @@ -3441,12 +3441,12 @@ class Installer: cmd, [g_opts.db_passwd]) else: if self.enableSysdbaLogin: - cmd = ("su - '%s' -c \"%s/bin/ctsql / as sysdba " + cmd = ("su - '%s' -c \"source ~/.bashrc && %s/bin/ctsql / as sysdba " "-q -D %s -f \"%s\" \"" % (self.user, self.installPath, self.data, sql_file)) return_code, stdout_data, stderr_data = _exec_popen(cmd) else: - cmd = ("su - '%s' -c \"%s/bin/ctsql %s@%s:%s -q -f \"%s\"\"" % ( + cmd = ("su - '%s' -c \"source ~/.bashrc && %s/bin/ctsql %s@%s:%s -q -f \"%s\"\"" % ( self.user, self.installPath, g_opts.db_user, @@ -3519,12 +3519,12 @@ class Installer: cmd, [g_opts.db_passwd]) else: if self.enableSysdbaLogin: - cmd = ("su - '%s' -c \"%s/bin/ctsql / as sysdba 
" + cmd = ("su - '%s' -c \"source ~/.bashrc && %s/bin/ctsql / as sysdba " "-q -D %s -c \\\"%s\\\" \"" % (self.user, self.installPath, self.data, sql)) return_code, stdout_data, stderr_data = _exec_popen(cmd) else: - cmd = ("su - '%s' -c \"%s/bin/ctsql %s@%s:%s -q" + cmd = ("su - '%s' -c \"source ~/.bashrc && %s/bin/ctsql %s@%s:%s -q" " -c \\\"%s\\\"\"" % (self.user, self.installPath, g_opts.db_user, @@ -3697,7 +3697,7 @@ class Installer: if g_opts.install_user_privilege == "withoutroot": cmd = "%s/bin/ctencrypt -g" % self.installPath else: - cmd = "su - '%s' -c \"%s/bin/ctencrypt -g \"" % (self.user, self.installPath) + cmd = "su - '%s' -c \"source ~/.bashrc && %s/bin/ctencrypt -g \"" % (self.user, self.installPath) ret_code, stdout, stderr = _exec_popen(cmd) if ret_code: raise OSError("Failed to generate encrypted keys. Error: %s" @@ -3732,7 +3732,7 @@ class Installer: if g_opts.install_user_privilege == "withoutroot": cmd = "%s/bin/ctencrypt -g -o '%s' " % (self.installPath, f_factor1) else: - cmd = ("su - '%s' -c \"%s/bin/ctencrypt -g -o '%s' \"" + cmd = ("su - '%s' -c \"source ~/.bashrc && %s/bin/ctencrypt -g -o '%s' \"" % (self.user, self.installPath, f_factor1)) ret_code, stdout, stderr = _exec_popen(cmd) if ret_code: @@ -3767,7 +3767,7 @@ class Installer: cmd = ("""%s/bin/ctencrypt -e AES256 -f %s -k %s """ % (self.installPath, key_, work_key)) else: - cmd = ("su - '%s' -c \"%s/bin/ctencrypt -e AES256" + cmd = ("su - '%s' -c \"source ~/.bashrc && %s/bin/ctencrypt -e AES256" " -f '%s' -k '%s' \"" % (self.user, self.installPath, key_, work_key)) values = [ssl_passwd, ssl_passwd] diff --git a/pkg/install/installdb.sh b/pkg/install/installdb.sh index d3c33a37434c0ca6e0dabb6ec2e4af22ffdecac1..39335abfec97f0d55b719d3933413031275c61ab 100644 --- a/pkg/install/installdb.sh +++ b/pkg/install/installdb.sh @@ -2,6 +2,7 @@ # # This library is using the variables listed in cfg/cluster.ini, and value come from install.py#set_cluster_conf # +source ~/.bashrc 
running_mode=$(grep '"M_RUNING_MODE"' /opt/cantian/action/cantian/install_config.json | cut -d '"' -f 4) single_mode="multiple" if [ "$running_mode" = "cantiand_with_mysql" ] || diff --git a/pkg/install/script/cluster/cluster.sh b/pkg/install/script/cluster/cluster.sh index 5702fb727088ff341487ad47683a92f19f16d33e..5af6f6c66e828890ff416fb9dbc48429f59dc763 100644 --- a/pkg/install/script/cluster/cluster.sh +++ b/pkg/install/script/cluster/cluster.sh @@ -52,6 +52,7 @@ function check_process() return 0 else echo "res_count= ${res_count}" + echo "RES_EAGAIN" return 1 fi fi @@ -142,8 +143,8 @@ function stop_cantian() { echo "RES_SUCCESS" exit 0 else - echo "RES_MULTI" - exit 1 + echo "RES_EAGAIN" + exit 3 fi fi } @@ -169,8 +170,8 @@ function stop_cantian_by_force() { echo "RES_SUCCESS" exit 0 else - echo "RES_FAILED" - exit 1 + echo "RES_EAGAIN" + exit 3 fi fi } diff --git a/pkg/src/cluster/dtc_buffer.c b/pkg/src/cluster/dtc_buffer.c index adc5aabbd6eb90b696cd855b671c31d211dd09f9..7a1789af66ad812c1d699c7eebccf555880c76e8 100644 --- a/pkg/src/cluster/dtc_buffer.c +++ b/pkg/src/cluster/dtc_buffer.c @@ -287,12 +287,22 @@ status_t dtc_get_exclusive_owner_pages(knl_session_t *session, buf_ctrl_t **ctrl return CT_SUCCESS; } -bool32 dtc_lock_in_rcy_space_set(uint16 uid) +bool32 dtc_lock_in_rcy_space_set(knl_session_t *session, uint16 uid) { dtc_rcy_context_t *dtc_rcy = DTC_RCY_CONTEXT; rcy_set_t *rcy_set = &dtc_rcy->rcy_set; + if (uid >= CT_MAX_USERS || uid == CT_INVALID_ID16) { + CT_LOG_RUN_ERR("Invalid user id:%u when get space id", uid); + return CT_FALSE; + } + dc_user_t *user = session->kernel->dc_ctx.users[uid]; + if (user == NULL) { + CT_LOG_RUN_ERR("User context is null for uid:%u", uid); + return CT_FALSE; + } + uint32 space_id = user->desc.data_space_id; for (uint32 i = 0; i < rcy_set->space_set_size; i++) { - if (uid == rcy_set->space_id_set[i]) { + if (space_id == rcy_set->space_id_set[i]) { return CT_TRUE; } } @@ -370,6 +380,6 @@ bool32 
dtc_dls_readable(knl_session_t *session, drid_t *lock_id) if (uid == CT_INVALID_ID16) { return CT_FALSE; } - bool32 lock_need_recover = dtc_lock_in_rcy_space_set(uid); + bool32 lock_need_recover = dtc_lock_in_rcy_space_set(session, uid); return !lock_need_recover; } \ No newline at end of file diff --git a/pkg/src/cluster/dtc_dcs.c b/pkg/src/cluster/dtc_dcs.c index 4de3343f239221d7882c25393db0de0879f16b6f..693a45df75f591802e9f9852e9fa2bad4b3e060b 100644 --- a/pkg/src/cluster/dtc_dcs.c +++ b/pkg/src/cluster/dtc_dcs.c @@ -2942,6 +2942,7 @@ static status_t dcs_process_heap_pcr_construct(knl_session_t *session, msg_pcr_r // if current page is not heap page, just return error to requester if (page->head.type != PAGE_TYPE_PCRH_DATA) { buf_leave_page(session, CT_FALSE); + CT_LOG_RUN_ERR("Table has been dropped or truncated, page->head.type: %d", page->head.type); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } diff --git a/pkg/src/cluster/dtc_recovery.c b/pkg/src/cluster/dtc_recovery.c index 7f2226160e9bf1d93bb4e9244da6aff19dfb3a46..ba35c8825a6fac808648d121f9eeed6fd94439bc 100644 --- a/pkg/src/cluster/dtc_recovery.c +++ b/pkg/src/cluster/dtc_recovery.c @@ -2443,7 +2443,7 @@ status_t dtc_rcy_fetch_log_batch(knl_session_t *session, log_batch_t **batch_out rcy_log_point->rcy_point.block_id, (uint64)rcy_log_point->rcy_point.lfn, rcy_log_point->rcy_point.lsn); CM_ABORT_REASONABLE(!cm_dbs_is_enable_dbs() || session->kernel->db.recover_for_restore, - "[DTC RCY] ABORT INFO: DBStore batch not continuous"); + "[DTC RCY] ABORT INFO: dbstor batch not continuous"); rcy_node->recover_done = CT_TRUE; continue; } diff --git a/pkg/src/cms/BUILD.bazel b/pkg/src/cms/BUILD.bazel index 3a51240891fd85a64599f627a8ec63e01c5e2156..cc920c6a1d7152437e3063e884a923d9c6c9e8f4 100644 --- a/pkg/src/cms/BUILD.bazel +++ b/pkg/src/cms/BUILD.bazel @@ -142,7 +142,7 @@ cc_binary ( ":zecms", "//library:z", "//library:cgw_client", - "//library:dbstoreClient", + "//library:dbstorClient", 
"//library:dbstor_tool", "//library:nomlnx/xnetlite", "//library:iod", diff --git a/pkg/src/cms/cms/cms_cmd_imp.c b/pkg/src/cms/cms/cms_cmd_imp.c index c2ddaa0cbc84897c3296cbb155585b690ac92583..0fc46a7e537417517424fb17b6156d11ffb011d0 100644 --- a/pkg/src/cms/cms/cms_cmd_imp.c +++ b/pkg/src/cms/cms/cms_cmd_imp.c @@ -43,7 +43,7 @@ #include "cms_vote.h" #include "cms_log.h" #include "cms_uds_client.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "cm_dbs_file.h" static const char *g_cms_lock_file = "cms_server.lck"; diff --git a/pkg/src/cms/cms/cms_disk_lock.c b/pkg/src/cms/cms/cms_disk_lock.c index 83f78448d3577a77a74c18b63b9b67fb3be76018..848c8311e3847d15bc8f921907055ee275e9b723 100644 --- a/pkg/src/cms/cms/cms_disk_lock.c +++ b/pkg/src/cms/cms/cms_disk_lock.c @@ -137,7 +137,7 @@ status_t cms_disk_lock_init_nfs(cms_disk_lock_t* lock, const char* dev, const ch return CT_SUCCESS; } -status_t cms_dbstore_lock_init(cms_disk_lock_t* lock, uint64 l_start, uint64 l_len) +status_t cms_dbstor_lock_init(cms_disk_lock_t* lock, uint64 l_start, uint64 l_len) { status_t ret = CT_SUCCESS; char file_name[CMS_MAX_NAME_LEN] = {0}; @@ -156,15 +156,15 @@ status_t cms_dbstore_lock_init(cms_disk_lock_t* lock, uint64 l_start, uint64 l_l return CT_ERROR; } if (cm_dbs_lock_init(file_name, l_start, l_len, &lock->fd) != CT_SUCCESS) { - CMS_LOG_ERR("init dbstore lock(%s) start(%llu) len(%llu) failed.", file_name, l_start, l_len); + CMS_LOG_ERR("init dbstor lock(%s) start(%llu) len(%llu) failed.", file_name, l_start, l_len); return CT_ERROR; } return CT_SUCCESS; } -/* 生成当前加锁路径文件的dbstore lock锁句柄 */ -status_t cms_gen_dbstore_lock_obj(cms_disk_lock_t* lock, uint64 l_start, uint64 l_len) +/* 生成当前加锁路径文件的dbstor lock锁句柄 */ +status_t cms_gen_dbstor_lock_obj(cms_disk_lock_t* lock, uint64 l_start, uint64 l_len) { int path_depth = 0; status_t ret = cm_get_file_path_depth(lock->flock->file_name, "/", &path_depth); @@ -180,13 +180,13 @@ status_t cms_gen_dbstore_lock_obj(cms_disk_lock_t* lock, 
uint64 l_start, uint64 ret = cm_get_dbs_file_path_handle(lock->flock->file_name, "/", lock->dbs_fd, path_depth); if (ret != CT_SUCCESS) { - CMS_LOG_ERR("get dbstore file path fd failed, file %s, ret %d", lock->flock->file_name, ret); + CMS_LOG_ERR("get dbstor file path fd failed, file %s, ret %d", lock->flock->file_name, ret); CM_FREE_PTR(lock->dbs_fd); return ret; } - ret = cms_dbstore_lock_init(lock, l_start, l_len); + ret = cms_dbstor_lock_init(lock, l_start, l_len); if (ret != CT_SUCCESS) { - CMS_LOG_ERR("dbstore lock init name:%s offset:%lld len:%lld failed.", lock->flock->file_name, l_start, l_len); + CMS_LOG_ERR("dbstor lock init name:%s offset:%lld len:%lld failed.", lock->flock->file_name, l_start, l_len); CM_FREE_PTR(lock->dbs_fd); return ret; } @@ -215,9 +215,9 @@ status_t cms_disk_lock_init_dbs(cms_disk_lock_t* lock, const char* dev, const ch CM_FREE_PTR(lock->flock); return CT_ERROR; } - ret = cms_gen_dbstore_lock_obj(lock, l_start, l_len); + ret = cms_gen_dbstor_lock_obj(lock, l_start, l_len); if (ret != CT_SUCCESS) { - CMS_LOG_ERR("gen dbstore lock obj failed, file %s, ret %d", lock->flock->file_name, ret); + CMS_LOG_ERR("gen dbstor lock obj failed, file %s, ret %d", lock->flock->file_name, ret); CM_FREE_PTR(lock->flock); return ret; } @@ -278,7 +278,7 @@ status_t cms_disk_lock_init(cms_dev_type_t type, const char* dev, const char* fi } else if (type == CMS_DEV_TYPE_DBS) { ret = cms_disk_lock_init_dbs(lock, dev, file, l_start, l_len, inst_id, is_write); if (ret != CT_SUCCESS) { - CMS_LOG_ERR("cms disk lock init dbstore failed, ret %d, dev %s, file %s, l_start %llu, l_len %llu, " + CMS_LOG_ERR("cms disk lock init dbstor failed, ret %d, dev %s, file %s, l_start %llu, l_len %llu, " " inst_id %lld", ret, dev, file, l_start, l_len, inst_id); return ret; } diff --git a/pkg/src/cms/cms/cms_disk_lock.h b/pkg/src/cms/cms/cms_disk_lock.h index 526d41cdfc326f1b02a13a6c4be65f63971e4e6a..993f624f05204f4e54c40e3ada719850f3a91b41 100644 --- 
a/pkg/src/cms/cms/cms_disk_lock.h +++ b/pkg/src/cms/cms/cms_disk_lock.h @@ -76,9 +76,9 @@ typedef struct _st_cms_disk_lock_t { thread_lock_t slock; // protect seek&read(reopen) or seek&write(reopen) as atomic operation uint32 flag; char dev_name[CMS_FILE_NAME_BUFFER_SIZE]; - object_id_t* dbs_fd; // only used when type is CMS_DEV_TYPE_DBSTORE - int fd_len; // only used when type is CMS_DEV_TYPE_DBSTORE - char file_name[CMS_MAX_NAME_LEN]; // only used when type is CMS_DEV_TYPE_DBSTORE + object_id_t* dbs_fd; // only used when type is CMS_DEV_TYPE_DBSTOR + int fd_len; // only used when type is CMS_DEV_TYPE_DBSTOR + char file_name[CMS_MAX_NAME_LEN]; // only used when type is CMS_DEV_TYPE_DBSTOR }cms_disk_lock_t; typedef union u_cms_master_info_t { diff --git a/pkg/src/cms/cms/cms_gcc.c b/pkg/src/cms/cms/cms_gcc.c index 025dd2d25269845093daba4e9889c0eceefdd0b3..22083ab4c73badca46767ab2a6d31856db5dbaa7 100644 --- a/pkg/src/cms/cms/cms_gcc.c +++ b/pkg/src/cms/cms/cms_gcc.c @@ -33,7 +33,7 @@ #include "cm_utils.h" #include "cms_log.h" #include "cm_dbs_defs.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" typedef struct st_gcc_buffer { uint64 buff[CMS_BLOCK_SIZE / sizeof(uint64)][(sizeof(cms_gcc_t) / CMS_BLOCK_SIZE + 1)]; @@ -1159,13 +1159,10 @@ const cms_res_t* cms_find_res_type(const cms_gcc_t* gcc, const char* res_type) if (gcc->res[res_id].magic == CMS_GCC_RES_MAGIC && cm_strcmpi(gcc->res[res_id].type, res_type) == 0) { res = &gcc->res[res_id]; - break; + return res; } - CMS_LOG_WAR("res type is not found, res_id = %u, res[res_id].magic = %llu, CMS_GCC_RES_MAGIC = " - "%llu,res[res_id].type = %s, ", - res_id, gcc->res[res_id].magic, CMS_GCC_RES_MAGIC, gcc->res[res_id].type); } - + CMS_LOG_WAR("res type [%s] is not found", res_type); return res; } diff --git a/pkg/src/cms/cms/cms_gcc_exp.c b/pkg/src/cms/cms/cms_gcc_exp.c index 7d3f315e184addf3bd96e844cd92c3e23f2274df..7c0674c5344fdfafdb54c2a5f69e2c75f1855d75 100644 --- a/pkg/src/cms/cms/cms_gcc_exp.c +++ 
b/pkg/src/cms/cms/cms_gcc_exp.c @@ -32,7 +32,7 @@ #include "cm_malloc.h" #include "cm_defs.h" #include "cms_log.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "cm_dbs_file.h" #define DBS_CONFIG_MAX_PARAM 256 @@ -661,7 +661,9 @@ status_t cms_keep_recent_files_remote(const char *bak_path, char *prefix) return CT_ERROR; } - if (g_cms_param->gcc_type != CMS_DEV_TYPE_DBS) { + if (CMS_DEV_TYPE_SD == g_cms_param->gcc_type) { + return CT_SUCCESS; + } else if (g_cms_param->gcc_type != CMS_DEV_TYPE_DBS) { ret = cms_remove_old_files(dirname, prefix); } else { ret = cms_remove_old_files_dbs(dirname, prefix); diff --git a/pkg/src/cms/cms/cms_gcc_imp.c b/pkg/src/cms/cms/cms_gcc_imp.c index f111b58a9042eb1c3a04a1bc4c41a7440ad64e0b..367ae9f57a2c17c0eeb64996fdaa23dc426bce73 100644 --- a/pkg/src/cms/cms/cms_gcc_imp.c +++ b/pkg/src/cms/cms/cms_gcc_imp.c @@ -31,7 +31,7 @@ #include "cm_ip.h" #include "cms_disk_lock.h" #include "cms_log.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #define GCC_IMP_OBJ_MAGIC_LEN 10 @@ -938,14 +938,14 @@ static status_t cms_import_gcc_read_file_dbs(const char* file_name, char* buf, i (char*)file_name + strlen(g_cms_param->cms_gcc_bak) + 1, FILE_TYPE, &gcc_backup_file_handle); if (ret != EOK) { - CT_LOG_RUN_ERR("Failed to open gcc export file %s by dbstore", + CT_LOG_RUN_ERR("Failed to open gcc export file %s by dbstor", (char*)file_name + strlen(g_cms_param->cms_gcc_bak) + 1); return CT_ERROR; } ret = dbs_global_handle()->dbs_get_file_size(&gcc_backup_file_handle, &file_size); if (ret != EOK) { - CT_LOG_RUN_ERR("Failed to get file size by dbstore, file: %s", + CT_LOG_RUN_ERR("Failed to get file size by dbstor, file: %s", (char*)file_name + strlen(g_cms_param->cms_gcc_bak) + 1); return CT_ERROR; } @@ -1062,7 +1062,7 @@ status_t cms_restore_gcc_dbs(const char* file_name) (char*)file_name + strlen(g_cms_param->cms_gcc_bak) + 1, FILE_TYPE, &gcc_backup_file_handle); if (ret != EOK) { - CT_LOG_RUN_ERR("Failed to open gcc backup file %s by 
dbstore", + CT_LOG_RUN_ERR("Failed to open gcc backup file %s by dbstor", (char*)file_name + strlen(g_cms_param->cms_gcc_bak) + 1); return CT_ERROR; } diff --git a/pkg/src/cms/cms/cms_instance.c b/pkg/src/cms/cms/cms_instance.c index 7aabd05e7c5a6a48d3e76ba9c348c7565a1dc8b7..d5b1c7eec85ef0eda61a051455eb12a18bfa785f 100644 --- a/pkg/src/cms/cms/cms_instance.c +++ b/pkg/src/cms/cms/cms_instance.c @@ -43,7 +43,7 @@ #include "cm_dbs_intf.h" #include "mes_config.h" #include "cms_log.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" static cms_instance_t g_cms_instance = {.is_server = CT_FALSE, .is_dbstor_cli_init = CT_FALSE}; @@ -185,7 +185,7 @@ status_t cms_init_dbs_client(char* cfg_name, dbs_init_mode init_mode) CT_RETURN_IFERR(dbs_init_lib()); cm_dbs_cfg_s *cfg = cm_dbs_get_cfg(); if (!cfg->enable) { - CT_LOG_RUN_INF("dbstore is not enabled"); + CT_LOG_RUN_INF("dbstor is not enabled"); return CT_SUCCESS; } diff --git a/pkg/src/cms/cms/cms_iofence.c b/pkg/src/cms/cms/cms_iofence.c index e3c36adf4602b97d2e94e724968eaacce2c88139..69adfe327ef309cf20c30162b0a3d24b85a187c9 100644 --- a/pkg/src/cms/cms/cms_iofence.c +++ b/pkg/src/cms/cms/cms_iofence.c @@ -26,6 +26,7 @@ #include "cms_iofence.h" #include "cm_dbs_iofence.h" #include "cm_file_iofence.h" +#include "cm_dss_iofence.h" #include "cms_gcc.h" #include "cm_queue.h" #include "cms_client.h" @@ -171,7 +172,7 @@ status_t cms_kick_node_by_ns(const char* name, uint32 node_id, iofence_type_t io ret = cm_dbs_iof_kick_by_ns(&iof); CMS_SYNC_POINT_GLOBAL_END; if (ret != CT_SUCCESS) { - CT_LOG_RUN_WAR("dbstore iof failed, node_id : %u", node_id); + CT_LOG_RUN_WAR("dbstor iof failed, node_id : %u", node_id); } return ret; } @@ -216,7 +217,7 @@ void try_cms_dbs_kick_node(uint32 node_id, uint32 res_id, iofence_type_t iofence date_t now_time; cm_dbs_cfg_s *cfg = cm_dbs_get_cfg(); if (!cfg->enable) { - CT_LOG_RUN_INF("dbstore is not enabled"); + CT_LOG_RUN_INF("dbstor is not enabled"); return; } @@ -236,12 +237,12 @@ void 
try_cms_dbs_kick_node(uint32 node_id, uint32 res_id, iofence_type_t iofence return; } cm_sleep(IOF_CHECK_INTERVAL); - CMS_LOG_ERR("dbstore iof failed, ret %d, namespace %s, node_id %u", ret, (char *)cfg->ns, node_id); + CMS_LOG_ERR("dbstor iof failed, ret %d, namespace %s, node_id %u", ret, (char *)cfg->ns, node_id); } if (cms_daemon_stop_pull() != CT_SUCCESS) { CMS_LOG_ERR("stop cms daemon process failed."); } - CM_ABORT_REASONABLE(0, "[CMS] ABORT INFO: cms exec iof error, please check if dbstoreclient and dbstoreserver are disconnected."); + CM_ABORT_REASONABLE(0, "[CMS] ABORT INFO: cms exec iof error, please check if dbstorclient and dbstorserver are disconnected."); } void try_cms_file_kick_node(uint32 node_id, uint32 res_id, iofence_type_t iofence_type) @@ -262,11 +263,35 @@ void try_cms_file_kick_node(uint32 node_id, uint32 res_id, iofence_type_t iofenc CM_ABORT_REASONABLE(0, "[CMS] ABORT INFO: cms exec iof error."); } +void try_cms_dss_kick_node(uint32 node_id, uint32 res_id, iofence_type_t iofence_type) +{ + status_t ret = CT_ERROR; + for (int i = 0; i < IOF_RETRY_NUM; i++) { + ret = cm_dss_iof_kick_by_inst_id(node_id); + if (ret == CT_SUCCESS) { + CMS_LOG_INF("kick node succ, node_id %u", node_id); + return; + } + cm_sleep(IOF_CHECK_INTERVAL); + CMS_LOG_ERR("dss iof failed, ret %d, node_id %u", ret, node_id); + } + if (cms_daemon_stop_pull() != CT_SUCCESS) { + CMS_LOG_ERR("stop cms daemon process failed."); + } + CM_ABORT_REASONABLE(0, "[CMS] ABORT INFO: cms exec iof error."); +} + void try_cms_kick_node(uint32 node_id, uint32 res_id, iofence_type_t iofence_type) { if (cm_dbs_is_enable_dbs() == CT_TRUE) { try_cms_dbs_kick_node(node_id, res_id, iofence_type); - } else { - try_cms_file_kick_node(node_id, res_id, iofence_type); + return; + } + + if (g_cms_param->gcc_type == CMS_DEV_TYPE_SD) { + try_cms_dss_kick_node(node_id, res_id, iofence_type); + return; } + + try_cms_file_kick_node(node_id, res_id, iofence_type); } \ No newline at end of file diff --git 
a/pkg/src/cms/cms/cms_main.c b/pkg/src/cms/cms/cms_main.c index f061dd88382f077b1f2adca5c33446225e655eda..1bcaef1b7a41e1f385506bb0ee61ddcf17a1dc0d 100644 --- a/pkg/src/cms/cms/cms_main.c +++ b/pkg/src/cms/cms/cms_main.c @@ -37,7 +37,7 @@ #include "cms_interface.h" #include "cms_persistent.h" #include "cms_log.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" typedef enum e_proc_type_t { CMS_SERVER, CMS_TOOLS, diff --git a/pkg/src/cms/cms/cms_param.c b/pkg/src/cms/cms/cms_param.c index bdad7a7fb17ee3b5ac8d6b6ffb812aa8690369ff..fe3f6ccce376cd0c6d624ec0759e50b2786319cb 100644 --- a/pkg/src/cms/cms/cms_param.c +++ b/pkg/src/cms/cms/cms_param.c @@ -461,7 +461,7 @@ status_t cms_get_mes_ssl_config(config_t *cfg) return CT_SUCCESS; } -status_t cms_get_dbstore_config_value(config_t *cfg) +status_t cms_get_dbstor_config_value(config_t *cfg) { char* use_dbs_value; char* gcc_type; @@ -473,18 +473,18 @@ status_t cms_get_dbstore_config_value(config_t *cfg) use_dbs_value = cm_get_config_value(cfg, "_USE_DBSTOR"); gcc_type = cm_get_config_value(cfg, "GCC_TYPE"); if (cm_strcmpi(gcc_type, "FILE") == 0 && cm_strcmpi(use_dbs_value, "FALSE") == 0) { - CMS_LOG_INF("DBStore disabled for FILE"); + CMS_LOG_INF("DBStor disabled for FILE"); ret = cm_dbs_set_cfg(CT_FALSE, dataPgSize, CT_DFLT_CTRL_BLOCK_SIZE, namespace_value, 0, CT_FALSE, 0); return ret; } if (cm_strcmpi(gcc_type, "NFS") == 0 && cm_strcmpi(use_dbs_value, "FALSE") == 0) { - CMS_LOG_INF("DBStore disabled for NFS"); + CMS_LOG_INF("DBStor disabled for NFS"); ret = cm_dbs_set_cfg(CT_FALSE, dataPgSize, CT_DFLT_CTRL_BLOCK_SIZE, namespace_value, 0, CT_FALSE, 0); return ret; } if (cm_strcmpi(gcc_type, "SD") == 0 && cm_strcmpi(use_dbs_value, "FALSE") == 0) { - CMS_LOG_INF("DBStore disabled for SD"); + CMS_LOG_INF("DBStor disabled for SD"); ret = cm_dbs_set_cfg(CT_FALSE, dataPgSize, CT_DFLT_CTRL_BLOCK_SIZE, namespace_value, 0, CT_FALSE, 0); return ret; } @@ -496,12 +496,12 @@ status_t cms_get_dbstore_config_value(config_t *cfg) } if 
(cm_strcmpi(gcc_type, "DBS") == 0 && (use_dbs_value == NULL || cm_strcmpi(use_dbs_value, "TRUE") == 0)) { - CMS_LOG_INF("Configuring DBStore for DBS"); + CMS_LOG_INF("Configuring DBStor for DBS"); cms_set_recv_timeout(); ret = cm_dbs_set_cfg(CT_TRUE, dataPgSize, CT_DFLT_CTRL_BLOCK_SIZE, namespace_value, 0, CT_FALSE, 0); if (ret != CT_SUCCESS) { - CMS_LOG_ERR("cms set dbstore config failed"); + CMS_LOG_ERR("cms set dbstor config failed"); return CT_ERROR; } return CT_SUCCESS; } @@ -721,15 +721,15 @@ status_t cms_load_param(int64* time_stamp) enable = CT_FALSE; } else if (cm_strcmpi(gcc_type, "DBS") == 0 && (value == NULL || cm_strcmpi(value, "TRUE") == 0)) { enable = CT_TRUE; - CMS_LOG_INF("DBStore not enabled for DBS"); + CMS_LOG_INF("DBStor enabled for DBS"); cms_set_recv_timeout(); g_cms_dbstor_enable = CT_TRUE; } else if (cm_strcmpi(gcc_type, "NFS") == 0 && cm_strcmpi(value, "FALSE") == 0) { enable = CT_FALSE; - CMS_LOG_INF("DBStore disabled for NFS"); + CMS_LOG_INF("DBStor disabled for NFS"); } else if (cm_strcmpi(gcc_type, "SD") == 0 && cm_strcmpi(value, "FALSE") == 0) { enable = CT_FALSE; - CMS_LOG_INF("DBStore not enabled for SD"); + CMS_LOG_INF("DBStor not enabled for SD"); } else { CMS_LOG_ERR("Invalid parameters for '_USE_DBSTOR': gcc_type=%s, value=%s", gcc_type, value); return CT_ERROR; @@ -763,7 +763,7 @@ status_t cms_load_param(int64* time_stamp) if (g_param.cms_mes_pipe_type == CS_TYPE_UC || g_param.cms_mes_pipe_type == CS_TYPE_UC_RDMA || enable) { CT_RETURN_IFERR(set_all_inst_lsid(val_uint32, 1)); } - CT_RETURN_IFERR(cms_get_dbstore_config_value(&cfg)); + CT_RETURN_IFERR(cms_get_dbstor_config_value(&cfg)); for(int idx = 0; idx < MES_TIME_STAMP_NUM; idx++) { time_stamp[idx] = g_mes_config_time[idx]; } diff --git a/pkg/src/cms/cms/cms_param.h b/pkg/src/cms/cms/cms_param.h index f1a4d9620d45106449f2f33f190a50b6cf7ecf64..6e3327545613eaeb63548dd310395f086e8deaa3 100644 --- a/pkg/src/cms/cms/cms_param.h +++ b/pkg/src/cms/cms/cms_param.h @@ -76,7 +76,7
@@ status_t cms_get_detect_file(char *detect_file_all, uint32 detect_file_all_len, status_t cms_init_detect_file(char *detect_file_all); status_t cms_get_value_is_valid(char* value, uint32 *val_uint32); void cms_get_mes_config_value(config_t *cfg); -status_t cms_get_dbstore_config_value(config_t *cfg); +status_t cms_get_dbstor_config_value(config_t *cfg); status_t cms_load_keyfiles(void); extern const cms_param_t* g_cms_param; diff --git a/pkg/src/cms/cms/cms_stat.c b/pkg/src/cms/cms/cms_stat.c index 25a5902b5c3ef49afb86853fdf29b18aea7d540a..4239d73c20b11f5256487b962a2fc140009ac2f4 100644 --- a/pkg/src/cms/cms/cms_stat.c +++ b/pkg/src/cms/cms/cms_stat.c @@ -44,7 +44,7 @@ #include "cms_log.h" #include "cms_cmd_upgrade.h" #include "cms_stat.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" void cms_date2str(date_t date, char* str, uint32 max_size); @@ -322,7 +322,7 @@ status_t cms_init_file_dbs(object_id_t *handle, const char *filename) uint64 file_size = 0; int ret = dbs_global_handle()->dbs_get_file_size(handle, &file_size); if (ret != 0) { - CT_LOG_RUN_ERR("Failed to get file size by dbstore, file: %s", (char*)filename); + CT_LOG_RUN_ERR("Failed to get file size by dbstor, file: %s", (char*)filename); return CT_ERROR; } @@ -558,7 +558,7 @@ void cms_exec_res_script_print_log(const char* arg, char *cmd) status_t cms_exec_res_script(const char* script, const char* arg, uint32 timeout_ms, status_t* result) { - CMS_LOG_INF("begin cms exec res script."); + CMS_LOG_TIMER("begin cms exec res script."); char cmd[CMS_CMD_BUFFER_SIZE] = {0}; *result = CT_ERROR; errno_t ret = EOK; @@ -595,22 +595,23 @@ status_t cms_exec_res_script(const char* script, const char* arg, uint32 timeout } cmd_out[size] = 0; - CMS_LOG_INF("end cms exec res script."); - if (strstr(cmd_out, "RES_SUCCESS") != NULL) { - *result = CT_SUCCESS; - return CT_SUCCESS; - } + CMS_LOG_TIMER("end cms exec res script."); if (strstr(cmd_out, CMS_TIMEOUT_ERROR_NUMBER) != NULL) { *result = CT_TIMEDOUT; return 
CT_SUCCESS; } - if (strstr(cmd_out, "RES_MULTI") != NULL) { + if (strstr(cmd_out, "RES_EAGAIN") != NULL) { *result = CT_EAGAIN; return CT_SUCCESS; } + if (strstr(cmd_out, "RES_SUCCESS") != NULL) { + *result = CT_SUCCESS; + return CT_SUCCESS; + } + *result = CT_ERROR; return CT_SUCCESS; } @@ -926,6 +927,33 @@ status_t cms_check_res_running(uint32 res_id) return CT_SUCCESS; } +status_t cms_check_dss_stat(cms_res_t res, cms_res_stat_t stat) +{ + status_t dss_status; + status_t ret; + + ret = cms_res_check(res.res_id, &dss_status); + if (ret != CT_SUCCESS || dss_status == CT_ERROR) { + CMS_LOG_ERR_LIMIT(LOG_PRINT_INTERVAL_SECOND_20, + "check dss failed, res_id=%u, script=%s, ret=%d, dss_status=%d", res.res_id, res.script, ret, dss_status); + return CT_ERROR; + } + + if (dss_status == CT_TIMEDOUT) { + CMS_LOG_ERR_LIMIT(LOG_PRINT_INTERVAL_SECOND_20, + "check dss stat timeout, res_id=%u, script=%s", res.res_id, res.script); + return CT_ERROR; + } + + if (dss_status == CT_EAGAIN) { + CMS_LOG_ERR_LIMIT(LOG_PRINT_INTERVAL_SECOND_20, + "dss work stat is abnormal, res_id=%u, script=%s", res.res_id, res.script); + return CT_ERROR; + } + cms_res_hb(res.res_id); + return CT_SUCCESS; +} + status_t cms_res_start(uint32 res_id, uint32 timeout_ms) { status_t ret; @@ -936,6 +964,24 @@ status_t cms_res_start(uint32 res_id, uint32 timeout_ms) CT_RETURN_IFERR(cms_get_res_by_id(res_id, &res)); CMS_LOG_INF("begin start res, res_id=%u", res_id); + if (g_cms_param->gcc_type == CMS_DEV_TYPE_SD && cm_strcmpi(res.name, CMS_RES_TYPE_DB) == 0) { + CMS_LOG_INF("cms check dss stat before start db, res_id=%u", res_id); + cms_res_t dss_res; + uint32 dss_res_id; + cms_res_stat_t dss_res_stat; + // find dss resource + if (cms_get_res_id_by_type(CMS_RES_TYPE_DSS, &dss_res_id) != CT_SUCCESS) { + CMS_LOG_ERR("cms get res id failed, res_type %s", CMS_RES_TYPE_DSS); + return CT_ERROR; + } + + CT_RETURN_IFERR(cms_get_res_by_id(dss_res_id, &dss_res)); + get_cur_res_stat(dss_res_id, &dss_res_stat); + if 
(cms_check_dss_stat(dss_res, dss_res_stat) != CT_SUCCESS) { + CMS_LOG_ERR("DSS status is abnormal, unable to start Cantian."); + return CT_ERROR; + } + } CMS_LOG_INF("begin to get start lock, res_id=%u", res_id); if (cms_get_res_start_lock(res_id) != CT_SUCCESS) { @@ -950,7 +996,9 @@ status_t cms_res_start(uint32 res_id, uint32 timeout_ms) return CT_SUCCESS; } - if (wait_for_cluster_reform_done(res_id) != CT_SUCCESS) { + if (cm_strcmpi(res.name, CMS_RES_TYPE_DSS) == 0) { + CMS_LOG_INF("CMS does not need to wait for reform when starting DSS"); + } else if (wait_for_cluster_reform_done(res_id) != CT_SUCCESS) { CMS_LOG_ERR("cms wait cluster reform done failed"); cms_release_res_start_lock(res_id); return CT_ERROR; @@ -1170,6 +1218,11 @@ status_t cms_res_detect_online(uint32 res_id, cms_res_stat_t *old_stat) cm_thread_unlock(&g_res_session[res_id].lock); return CT_ERROR; } + res_stat->last_stat_change = cm_now(); + res_stat->work_stat = 1; + res_stat->hb_time = cm_now(); + res_stat->session_id = res_id; + res_stat->inst_id = g_cms_param->node_id; cms_stat_set(res_stat, CMS_RES_ONLINE, &is_changed); if (!(is_changed)) { cm_thread_unlock(&g_res_session[res_id].lock); diff --git a/pkg/src/cms/cms/cms_stat.h b/pkg/src/cms/cms/cms_stat.h index 0164347f31ca8b0be36f8d6cd7b4e3a82a6ca763..df4ab6501d4b3251f9c17f53bd3666dc85f29e81 100644 --- a/pkg/src/cms/cms/cms_stat.h +++ b/pkg/src/cms/cms/cms_stat.h @@ -335,6 +335,7 @@ status_t cms_get_node_view(uint64* cms_online_bitmap); status_t cms_check_res_running(uint32 res_id); bool32 cms_check_node_dead(uint32 node_id); +status_t cms_check_dss_stat(cms_res_t res, cms_res_stat_t stat); status_t cms_init_mes_channel_version(void); status_t cms_get_mes_channel_version(uint64* version); status_t cms_get_cluster_res_list_4tool(uint32 res_id, cms_tool_res_stat_list_t *res_list); diff --git a/pkg/src/cms/cms/cms_work.c b/pkg/src/cms/cms/cms_work.c index 0f8cde3ea601c1d19e49ce19e080a2098b8d1560..007bee35175eb9ce1add0b1ff2f7fd68e8cddfec 100644 
--- a/pkg/src/cms/cms/cms_work.c +++ b/pkg/src/cms/cms/cms_work.c @@ -245,8 +245,17 @@ status_t cms_start_res_cluster(cms_packet_head_t* msg, char* err_info, uint32 er return CT_SUCCESS; } -status_t cms_create_res_disable() +status_t cms_create_res_disable(uint32 res_id) { + cms_res_t res; + if (cms_get_res_by_id(res_id, &res) != CT_SUCCESS) { + return CT_ERROR; + } + + if (cm_strcmpi(res.name, CMS_RES_TYPE_DB) != 0) { + return CT_SUCCESS; + } + int32 handle = CT_INVALID_HANDLE; char res_disable_file[CMS_PATH_BUFFER_SIZE] = { 0 }; errno_t err = sprintf_s(res_disable_file, CMS_PATH_BUFFER_SIZE, "%s/%s", g_cms_param->cms_home, "res_disable"); @@ -260,8 +269,16 @@ status_t cms_create_res_disable() return CT_SUCCESS; } -status_t cms_remove_res_disable() +status_t cms_remove_res_disable(uint32 res_id) { + cms_res_t res; + if (cms_get_res_by_id(res_id, &res) != CT_SUCCESS) { + return CT_ERROR; + } + + if (cm_strcmpi(res.name, CMS_RES_TYPE_DB) != 0) { + return CT_SUCCESS; + } char res_disable_file[CMS_PATH_BUFFER_SIZE] = { 0 }; errno_t err = sprintf_s(res_disable_file, CMS_PATH_BUFFER_SIZE, "%s/%s", g_cms_param->cms_home, "res_disable"); PRTS_RETURN_IFERR(err); @@ -285,7 +302,7 @@ status_t cms_start_res_local(uint32 res_id, uint32 timeout, char* err_info) return ret; } - ret = cms_remove_res_disable(); + ret = cms_remove_res_disable(res_id); if (ret != CT_SUCCESS) { err = strcpy_s(err_info, CMS_INFO_BUFFER_SIZE, "remove res disable file failed"); cms_securec_check(err); @@ -532,7 +549,7 @@ status_t cms_stop_res_local_force(uint32 res_id, char* err_info) return ret; } - ret = cms_create_res_disable(); + ret = cms_create_res_disable(res_id); if (ret != CT_SUCCESS) { err = strcpy_s(err_info, CMS_INFO_BUFFER_SIZE, "create res disable file failed"); cms_securec_check(err); @@ -561,7 +578,7 @@ status_t cms_check_res_status(uint32 res_id, char* err_info) } if (res_status == CT_EAGAIN) { - err = strcpy_s(err_info, CMS_INFO_BUFFER_SIZE, "More than one resource is running."); + err 
= strcpy_s(err_info, CMS_INFO_BUFFER_SIZE, "resource work stat is abnormal"); cms_securec_check(err); return CT_ERROR; } @@ -601,7 +618,7 @@ status_t cms_stop_res_local(uint32 res_id, char* err_info) return ret; } - ret = cms_create_res_disable(); + ret = cms_create_res_disable(res_id); if (ret != CT_SUCCESS) { err = strcpy_s(err_info, CMS_INFO_BUFFER_SIZE, "create res disable file failed"); cms_securec_check(err); @@ -3915,8 +3932,11 @@ void cms_res_check_timer_entry(thread_t* thread) } min_interval = MIN(res.check_interval, min_interval); min_interval = MAX(max_interval, min_interval); - get_cur_res_stat(i, &stat); + if (cm_strcmpi(res.name, CMS_RES_TYPE_DSS) == 0 && cms_check_dss_stat(res, stat) == CT_SUCCESS) { + cms_res_detect_online(i, &stat); + continue; + } now_time = cm_now(); // cluster time cms_detect_osclock_abnormal(now_time, last_refresh_time); last_refresh_time = now_time; diff --git a/pkg/src/cms/interface/cms_comm.c b/pkg/src/cms/interface/cms_comm.c index 55d8278713a70edf3c898b443ba8c03a461cb12d..354638b60c2e130f115b46a41676d35bfdd17447 100644 --- a/pkg/src/cms/interface/cms_comm.c +++ b/pkg/src/cms/interface/cms_comm.c @@ -32,6 +32,7 @@ #include "cs_uds.h" #include "securec.h" bool32 g_cluster_no_cms = CT_FALSE; + status_t cms_check_addr_dev_stat(struct sockaddr_in* addr) { struct ifaddrs* ifaddr; @@ -62,3 +63,8 @@ status_t cms_check_addr_dev_stat(struct sockaddr_in* addr) freeifaddrs(ifaddr); return CT_ERROR; } + +void cms_set_cluster_no_cms_switch(bool32 no_cms) +{ + g_cluster_no_cms = no_cms; +} diff --git a/pkg/src/cms/interface/cms_comm.h b/pkg/src/cms/interface/cms_comm.h index 4c44a53e45c384abb79ebc040129543ab8265c97..11579f1a8a782e2859c05f61f40d31d350325087 100644 --- a/pkg/src/cms/interface/cms_comm.h +++ b/pkg/src/cms/interface/cms_comm.h @@ -60,6 +60,7 @@ extern "C" { } while (0); status_t cms_check_addr_dev_stat(struct sockaddr_in* addr); +void cms_set_cluster_no_cms_switch(bool32 no_cms); #ifdef __cplusplus } diff --git 
a/pkg/src/cms/interface/cms_interface.c b/pkg/src/cms/interface/cms_interface.c index b8ba6130962c5ef5cca2b796af77da137bcdf861..012e7f460223fb09efa95b33133ebb51259583ea 100644 --- a/pkg/src/cms/interface/cms_interface.c +++ b/pkg/src/cms/interface/cms_interface.c @@ -43,6 +43,7 @@ #include "cm_signal.h" #include "cm_dbs_intf.h" #include "cm_file_iofence.h" +#include "cm_dss_iofence.h" const char* g_stat_str[] = { "UNKNOWN", @@ -64,6 +65,7 @@ static thread_t g_cli_worker_thread; static bool32 g_cli_workert_term = CT_TRUE; static char g_cms_home[CT_MAX_PATH_LEN] = {0}; static uint16 g_node_id = -1; +static bool32 g_dss_enable = CT_FALSE; static thread_lock_t g_cli_lock; static cms_que_t g_cli_recv_que; static config_item_t g_cms_params[] = { @@ -73,6 +75,8 @@ static config_item_t g_cms_params[] = { EFFECT_REBOOT, CFG_INS, NULL, NULL}, {"_PORT", CT_TRUE, CT_FALSE, "", NULL, NULL, "-", "-", "CT_TYPE_STRING", NULL, 0, \ EFFECT_REBOOT, CFG_INS, NULL, NULL}, + {"GCC_TYPE", CT_TRUE, CT_FALSE, "", NULL, NULL, "-", "-", "CT_TYPE_STRING", NULL, 0, \ + EFFECT_REBOOT, CFG_INS, NULL, NULL}, }; const char* cms_stat_str(cms_stat_t stat) @@ -153,7 +157,12 @@ static status_t cms_load_param(void) g_node_id = (uint16)size; CT_LOG_RUN_INF("CMS NODE_ID:%d", (int32)g_node_id); - + value = cm_get_config_value(&cfg, "GCC_TYPE"); + if (value != NULL && cm_strcmpi(value, "SD") == 0) { + g_dss_enable = CT_TRUE; + } else { + g_dss_enable = CT_FALSE; + } return CT_SUCCESS; } @@ -563,7 +572,7 @@ static void cms_cli_proc_msg_req_dbs_iof_kick(cms_packet_head_t* msg) cm_dbs_cfg_s *cfg = cm_dbs_get_cfg(); ret = cm_dbs_get_ns_name(DEV_TYPE_PGPOOL, &iof.nsName); if (ret != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore page pool nsid failed, namespace name %s, ret %d.", cfg->ns, ret); + CT_LOG_RUN_ERR("get dbstor page pool nsid failed, namespace name %s, ret %d.", cfg->ns, ret); return; } @@ -582,6 +591,21 @@ static void cms_cli_proc_msg_req_dbs_iof_kick(cms_packet_head_t* msg) CT_LOG_DEBUG_INF("proc msg 
req iof kick succ"); } +static void cms_cli_proc_msg_req_dss_iof_kick(cms_packet_head_t* msg) +{ + int32 ret = CT_SUCCESS; + cms_cli_msg_req_iof_kick_t* req = (cms_cli_msg_req_iof_kick_t*)msg; + + CT_LOG_DEBUG_INF("begin proc msg req iof kick"); + ret = cm_dss_iof_kick_by_inst_id(req->node_id); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("proc msg req iof kick failed, kick node %u, ret %d.", req->node_id, ret); + } + + (void)cms_cli_iof_kick_res(ret); + CT_LOG_DEBUG_INF("proc msg req iof kick succ"); +} + static void cms_cli_proc_msg_req_file_iof_kick(cms_packet_head_t* msg) { int32 ret = CT_SUCCESS; @@ -601,6 +625,8 @@ static void cms_cli_proc_msg_req_iof_kick(cms_packet_head_t* msg) { if (cm_dbs_is_enable_dbs() == CT_TRUE) { cms_cli_proc_msg_req_dbs_iof_kick(msg); + } else if (g_dss_enable == CT_TRUE) { + cms_cli_proc_msg_req_dss_iof_kick(msg); } else { cms_cli_proc_msg_req_file_iof_kick(msg); } @@ -653,7 +679,7 @@ static void cms_cli_proc_msg(cms_packet_head_t* msg) static void cms_uds_cli_retry_conn(void) { status_t ret = CT_SUCCESS; - for (int32 i = 0; i < CMS_RETRY_CONN_COUNT; i++) { + for (int32 i = 0; i <= CMS_RETRY_CONN_COUNT; i++) { if (g_cli_conn_try == CT_FALSE) { return; } @@ -664,7 +690,9 @@ static void cms_uds_cli_retry_conn(void) return; } CT_LOG_RUN_ERR("cms cli retry conn failed, ret %d, i %d", ret, i); - cm_sleep(CMS_RETRY_CONN_INTERVAL); + if (i < CMS_RETRY_CONN_COUNT) { + cm_sleep(CMS_RETRY_CONN_INTERVAL); + } } CM_ABORT_REASONABLE(0, "[CMS_CLI] ABORT INFO: cms cli conn retry failed"); diff --git a/pkg/src/cms/interface/cms_interface.h b/pkg/src/cms/interface/cms_interface.h index 3aeb5e928d32012fbaddc654d7182427f19bdf9c..92dd7546117857f817ebea8a6a57d0783385ec36 100644 --- a/pkg/src/cms/interface/cms_interface.h +++ b/pkg/src/cms/interface/cms_interface.h @@ -39,6 +39,7 @@ extern "C" #define CMS_CFG_FILENAME "cms.ini" #define CMS_RES_TYPE_CTSTORE "CTSTORE" #define CMS_RES_TYPE_DB "DB" +#define CMS_RES_TYPE_DSS "DSS" #define 
CMS_MAX_RES_SLOT_COUNT 8 #define CMS_CLI_HB_INTERVAL (MICROSECS_PER_SECOND * 2) #define CMS_CLI_UDS_SEND_TMOUT 1000 @@ -88,6 +89,7 @@ status_t cms_set_res_data(uint32 slot_id, char* data, uint32 size); status_t cms_get_res_data(uint32 slot_id, char* data, uint32 max_size, uint32* size); status_t cms_env_init(void); void cms_res_inst_register_upgrade(cms_upgrade_op_t upgrade_func); +void cms_set_cluster_no_cms_switch(bool32 no_cms); #ifdef __cplusplus } diff --git a/pkg/src/common/BUILD.bazel b/pkg/src/common/BUILD.bazel index ce6e55a2c2c2e856a5bc91931d8894d1afe84784..dd4ba2c5a0ed5109cf473eaf113213c734edf4d0 100644 --- a/pkg/src/common/BUILD.bazel +++ b/pkg/src/common/BUILD.bazel @@ -209,7 +209,7 @@ cc_binary( "//library:SDP", "//library:pcre2-8", "//library:cgw_client", - "//library:dbstoreClient", + "//library:dbstorClient", "//library:dbstor_tool", "//library:nomlnx/xnetlite", "//library:iod", diff --git a/pkg/src/common/cm_context_pool.c b/pkg/src/common/cm_context_pool.c index d484c73f298fca00b0374f60ecf6fac5f69bdf09..7c8aaa4e15b4b6e280991e8ae6170d2f8b6292cc 100644 --- a/pkg/src/common/cm_context_pool.c +++ b/pkg/src/common/cm_context_pool.c @@ -49,19 +49,19 @@ status_t ctx_pool_create(context_pool_profile_t *profile, context_pool_t **pool) } /* initialize ctx_pool memory object */ - ctx_pool->memory = (memory_pool_t *)((char*)ctx_pool + pool_size); + ctx_pool->memory = (memory_pool_t *)((char *)ctx_pool + pool_size); /* initialize ctx_pool ctx_map */ - ctx_pool->map = (context_map_t *)((char*)ctx_pool + pool_size + sizeof(memory_pool_t)); + ctx_pool->map = (context_map_t *)((char *)ctx_pool + pool_size + sizeof(memory_pool_t)); ctx_pool->map->map_size = profile->optimize_pages; ctx_pool->map->free_items.first = CT_INVALID_ID32; /* initialize ctx_pool lru_list */ - ctx_pool->lru_list = (lru_list_t *)((char*)ctx_pool + pool_size + sizeof(memory_pool_t) + map_size); + ctx_pool->lru_list = (lru_list_t *)((char *)ctx_pool + pool_size + sizeof(memory_pool_t) + 
map_size); ctx_pool->lru_list_cnt = CT_LRU_LIST_CNT; - if (mpool_create(profile->area, profile->name, - profile->init_pages, profile->optimize_pages, ctx_pool->memory) != CT_SUCCESS) { + if (mpool_create(profile->area, profile->name, profile->init_pages, profile->optimize_pages, ctx_pool->memory) != + CT_SUCCESS) { CM_FREE_PTR(ctx_pool); return CT_ERROR; } @@ -212,7 +212,7 @@ bool32 ctx_pool_try_remove(context_pool_t *pool, context_ctrl_t *ctrl) cm_spin_lock(&pool->lock, NULL); ctx_map_remove(pool, ctrl); cm_spin_unlock(&pool->lock); - + lru_list = &pool->lru_list[ctrl->hash_value % pool->lru_list_cnt]; ctx_lru_remove(lru_list, ctrl); @@ -296,7 +296,7 @@ bool32 ctx_recycle_internal_core(context_pool_t *pool) bool32 removed = CT_FALSE; uint32 idx = pool->lru_list_idx++ % pool->lru_list_cnt; - for (uint32 i = 0 ; i < pool->lru_list_cnt; i++) { + for (uint32 i = 0; i < pool->lru_list_cnt; i++) { lru_list = &pool->lru_list[(idx + i) % pool->lru_list_cnt]; cm_spin_lock(&lru_list->lock, NULL); @@ -489,8 +489,7 @@ static inline void ctx_ctrl_dec_ref(context_ctrl_t *ctrl) } #ifndef TEST_MEM -static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 hash_value, text_t *text, uint32 uid, - uint32 remote_conn_type, bool32 is_direct_route) +static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 hash_value, text_t *text, uint32 uid) { text_t piece, sub_text; uint32 remain_size, page_id; @@ -498,8 +497,7 @@ static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 has /* firstly check: hash value,sql length,valid,etc */ cm_spin_lock(&ctrl->lock, NULL); - bool32 cond = (ctrl->hash_value != hash_value || text->len != ctrl->text_size || !ctrl->valid || ctrl->uid != uid || - ctrl->remote_conn_type != remote_conn_type || ctrl->is_direct_route != is_direct_route); + bool32 cond = (ctrl->hash_value != hash_value || text->len != ctrl->text_size || !ctrl->valid || ctrl->uid != uid); if (cond) { cm_spin_unlock(&ctrl->lock); 
return CT_FALSE; @@ -546,20 +544,15 @@ static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 has ctx_ctrl_dec_ref(ctrl); return CT_FALSE; } - + return CT_TRUE; } #else -static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 hash_value, text_t *text, uint32 uid, - uint32 remote_conn_type, bool32 is_direct_route) +static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 hash_value, text_t *text, uint32 uid) { /* firstly check: hash value,sql length,valid,etc */ cm_spin_lock(&ctrl->lock, NULL); - if (ctrl->hash_value != hash_value - || text->len != ctrl->text_size || !ctrl->valid - || ctrl->uid != uid - || ctrl->remote_conn_type != remote_conn_type - || ctrl->is_direct_route != is_direct_route) { + if (ctrl->hash_value != hash_value || text->len != ctrl->text_size || !ctrl->valid || ctrl->uid != uid) { cm_spin_unlock(&ctrl->lock); return CT_FALSE; } @@ -577,8 +570,7 @@ static bool32 ctx_matched(context_pool_t *pool, context_ctrl_t *ctrl, uint32 has } #endif // TEST_MEM -void *ctx_pool_find(context_pool_t *pool, text_t *text, uint32 hash_value, uint32 uid, uint32 remote_conn_type, - bool32 is_direct_route) +void *ctx_pool_find(context_pool_t *pool, text_t *text, uint32 hash_value, uint32 uid) { context_bucket_t *bucket = NULL; context_ctrl_t *ctrl = NULL; @@ -589,7 +581,7 @@ void *ctx_pool_find(context_pool_t *pool, text_t *text, uint32 hash_value, uint3 ctrl = bucket->first; while (ctrl != NULL) { - if (ctx_matched(pool, ctrl, hash_value, text, uid, remote_conn_type, is_direct_route)) { + if (ctx_matched(pool, ctrl, hash_value, text, uid)) { cm_spin_unlock(&bucket->enque_lock); return ctrl; } @@ -687,8 +679,8 @@ status_t ctx_read_text(context_pool_t *pool, context_ctrl_t *ctrl, text_t *text, if (text->len <= ctrl->text_size && is_cut == CT_FALSE) { CT_THROW_ERROR(ERR_BUFFER_OVERFLOW, ctrl->text_size, text->len); return CT_ERROR; - } else if (text->len <= ctrl->text_size && - is_cut == CT_TRUE) { // when 
buffer length is not enough and sql_text needs cut off. + } else if (text->len <= ctrl->text_size && is_cut == CT_TRUE) { // when buffer length is not enough and sql_text + // needs cut off. remain_size = text->len - 1; } else { remain_size = ctrl->text_size; @@ -711,7 +703,8 @@ status_t ctx_read_text(context_pool_t *pool, context_ctrl_t *ctrl, text_t *text, piece_len = (piece_len > remain_size) ? remain_size : piece_len; if (piece_len != 0) { - MEMS_RETURN_IFERR(memcpy_sp(text->str + offset, (size_t)(text->len - offset), piece_str, (size_t)piece_len)); + MEMS_RETURN_IFERR( + memcpy_sp(text->str + offset, (size_t)(text->len - offset), piece_str, (size_t)piece_len)); } offset += piece_len; remain_size -= piece_len; diff --git a/pkg/src/common/cm_context_pool.h b/pkg/src/common/cm_context_pool.h index 872fe42f32452bb61e9557115a90d5ec088b00ac..98bd208bf4868c1dbbf88d86b6c69651fbb41411 100644 --- a/pkg/src/common/cm_context_pool.h +++ b/pkg/src/common/cm_context_pool.h @@ -130,8 +130,7 @@ void ctx_insert(context_pool_t *pool, context_ctrl_t *ctrl); void ctx_bucket_insert(context_bucket_t *bucket, context_ctrl_t *ctrl); status_t ctx_write_text(context_ctrl_t *ctrl, text_t *text); status_t ctx_read_text(context_pool_t *pool, context_ctrl_t *ctrl, text_t *text, bool32 is_cut); -void *ctx_pool_find(context_pool_t *pool, text_t *text, uint32 hash_value, uint32 uid, uint32 remote_conn_type, - bool32 is_direct_route); +void *ctx_pool_find(context_pool_t *pool, text_t *text, uint32 hash_value, uint32 uid); void ctx_dec_ref(context_pool_t *pool, context_ctrl_t *ctrl); void ctx_dec_exec(context_ctrl_t *ctrl); void ctx_pool_lru_move_to_head(context_pool_t *pool, context_ctrl_t *ctrl); diff --git a/pkg/src/common/cm_dbs_ctrl.c b/pkg/src/common/cm_dbs_ctrl.c index 05af192c97800fae1d6a552954b8acdb307a7715..d6c53788bc6a3d8cb5c5a6f5090e7e14d28f5952 100644 --- a/pkg/src/common/cm_dbs_ctrl.c +++ b/pkg/src/common/cm_dbs_ctrl.c @@ -30,7 +30,7 @@ #include "cm_dbs_map.h" #include "cm_text.h" 
#include "cm_dbs_iofence.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "srv_param_common.h" typedef struct { @@ -243,7 +243,7 @@ void exit_panic(void) void cm_set_dbs_uuid_lsid(const char* uuid, uint32 lsid) { dbs_global_handle()->dbs_client_set_uuid_lsid(uuid, lsid); - CT_LOG_RUN_INF("set dbstore uuid %s and lsid %u", uuid, lsid); + CT_LOG_RUN_INF("set dbstor uuid %s and lsid %u", uuid, lsid); return; } @@ -274,9 +274,9 @@ status_t cm_dbs_init(const char *home_path, char *cfg_name, dbs_init_mode init_m CT_LOG_RUN_ERR("Failed(%d) to init dbstor client at %s.", ret, dbstor_work_path); return CT_ERROR; } - CT_LOG_RUN_INF("START WAIT DBSTORE INIT"); + CT_LOG_RUN_INF("START WAIT DBSTOR INIT"); cm_dbs_map_init(); - CT_LOG_RUN_INF("END WAIT DBSTORE INIT"); + CT_LOG_RUN_INF("END WAIT DBSTOR INIT"); return CT_SUCCESS; } @@ -287,7 +287,7 @@ status_t cm_dbs_iof_reg_all_ns(uint32 inst_id) iof_info_t iof = {0}; if (!cfg->enable) { - CT_LOG_RUN_INF("dbstore is not enabled"); + CT_LOG_RUN_INF("dbstor is not enabled"); return CT_SUCCESS; } diff --git a/pkg/src/common/cm_dbs_defs.h b/pkg/src/common/cm_dbs_defs.h index 40e7a2a6e0541d33928bf41340ea318337912776..207f0c678abac56d3474f9005ef3c29b3fffc3fc 100644 --- a/pkg/src/common/cm_dbs_defs.h +++ b/pkg/src/common/cm_dbs_defs.h @@ -54,10 +54,13 @@ extern "C" { #define MAX_DBS_FILE_NAME_LEN 64 #define MAX_DBS_FILE_NUM_INDIR 1024 #define MAX_DBS_VSTORE_ID_LEN 11 - +#define MAX_DBS_STATISTICAL_SIZE 7200 // define-namespace #define NS_MAX_NODE_NUM 64 #define NS_MAX_TERM_NUM 64 +#define INVALID_VALUE32 0xFFFFFFFF +// dbstor 工具进程id,需要与dbstor保持一致 +#define NS_TERM_CLIENT_TEST_NS_IDX (INVALID_VALUE32 - 10) // define-pagepool #define MAX_PAGE_POOL_NUM_IN_NAMESPACE (8192) @@ -298,6 +301,11 @@ typedef struct { CsTermAccess accessMode; // 设置权限 } TermAccessAttr; +typedef struct { + uint32_t nsIdx; // dbstor内部namespace数组下标 + uint32_t termIdx; // namespace中进程下标 +} NsTermHandle; + // struct-pagepool typedef struct { char 
nsName[CSS_MAX_NAME_LEN]; diff --git a/pkg/src/common/cm_dbs_file.c b/pkg/src/common/cm_dbs_file.c index 13ff27f98c4be1871b2a232e9721863941a0b241..e2409330d54a4ed23ccb2d6240948b127f9703b8 100644 --- a/pkg/src/common/cm_dbs_file.c +++ b/pkg/src/common/cm_dbs_file.c @@ -37,7 +37,7 @@ #include "cm_dbs_ctrl.h" #include "cm_dbs_intf.h" #include "cm_dbs_defs.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "cm_dbs_file.h" #define DBSTOR_MAX_FILE_SIZE (1024ULL * 1024 * 1024 * 1024) diff --git a/pkg/src/common/cm_dbs_iofence.c b/pkg/src/common/cm_dbs_iofence.c index 78b5a1910e308e760520a7470c3b6b46ce0094ad..b8a6caf1a253e842ea83758bbf34df6691892c96 100644 --- a/pkg/src/common/cm_dbs_iofence.c +++ b/pkg/src/common/cm_dbs_iofence.c @@ -26,7 +26,7 @@ #include "cm_log.h" #include "cm_dbs_iofence.h" #include "cm_dbs_intf.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" int32 cm_dbs_iof_register(iof_info_t* iof_info) { diff --git a/pkg/src/common/cm_dbs_iofence.h b/pkg/src/common/cm_dbs_iofence.h index 14ad3a71e4fc632532bb31e53608809508e1b333..bfb6aeb4127e35885611be8ec2eb99dd0c1f7c1c 100644 --- a/pkg/src/common/cm_dbs_iofence.h +++ b/pkg/src/common/cm_dbs_iofence.h @@ -35,7 +35,7 @@ extern "C" { typedef struct st_iof_info { uint32 nodeid; - NameSpaceId nsid; // dbstore name space id + NameSpaceId nsid; // dbstor name space id char* nsName; uint32 termid; // ctd process id uint64 sn; // serial num for dbstor iof request diff --git a/pkg/src/common/cm_dbs_module.h b/pkg/src/common/cm_dbs_module.h index 4a928ac220835a6c65d3d41a0be41e972cd9900d..bce5b8e63d758eebb67eeff8a38a9cc42e5e8779 100644 --- a/pkg/src/common/cm_dbs_module.h +++ b/pkg/src/common/cm_dbs_module.h @@ -33,7 +33,7 @@ extern "C" { #endif #ifndef MODULE_ID -#define MODULE_ID DBSTORE +#define MODULE_ID DBSTOR #endif #ifdef __cplusplus diff --git a/pkg/src/common/cm_dbs_pgpool.c b/pkg/src/common/cm_dbs_pgpool.c index dcd314b5e3e779c91556d76978210f6a7d0fe798..ec66cb97879a3d89f130ae38bebf137ff6b4e372 100644 
--- a/pkg/src/common/cm_dbs_pgpool.c +++ b/pkg/src/common/cm_dbs_pgpool.c @@ -35,7 +35,7 @@ #include "cm_dbs_map.h" #include "cm_dbs_ctrl.h" #include "cm_dbs_intf.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #define DBS_DEFAULT_POOL_SIZE (1 << 20) #define DBS_PAGE_POOL_PART_SIZE SIZE_M(64) // PagePool的Partition大小 diff --git a/pkg/src/common/cm_dbs_snapshot.h b/pkg/src/common/cm_dbs_snapshot.h new file mode 100644 index 0000000000000000000000000000000000000000..2cb996c34933fdcc3d50f2a97dce08fc9db2e59a --- /dev/null +++ b/pkg/src/common/cm_dbs_snapshot.h @@ -0,0 +1,43 @@ +/* ------------------------------------------------------------------------- +* This file is part of the Cantian project. +* Copyright (c) 2024 Huawei Technologies Co.,Ltd. +* +* Cantian is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+* ------------------------------------------------------------------------- +* +* cm_dbs_snapshot.h +* +* +* IDENTIFICATION +* src/common/cm_dbs_snapshot.h +* +* ------------------------------------------------------------------------- +*/ + +#ifndef CANTIANDB_CM_DBS_SNAPSHOT_H +#define CANTIANDB_CM_DBS_SNAPSHOT_H + +#include "cm_defs.h" +#include "cm_dbstor.h" +#include "cm_dbs_file.h" + +status_t cm_dbs_create_fs_snap(char *name, uint32 v_storId, snapshot_result_info* snap_info); +status_t cm_dbs_get_page_from_fs_snap(char *name, uint32_t vstor_id, char *snap_name, char *buffer, char *buffer_len); +status_t cm_dbs_create_snapshot_file(int32 *handle, const char *file_name); +status_t cm_dbs_write_snapshot_file(int32 handle, int64 offset, const void *buf, int32 size); +status_t cm_dbs_create_fs_snap_diff_session(const char *name, uint32 v_storId); +status_t cm_dbs_delete_fs_snap_diff_session(const char *name, uint32 v_storId); +status_t cm_dbs_delete_fs_snap(const char *name, uint32 v_storId); +status_t cm_dbs_query_file_diff_between_fs_snaps(const char *name, uint32 v_storId, char *diff); + + +#endif // CANTIANDB_CM_DBS_SNAPSHOT_H diff --git a/pkg/src/common/cm_dbs_ulog.c b/pkg/src/common/cm_dbs_ulog.c index f5cb6c97148e28b57b086c13c1e833d64cdea7a4..c28261ce96c83e2440585fed14095ba04b6d838b 100644 --- a/pkg/src/common/cm_dbs_ulog.c +++ b/pkg/src/common/cm_dbs_ulog.c @@ -33,7 +33,7 @@ #include "cm_dbs_ctrl.h" #include "cm_debug.h" #include "cm_io_record.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #define CM_DBS_ULOG_FRAGMENT_RESERVATION SIZE_M(256) #define CM_DBS_ULOG_UNSUPPORTED(op) CT_LOG_RUN_WAR("Unsupported operation(%s) for dbstor object(ulog).", op) diff --git a/pkg/src/common/cm_dbstore.c b/pkg/src/common/cm_dbstor.c similarity index 92% rename from pkg/src/common/cm_dbstore.c rename to pkg/src/common/cm_dbstor.c index 5ab1f6bc1afe11767d492e3e33eca4577e72aa6d..06403e6793bc95ef8a94098eec559405acab1593 100644 --- a/pkg/src/common/cm_dbstore.c +++ 
b/pkg/src/common/cm_dbstor.c @@ -14,11 +14,11 @@ * See the Mulan PSL v2 for more details. * ------------------------------------------------------------------------- * - * cm_dbstore.c + * cm_dbstor.c * * * IDENTIFICATION - * src/common/cm_dbstore.c + * src/common/cm_dbstor.c * * ------------------------------------------------------------------------- */ @@ -26,7 +26,7 @@ #include #include "cm_log.h" #include "cm_error.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "cm_dbs_module.h" #ifdef __cplusplus @@ -62,11 +62,17 @@ static status_t dbs_load_symbol(void *lib_handle, char *symbol, void **sym_lib_h status_t dbs_init_lib(void) { dbs_interface_t *intf = dbs_global_handle(); - intf->dbs_handle = dlopen("libdbstoreClient.so", RTLD_LAZY); - const char *dlopen_err = dlerror(); + const char *dlopen_err = NULL; + intf->dbs_handle = dlopen("libdbstorClient.so", RTLD_LAZY); + dlopen_err = dlerror(); if (intf->dbs_handle == NULL) { - CT_LOG_RUN_WAR("failed to load libdbstoreClient.so, maybe lib path error, errno %s", dlopen_err); - return CT_ERROR; + CT_LOG_RUN_WAR("Failed to load libdbstorClient.so, trying libdbstoreClient.so instead, original error: %s", dlopen_err); + intf->dbs_handle = dlopen("libdbstoreClient.so", RTLD_LAZY); + dlopen_err = dlerror(); + if (intf->dbs_handle == NULL) { + CT_LOG_RUN_ERR("Failed to load libdbstoreClient.so, maybe lib path error, errno %s", dlopen_err); + return CT_ERROR; + } } // namespace @@ -87,6 +93,7 @@ status_t dbs_init_lib(void) (void)(dbs_load_symbol(intf->dbs_handle, "DbsGetFileSize", (void **)(&intf->dbs_get_file_size))); (void)(dbs_load_symbol(intf->dbs_handle, "DbsUlogArchive", (void **)(&intf->dbs_ulog_archive))); (void)(dbs_load_symbol(intf->dbs_handle, "DbsGetNsIoForbiddenStat", (void **)(&intf->dbs_get_ns_io_forbidden_stat))); + (void)(dbs_load_symbol(intf->dbs_handle, "SetNsTermHandle", (void **)(&intf->dbs_set_ns_term_handle))); // dbs (void)(dbs_load_symbol(intf->dbs_handle, "DbsQueryFsInfo", (void 
**)(&intf->dbs_query_fs_info))); cm_reset_error(); @@ -133,7 +140,7 @@ status_t dbs_init_lib(void) CT_RETURN_IFERR(dbs_load_symbol(intf->dbs_handle, "ReadUlogRecordList", (void **)(&intf->read_ulog_record_list))); CT_RETURN_IFERR(dbs_load_symbol(intf->dbs_handle, "GetUlogUsedCap", (void **)(&intf->get_ulog_used_cap))); CT_RETURN_IFERR(dbs_load_symbol(intf->dbs_handle, "GetUlogInitCapacity", (void **)(&intf->get_ulog_init_capacity))); - CT_LOG_RUN_INF("load libdbstoreClient.so done"); + CT_LOG_RUN_INF("load libdbstorClient.so done"); return CT_SUCCESS; } diff --git a/pkg/src/common/cm_dbstore.h b/pkg/src/common/cm_dbstor.h similarity index 49% rename from pkg/src/common/cm_dbstore.h rename to pkg/src/common/cm_dbstor.h index 541842737ee9c47452ba287e2c940a15ec297504..4ffa5e2fb92766a21cd2df6d2d3e177f72a9c710 100644 --- a/pkg/src/common/cm_dbstore.h +++ b/pkg/src/common/cm_dbstor.h @@ -1,30 +1,30 @@ /* ------------------------------------------------------------------------- - * This file is part of the Cantian project. - * Copyright (c) 2024 Huawei Technologies Co.,Ltd. - * - * Cantian is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * cm_dbstore.h - * - * - * IDENTIFICATION - * src/common/cm_dbstore.h - * - * ------------------------------------------------------------------------- - */ - -#ifndef __CM_DBSTORE_H__ -#define __CM_DBSTORE_H__ +* This file is part of the Cantian project. +* Copyright (c) 2024 Huawei Technologies Co.,Ltd. 
+* +* Cantian is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +* ------------------------------------------------------------------------- +* +* cm_dbstor.h +* +* +* IDENTIFICATION +* src/common/cm_dbstor.h +* +* ------------------------------------------------------------------------- +*/ + +#ifndef __CM_DBSTOR_H__ +#define __CM_DBSTOR_H__ #include "cm_dbs_defs.h" @@ -34,33 +34,55 @@ extern "C" { #define DBS_CONFIG_NAME "dbstor_config.ini" #define DBS_CONFIG_NAME_WITHOUT_SUFFIX "dbstor_config" +#define CSS_MAX_FSNAME_LEN 256 #define CSS_MAX_NAME_LEN 68 #define DBS_DIR_MAX_FILE_NUM 10240 #define DBS_DIR_DEFAULT_FILE_NUM 1024 #define DBS_ADDR_STR_LEN 20 #define DBS_MAX_LINK_NUMS 32 +#define FS_SNAP_UUID_LEN 16 typedef struct { - uint64 offset; - uint64 length; - uint64 start_lsn; + uint64 offset; + uint64 length; + uint64 start_lsn; } ulog_archive_option_t; typedef struct { - uint64 end_lsn; - uint64 real_len; + uint64 end_lsn; + uint64 real_len; } ulog_archive_result_t; typedef struct { - char local_ip[DBS_ADDR_STR_LEN]; - char remote_ip[DBS_ADDR_STR_LEN]; + char local_ip[DBS_ADDR_STR_LEN]; + char remote_ip[DBS_ADDR_STR_LEN]; } dbs_ip_pairs; -// libdbstoreClient.so +typedef struct { + char *fsName; + uint32_t vstorId; + char *snapName; + char *buffer; + char *bufferLen; +} dbs_get_page_from_sf_snap_param; + +typedef struct { + char snapName[CSS_MAX_FSNAME_LEN]; + uint32_t snapshotID; + uint32_t timepoint; + uint8_t snapUUID[FS_SNAP_UUID_LEN]; +} snapshot_result_info; + +typedef struct { + uint8_t buf[FS_SNAP_UUID_LEN]; +} SNAP_UUID_S; + +// 
libdbstorClient.so // namespace typedef int (*create_namespace_t)(char *, NameSpaceAttr *); typedef int (*open_namespace_t)(char *, NameSpaceAttr *); typedef int (*set_term_access_mode_for_ns_t)(char *, TermAccessAttr *); +typedef void (*dbs_set_ns_term_handle_t)(NsTermHandle *); typedef int (*dbs_ns_io_forbidden_t)(char *, bool); typedef int (*dbs_get_ns_io_forbidden_stat_t)(char *, bool *); @@ -124,102 +146,112 @@ typedef int (*get_ulog_init_capacity_t)(uint64_t *); typedef int32_t (*get_curr_log_offset_t)(char*, uint32_t, uint32_t*, uint32_t*, uint64_t*); typedef int32_t (*get_correct_page_id_t)(uint32_t, uint32_t, uint32_t, uint64_t); +// snapshot +typedef int (*create_fs_snap)(char *, uint32_t, snapshot_result_info *); +typedef int (*delete_fs_snap)(char *, uint32_t, uint32_t, uint32_t, SNAP_UUID_S); + typedef struct st_dbs_interface { - void *dbs_handle; - // namespace - create_namespace_t create_namespace; - open_namespace_t open_namespace; - set_term_access_mode_for_ns_t set_term_access_mode_for_ns; - dbs_ns_io_forbidden_t dbs_ns_io_forbidden; - dbs_get_ns_io_forbidden_stat_t dbs_get_ns_io_forbidden_stat; - - // dbs - dbs_client_set_uuid_lsid_t dbs_client_set_uuid_lsid; - dbs_client_lib_init_t dbs_client_lib_init; - dbs_set_init_mode_t dbs_set_init_mode; - dbs_client_flush_log_t dbs_client_flush_log; - reg_role_info_callback_t reg_role_info_callback; - dbs_link_down_event_reg_t dbs_link_down_event_reg; - dbs_init_lock_t dbs_init_lock; - dbs_inst_lock_t dbs_inst_lock; - dbs_inst_unlock_t dbs_inst_unlock; - dbs_inst_unlock_force_t dbs_inst_unlock_force; - dbs_check_inst_heart_beat_is_normal_t dbs_check_inst_heart_beat_is_normal; - dbs_file_open_root_t dbs_file_open_root; - dbs_file_open_root_by_vstorid_t dbs_file_open_root_by_vstorid; - dbs_file_create_t dbs_file_create; - dbs_file_open_t dbs_file_open; - dbs_file_write_t dbs_file_write; - dbs_file_read_t dbs_file_read; - dbs_file_remove_t dbs_file_remove; - dbs_clear_cms_name_space_t 
dbs_clear_cms_name_space; - dbs_file_create_by_path_t dbs_file_create_by_path; - dbs_file_open_by_path_t dbs_file_open_by_path; - dbs_file_rename_t dbs_file_rename; - dbs_file_get_num_t dbs_file_get_num; - dbs_file_get_list_t dbs_file_get_list; - dbs_file_get_list_detail_t dbs_file_get_list_detail; - dbs_get_file_size_t dbs_get_file_size; - dbs_ulog_archive_t dbs_ulog_archive; - dbs_get_ip_pairs_t dbs_get_ip_pairs; - dbs_create_link_t dbs_create_link; - dbs_check_single_link_t dbs_check_single_link; - dbs_query_fs_info_t dbs_query_fs_info; - - // pagepool - create_pagepool_t create_pagepool; - destroy_pagepool_t destroy_pagepool; - open_pagepool_t open_pagepool; - close_pagepool_t close_pagepool; - dbs_put_page_async_t dbs_put_page_async; - sync_page_by_part_index_t sync_page_by_part_index; - dbs_mput_continue_pages_t dbs_mput_continue_pages; - dbs_mget_page_t dbs_mget_page; - get_pagepool_logic_capacity_t get_pagepool_logic_capacity; - expand_pagepool_logic_capacity_t expand_pagepool_logic_capacity; - rename_pagepool_t rename_pagepool; - - // ulog - create_ulog_t create_ulog; - destroy_ulog_t destroy_ulog; - open_ulog_t open_ulog; - append_ulog_record_t append_ulog_record; - truncate_ulog_t truncate_ulog; - read_ulog_record_list_t read_ulog_record_list; - get_ulog_used_cap_t get_ulog_used_cap; - get_ulog_init_capacity_t get_ulog_init_capacity; + void *dbs_handle; + // namespace + create_namespace_t create_namespace; + open_namespace_t open_namespace; + set_term_access_mode_for_ns_t set_term_access_mode_for_ns; + dbs_set_ns_term_handle_t dbs_set_ns_term_handle; + dbs_ns_io_forbidden_t dbs_ns_io_forbidden; + dbs_get_ns_io_forbidden_stat_t dbs_get_ns_io_forbidden_stat; + + // dbs + dbs_client_set_uuid_lsid_t dbs_client_set_uuid_lsid; + dbs_client_lib_init_t dbs_client_lib_init; + dbs_set_init_mode_t dbs_set_init_mode; + dbs_client_flush_log_t dbs_client_flush_log; + reg_role_info_callback_t reg_role_info_callback; + dbs_link_down_event_reg_t dbs_link_down_event_reg; 
+ dbs_init_lock_t dbs_init_lock; + dbs_inst_lock_t dbs_inst_lock; + dbs_inst_unlock_t dbs_inst_unlock; + dbs_inst_unlock_force_t dbs_inst_unlock_force; + dbs_check_inst_heart_beat_is_normal_t dbs_check_inst_heart_beat_is_normal; + dbs_file_open_root_t dbs_file_open_root; + dbs_file_open_root_by_vstorid_t dbs_file_open_root_by_vstorid; + dbs_file_create_t dbs_file_create; + dbs_file_open_t dbs_file_open; + dbs_file_write_t dbs_file_write; + dbs_file_read_t dbs_file_read; + dbs_file_remove_t dbs_file_remove; + dbs_clear_cms_name_space_t dbs_clear_cms_name_space; + dbs_file_create_by_path_t dbs_file_create_by_path; + dbs_file_open_by_path_t dbs_file_open_by_path; + dbs_file_rename_t dbs_file_rename; + dbs_file_get_num_t dbs_file_get_num; + dbs_file_get_list_t dbs_file_get_list; + dbs_file_get_list_detail_t dbs_file_get_list_detail; + dbs_get_file_size_t dbs_get_file_size; + dbs_ulog_archive_t dbs_ulog_archive; + dbs_get_ip_pairs_t dbs_get_ip_pairs; + dbs_create_link_t dbs_create_link; + dbs_check_single_link_t dbs_check_single_link; + dbs_query_fs_info_t dbs_query_fs_info; + + // pagepool + create_pagepool_t create_pagepool; + destroy_pagepool_t destroy_pagepool; + open_pagepool_t open_pagepool; + close_pagepool_t close_pagepool; + dbs_put_page_async_t dbs_put_page_async; + sync_page_by_part_index_t sync_page_by_part_index; + dbs_mput_continue_pages_t dbs_mput_continue_pages; + dbs_mget_page_t dbs_mget_page; + get_pagepool_logic_capacity_t get_pagepool_logic_capacity; + expand_pagepool_logic_capacity_t expand_pagepool_logic_capacity; + rename_pagepool_t rename_pagepool; + + // ulog + create_ulog_t create_ulog; + destroy_ulog_t destroy_ulog; + open_ulog_t open_ulog; + append_ulog_record_t append_ulog_record; + truncate_ulog_t truncate_ulog; + read_ulog_record_list_t read_ulog_record_list; + get_ulog_used_cap_t get_ulog_used_cap; + get_ulog_init_capacity_t get_ulog_init_capacity; + + // snapshot + create_fs_snap create_fs_snap; + delete_fs_snap delete_fs_snap; + } 
dbs_interface_t; typedef struct st_dbs_tool_interface { - void *dbs_tool_handle; - get_curr_log_offset_t get_curr_log_offset; - get_correct_page_id_t get_correct_page_id; + void *dbs_tool_handle; + get_curr_log_offset_t get_curr_log_offset; + get_correct_page_id_t get_correct_page_id; } dbs_tool_interface_t; typedef enum { - CS_FILE_TYPE_DIR = 0, - CS_FILE_TYPE_FILE, - CS_FILE_TYPE_BUTT, + CS_FILE_TYPE_DIR = 0, + CS_FILE_TYPE_FILE, + CS_FILE_TYPE_BUTT, } cs_file_type; typedef struct cm_dbstor_file_info { - char file_name[CSS_MAX_NAME_LEN]; - cs_file_type type; - object_id_t handle; + char file_name[CSS_MAX_NAME_LEN]; + cs_file_type type; + object_id_t handle; } dbstor_file_info; typedef struct cm_dbstor_file_info_detail { - char file_name[CSS_MAX_NAME_LEN]; - cs_file_type type; - object_id_t handle; - uint32_t file_size; - uint32_t mode; - uint32_t uid; - uint32_t gid; - uint64_t mtimeSec; - uint64_t mtimeNsec; - uint64_t crtimeSec; - uint64_t crtimeNsec; + char file_name[CSS_MAX_NAME_LEN]; + cs_file_type type; + object_id_t handle; + uint32_t file_size; + uint32_t mode; + uint32_t uid; + uint32_t gid; + uint64_t mtimeSec; + uint64_t mtimeNsec; + uint64_t crtimeSec; + uint64_t crtimeNsec; } dbstor_file_info_detail; dbs_interface_t *dbs_global_handle(void); @@ -230,26 +262,26 @@ void dbs_close_lib(void); void dbs_tool_close_lib(void); typedef struct cm_dbstor_fs_info { - uint64 actual_size; - uint64 total_capacity; - uint64 used_size; - uint32 fs_id; - uint32 cluster_id; - uint32 pool_id; - uint32 grain_size; - uint32 block_size; - uint32 work_load_type_id; - uint8 fs_mode; - uint8 fs_type; - uint8 is_gfs; - uint8 fs_type_verify_switch; - uint8 fs_status; - bool is_dedup; - bool is_compress; + uint64 actual_size; + uint64 total_capacity; + uint64 used_size; + uint32 fs_id; + uint32 cluster_id; + uint32 pool_id; + uint32 grain_size; + uint32 block_size; + uint32 work_load_type_id; + uint8 fs_mode; + uint8 fs_type; + uint8 is_gfs; + uint8 fs_type_verify_switch; + 
uint8 fs_status; + bool is_dedup; + bool is_compress; } dbstor_fs_info; #ifdef __cplusplus } #endif -#endif // __CM_DBSTORE_H__ \ No newline at end of file +#endif // __CM_DBSTOR_H__ \ No newline at end of file diff --git a/pkg/src/common/cm_defs.h b/pkg/src/common/cm_defs.h index d02033cf2e21379bfbfb6201b77d396c1b210da3..927a72c37b79e3feabb60c6472c00dab37f8ab54 100644 --- a/pkg/src/common/cm_defs.h +++ b/pkg/src/common/cm_defs.h @@ -432,6 +432,7 @@ typedef enum en_cs_distribute_type { #define CT_MAX_PART_COLUMN_SIZE (uint32)4000 #define CT_MAX_LOB_SIZE ((uint64)SIZE_M(1024) * 4) #define CT_MAX_SQL_PARAM_COUNT (uint32)0x8000 +#define CT_MAX_VIRTUAL_COLS (uint32)512 #define CT_MAX_INDEX_COLUMNS (uint32)16 #define CT_MAX_PARTKEY_COLUMNS (uint32)16 #define CT_MAX_PART_COUNT (uint32)(PART_GROUP_SIZE * PART_GROUP_SIZE) diff --git a/pkg/src/common/cm_device.c b/pkg/src/common/cm_device.c index 79c3e8f9848258971b093e8e8786e5a21b6c3dea..6f35ab1aa4dbe345e9cea885d3c537245230aca4 100644 --- a/pkg/src/common/cm_device.c +++ b/pkg/src/common/cm_device.c @@ -25,7 +25,7 @@ #include "cm_device_module.h" #include "cm_device.h" #include "cm_file.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "cm_dbs_ulog.h" #include "cm_dbs_pgpool.h" #include "cm_dbs_file.h" diff --git a/pkg/src/common/cm_disk.c b/pkg/src/common/cm_disk.c index d24a2428ad2634b5ffa57f6d82c6f94a2ac093c3..0aeba6d8efe4f5352824d84a0851bdd8c068e775 100644 --- a/pkg/src/common/cm_disk.c +++ b/pkg/src/common/cm_disk.c @@ -29,7 +29,7 @@ #endif #include "cm_date.h" #include "cm_error.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #define CM_FILE_BLOCK_SIZE 8192 #define CM_DISK_PART_COUNT 16 diff --git a/pkg/src/common/cm_dss_iofence.c b/pkg/src/common/cm_dss_iofence.c new file mode 100644 index 0000000000000000000000000000000000000000..7a9ebcd3e5efebaac0c67deaa658b7da5a4f6bb4 --- /dev/null +++ b/pkg/src/common/cm_dss_iofence.c @@ -0,0 +1,98 @@ +/* 
------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * cm_dss_iofence.c + * + * + * IDENTIFICATION + * src/common/cm_dss_iofence.c + * + * ------------------------------------------------------------------------- + */ + +#include "cm_common_module.h" +#include "cm_log.h" +#include "cm_error.h" +#include "cm_dss_iofence.h" + +#define DSS_CMD_TIMEOUT_SECOND (float)10.0 +#define DSS_SCRIPT_NAME "dss_contrl.sh" +#define DSS_CMD_OUT_BUFFER_SIZE (CT_MAX_CMD_LEN + 1) +#define DSS_TIMEOUT_ERROR_NUMBER "124" + +status_t cm_exec_dss_cmd(const char* arg, uint32 node_id) +{ + errno_t ret = EOK; + char cmd[CT_MAX_CMD_LEN] = {0}; + char cmd_out[DSS_CMD_OUT_BUFFER_SIZE] = {0}; + ret = sprintf_s(cmd, CT_MAX_CMD_LEN, + "echo 'script begin';source ~/.bashrc;timeout %.2f ${DSS_HOME}/%s %s %u;echo $?;echo 'script end\n';", + DSS_CMD_TIMEOUT_SECOND, DSS_SCRIPT_NAME, arg, node_id); + PRTS_RETURN_IFERR(ret); + CT_LOG_RUN_INF("begin exec dss cmd, cmd=%s", cmd); + FILE* fp = popen(cmd, "r"); + if (fp == NULL) { + CT_LOG_RUN_ERR("popen failed, cmd=%s", cmd); + return CT_ERROR; + } + + size_t size = 0; + size = fread(cmd_out, 1, DSS_CMD_OUT_BUFFER_SIZE, fp); + (void)pclose(fp); + + if (size == 0 || size >= sizeof(cmd_out)) { + CT_LOG_RUN_ERR("fread failed, cmd=%s, size=%lu", cmd, size); + return CT_ERROR; + }
+ + cmd_out[size] = 0; + CT_LOG_RUN_INF("end exec dss cmd."); + + if (strstr(cmd_out, DSS_TIMEOUT_ERROR_NUMBER) != NULL) { + CT_LOG_RUN_ERR("DSS script exec timeout, cmd=%s, cmd_out=%s", cmd, cmd_out); + return CT_ERROR; + } + + if (strstr(cmd_out, "RES_SUCCESS") != NULL) { + CT_LOG_RUN_INF("DSS script exec succeed, cmd=%s", cmd); + return CT_SUCCESS; + } + + CT_LOG_RUN_ERR("DSS script exec failed, cmd=%s, cmd_out=%s", cmd, cmd_out); + return CT_ERROR; +} + +status_t cm_dss_iof_register() +{ + if (cm_exec_dss_cmd("-reg", 0) != CT_SUCCESS){ + CT_LOG_RUN_ERR("DSS iof register failed"); + return CT_ERROR; + } + + CT_LOG_RUN_INF("DSS iof register succeed"); + return CT_SUCCESS; +} + +status_t cm_dss_iof_kick_by_inst_id(uint32 inst_id) +{ + if (cm_exec_dss_cmd("-kick", inst_id) != CT_SUCCESS){ + CT_LOG_RUN_ERR("DSS iof kick node %u failed", inst_id); + return CT_ERROR; + } + + CT_LOG_RUN_INF("DSS iof kick node %u succeed", inst_id); + return CT_SUCCESS; +} \ No newline at end of file diff --git a/pkg/src/common/cm_dss_iofence.h b/pkg/src/common/cm_dss_iofence.h new file mode 100644 index 0000000000000000000000000000000000000000..c63a435b09689cb3866cd664be22784f14c5fc47 --- /dev/null +++ b/pkg/src/common/cm_dss_iofence.h @@ -0,0 +1,42 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ * ------------------------------------------------------------------------- + * + * cm_dss_iofence.h + * + * + * IDENTIFICATION + * src/common/cm_dss_iofence.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef __CM_DSS_IO_FENCE_H__ +#define __CM_DSS_IO_FENCE_H__ + +#include "cm_defs.h" + +#ifdef __cplusplus +extern "C" { +#endif + +status_t cm_dss_iof_register(); +status_t cm_dss_iof_kick_by_inst_id(uint32 inst_id); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/pkg/src/common/cm_file.c b/pkg/src/common/cm_file.c index ca719292e682d41aaea81a0f5e9b25a13bd9fc22..46b87f4795e68a967d45df082dcc83f417ed26fc 100644 --- a/pkg/src/common/cm_file.c +++ b/pkg/src/common/cm_file.c @@ -27,7 +27,7 @@ #include "cm_log.h" #include "cm_system.h" #include "cm_date.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #ifdef WIN32 #else @@ -190,7 +190,7 @@ status_t cm_get_dbs_root_dir_handle(char* fs_name, object_id_t* root_handle) { int ret = 0; if (fs_name == NULL || root_handle == NULL) { - CT_LOG_RUN_ERR("get dbstore root dir fd failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor root dir fd failed, invalid param."); return CT_ERROR; } @@ -209,7 +209,7 @@ status_t cm_get_dbs_dir_handle(object_id_t* phandle, char* dir_name, object_id_t { int ret = 0; if (phandle == NULL || dir_name == NULL || handle == NULL) { - CT_LOG_RUN_ERR("get dbstore dir handle failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor dir handle failed, invalid param."); return CT_ERROR; } ret = dbs_global_handle()->dbs_file_open(phandle, dir_name, DIR_TYPE, handle); @@ -225,7 +225,7 @@ status_t cm_open_dbs_dir_handle(object_id_t* phandle, char* dir_name, object_id_ { int ret = 0; if (phandle == NULL || dir_name == NULL || handle == NULL) { - CT_LOG_RUN_ERR("get dbstore dir handle failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor dir handle failed, invalid param."); return CT_ERROR; } ret = dbs_global_handle()->dbs_file_open(phandle, dir_name, 
DIR_TYPE, handle); @@ -237,7 +237,7 @@ status_t cm_get_dbs_file_handle(object_id_t* phandle, char* file_name, object_id { int ret = 0; if (phandle == NULL || file_name == NULL || handle == NULL) { - CT_LOG_RUN_ERR("get dbstore file handle failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor file handle failed, invalid param."); return CT_ERROR; } @@ -262,7 +262,7 @@ status_t cm_open_dbs_file(object_id_t* pHandle, char* file, object_id_t* handle) status_t ret; char file_name[CT_FILE_NAME_BUFFER_SIZE] = {0}; if (pHandle == NULL || file == NULL || handle == NULL) { - CT_LOG_RUN_ERR("get dbstore file fd failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor file fd failed, invalid param."); return CT_ERROR; } ret = cm_get_path_file_name(file, file_name, CT_FILE_NAME_BUFFER_SIZE); @@ -285,7 +285,7 @@ status_t cm_open_dbs_file(object_id_t* pHandle, char* file, object_id_t* handle) status_t cm_get_dbs_file_path_handle(const char* path, const char* delim, object_id_t* handle_ids, int handle_len) { if (path == NULL || handle_ids == NULL || handle_len == 0) { - CT_LOG_RUN_ERR("get dbstore file path fd failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor file path fd failed, invalid param."); return CT_ERROR; } char* token = NULL; @@ -308,17 +308,17 @@ status_t cm_get_dbs_file_path_handle(const char* path, const char* delim, object if (cur_depth == 0) { // token中保存的是根目录名 MEMS_RETURN_IFERR(strcpy_sp(fs_name, CT_MAX_FILE_NAME_LEN, token)); if (cm_get_dbs_root_dir_handle(fs_name, &handle_ids[cur_depth]) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore fs(%s) root dir(%s) fd failed.", fs_name, token); + CT_LOG_RUN_ERR("get dbstor fs(%s) root dir(%s) fd failed.", fs_name, token); return CT_ERROR; } } else if (cur_depth < handle_len - 1) { // token中保存的是目录名 if (cm_get_dbs_dir_handle(&handle_ids[cur_depth - 1], token, &handle_ids[cur_depth]) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore fs(%s) dir(%s) fd failed.", fs_name, token); + CT_LOG_RUN_ERR("get dbstor fs(%s) dir(%s) fd 
failed.", fs_name, token); return CT_ERROR; } } else if (cur_depth == handle_len - 1) { // token中保存的是文件名 if (cm_get_dbs_file_handle(&handle_ids[cur_depth - 1], token, &handle_ids[cur_depth]) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore fs(%s) file(%s) fd failed.", fs_name, token); + CT_LOG_RUN_ERR("get dbstor fs(%s) file(%s) fd failed.", fs_name, token); return CT_ERROR; } } @@ -334,7 +334,7 @@ status_t cm_get_dbs_last_file_handle(const char* file, object_id_t* last_handle) errno_t ret = 0; int path_depth = 0; if (cm_get_file_path_depth(file, "/", &path_depth) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore file(%s) path depth failed.", file); + CT_LOG_RUN_ERR("get dbstor file(%s) path depth failed.", file); return CT_ERROR; } object_id_t* handle = (object_id_t *)malloc((path_depth + 1) * sizeof(object_id_t)); @@ -343,7 +343,7 @@ status_t cm_get_dbs_last_file_handle(const char* file, object_id_t* last_handle) return CT_ERROR; } if (cm_get_dbs_file_path_handle(file, "/", handle, path_depth) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore file path fd failed, file %s, ret %d", file, ret); + CT_LOG_RUN_ERR("get dbstor file path fd failed, file %s, ret %d", file, ret); CM_FREE_PTR(handle); return CT_ERROR; } @@ -362,7 +362,7 @@ status_t cm_get_dbs_last_file_handle(const char* file, object_id_t* last_handle) status_t cm_get_dbs_full_dir_handle(const char* path, const char* delim, object_id_t* handle_ids, int handle_len) { if (path == NULL || handle_ids == NULL || handle_len == 0) { - CT_LOG_RUN_ERR("get dbstore file path fd failed, invalid param."); + CT_LOG_RUN_ERR("get dbstor file path fd failed, invalid param."); return CT_ERROR; } char* token = NULL; @@ -379,18 +379,18 @@ status_t cm_get_dbs_full_dir_handle(const char* path, const char* delim, object_ token = strtok_s(file, delim, &context); while (token != NULL) { if (cur_depth >= handle_len) { - CT_LOG_RUN_ERR("get dbstore file(%s) fd failed, fd len exceed (%d - %d).", path, handle_len, cur_depth); + 
CT_LOG_RUN_ERR("get dbstor file(%s) fd failed, fd len exceed (%d - %d).", path, handle_len, cur_depth); return CT_ERROR; } if (cur_depth == 0) { MEMS_RETURN_IFERR(strcpy_sp(fs_name, CT_MAX_FILE_NAME_LEN, token)); if (cm_get_dbs_root_dir_handle(fs_name, &handle_ids[cur_depth]) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore fs(%s) root dir(%s) fd failed.", fs_name, token); + CT_LOG_RUN_ERR("get dbstor fs(%s) root dir(%s) fd failed.", fs_name, token); return CT_ERROR; } } else if (cur_depth <= handle_len - 1) { // token中保存的是目录名 if (cm_get_dbs_dir_handle(&handle_ids[cur_depth - 1], token, &handle_ids[cur_depth]) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore fs(%s) dir(%s) fd failed.", fs_name, token); + CT_LOG_RUN_ERR("get dbstor fs(%s) dir(%s) fd failed.", fs_name, token); return CT_ERROR; } } @@ -406,7 +406,7 @@ status_t cm_get_dbs_last_dir_handle(const char* file, object_id_t* last_handle) errno_t ret = 0; int path_depth = 0; if (cm_get_file_path_depth(file, "/", &path_depth) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore file(%s) path depth failed.", file); + CT_LOG_RUN_ERR("get dbstor file(%s) path depth failed.", file); return CT_ERROR; } object_id_t* handle = (object_id_t *)malloc((path_depth + 1) * sizeof(object_id_t)); @@ -415,7 +415,7 @@ status_t cm_get_dbs_last_dir_handle(const char* file, object_id_t* last_handle) return CT_ERROR; } if (cm_get_dbs_full_dir_handle(file, "/", handle, path_depth) != CT_SUCCESS) { - CT_LOG_RUN_ERR("get dbstore file path fd failed, file %s, ret %d", file, ret); + CT_LOG_RUN_ERR("get dbstor file path fd failed, file %s, ret %d", file, ret); CM_FREE_PTR(handle); return CT_ERROR; } @@ -434,7 +434,7 @@ status_t cm_rm_dbs_dir_file(object_id_t* phandle, char* name) { int ret = 0; if (phandle == NULL || name == NULL) { - CT_LOG_RUN_ERR("delete dbstore file or dir failed, invalid param."); + CT_LOG_RUN_ERR("delete dbstor file or dir failed, invalid param."); return CT_ERROR; } diff --git a/pkg/src/common/cm_list.h 
b/pkg/src/common/cm_list.h index b284e7984bd7f657431943c4a420fa88bab99c87..6eb1b6c17bee0bfdbb9cac731b2056805ba23408 100644 --- a/pkg/src/common/cm_list.h +++ b/pkg/src/common/cm_list.h @@ -72,6 +72,7 @@ static inline void cm_galist_init(galist_t *list, void *owner, ga_alloc_func_t a list->count = 0; list->group_capacity = 0; list->group_count = 0; + list->latest_ext_cap = 0; list->latest_ext_cnt = 0; list->owner = owner; list->alloc_func = alloc_func; diff --git a/pkg/src/common/cm_log.c b/pkg/src/common/cm_log.c index 6a4abd82e07a27cf5eb05f44a6599f4037e65c9c..3c84e5c9242b54e8c1c22a7fae91e1850e112f91 100644 --- a/pkg/src/common/cm_log.c +++ b/pkg/src/common/cm_log.c @@ -29,7 +29,7 @@ #include "cm_thread.h" #include "cm_timer.h" #include "cm_dbs_intf.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include #ifndef _WIN32 @@ -52,7 +52,7 @@ static char *g_module_info[] = { "RC", "TBOX", "PE", - "DBSTORE", + "DBSTOR", "CTC", "SERVER", "KNL_COMM", diff --git a/pkg/src/common/cm_log.h b/pkg/src/common/cm_log.h index 7b37b773ed3ce4d434412e34a39ad5bf00854182..133062c6cd0d3aed33161f819ccaff01554a07b8 100644 --- a/pkg/src/common/cm_log.h +++ b/pkg/src/common/cm_log.h @@ -72,7 +72,7 @@ typedef enum en_module_id { RC, TBOX, PE, - DBSTORE, + DBSTOR, CTC, SERVER, KNL_COMM, diff --git a/pkg/src/common/cm_scsi.c b/pkg/src/common/cm_scsi.c index a433329d58dfc25885c41eb5aef8baa0d580ba88..63f8839871b0810001d01f420fca955634376e18 100644 --- a/pkg/src/common/cm_scsi.c +++ b/pkg/src/common/cm_scsi.c @@ -586,7 +586,7 @@ int32 cm_scsi3_caw(int32 fd, int64 block_addr, char *buff, int32 buff_len) status = ioctl(fd, SG_IO, &hdr); if (status < 0) { - CT_LOG_DEBUG_ERR("Sending SCSI caw command failed, status %d, errno %d.", status, errno); + CT_LOG_RUN_ERR("Sending SCSI caw command failed, status %d, errno %d.", status, errno); return CT_ERROR; } diff --git a/pkg/src/common/cm_text.c b/pkg/src/common/cm_text.c index 
d37ece8280e4f9f900ece2eccbdc07b42dbd6162..9251d341dd5d506fec7bcac008fa14156d005e96 100644 --- a/pkg/src/common/cm_text.c +++ b/pkg/src/common/cm_text.c @@ -1581,6 +1581,20 @@ void cm_str_upper(char *str) return; } +void cm_str_to_upper(char *src, char *dst) +{ + CM_POINTER2(src, dst); + + while (*src != '\0') { + *dst = UPPER(*src); + dst++; + src++; + } + *dst = '\0'; + + return; +} + void cm_str_lower(char *str) { char *tmp = NULL; @@ -1595,6 +1609,20 @@ void cm_str_lower(char *str) return; } + +void cm_str_to_lower(char *src, char *dst) +{ + CM_POINTER2(src, dst); + + while (*src != '\0') { + *dst = LOWER(*src); + dst++; + src++; + } + *dst = '\0'; + + return; +} /* calculate how many different character bits of two strings,for example: 1."abc" and "ab" : one different character bit 2."abc" and "accd" : two different character bits diff --git a/pkg/src/common/cm_text.h b/pkg/src/common/cm_text.h index e0514017c98bd5f67bd486be0f3930bce84441b5..201a1ba70df504a162e0e5d817ae3587d7b56691 100644 --- a/pkg/src/common/cm_text.h +++ b/pkg/src/common/cm_text.h @@ -52,6 +52,14 @@ typedef struct st_text { } text_t; #pragma pack() +#pragma pack(4) +typedef struct st_var_text { + char *str; + uint32 len; + uint32 cap; +} var_text_t; +#pragma pack() + #ifdef Z_SHARDING typedef struct st_long_text { char *text_addr; @@ -483,6 +491,25 @@ static inline status_t cm_concat_n_string(text_t *text, uint32 maxsize, const ch return CT_SUCCESS; } +static inline status_t cm_concat_var_string(var_text_t *text, const char *part) +{ + uint32 len = (uint32)strlen(part); + if (len != 0 && len <= (text->cap - text->len)) { + MEMS_RETURN_IFERR(memcpy_sp(text->str + text->len, text->cap - text->len, part, len)); + text->len += len; + } + return CT_SUCCESS; +} + +static inline status_t cm_concat_n_var_string(var_text_t *text, const char *part, uint32 size) +{ + if (size != 0) { + MEMS_RETURN_IFERR(memcpy_sp(text->str + text->len, text->cap - text->len, part, size)); + text->len += size; + } + 
return CT_SUCCESS; +} + static inline void cm_concat_int32(text_t *text, uint32 max_len, uint32 i32) { char buf[CT_NUMBER_BUFFER_SIZE]; @@ -1093,6 +1120,8 @@ bool32 cm_fetch_line(text_t *text, text_t *line, bool32 eof); void cm_str_upper(char *str); void cm_str_lower(char *str); +void cm_str_to_upper(char *src, char *dst); +void cm_str_to_lower(char *src, char *dst); void cm_text_upper(text_t *text); void cm_text_lower(text_t *text); void cm_text_upper_self_name(text_t *name); diff --git a/pkg/src/ctbox/ct_miner.c b/pkg/src/ctbox/ct_miner.c index 20d6fab72eab8d65a62fa7b57adf17374402c9a0..de5f9088e1b60cae34763182cb783f0990732fd0 100644 --- a/pkg/src/ctbox/ct_miner.c +++ b/pkg/src/ctbox/ct_miner.c @@ -39,7 +39,7 @@ #include "ct_tbox_audit.h" #include "knl_ctrl_restore.h" #include "dtc_database.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #ifdef WIN32 #define cm_strdup _strdup #else diff --git a/pkg/src/ctc/ctc_cbo.c b/pkg/src/ctc/ctc_cbo.c index c346e2f17bacf4f76faa3456381ad9264f8851d8..f0dbba4d20a6eacac8b7abf4231d0fffb6be53aa 100644 --- a/pkg/src/ctc/ctc_cbo.c +++ b/pkg/src/ctc/ctc_cbo.c @@ -109,6 +109,8 @@ status_t fill_part_table_cbo_stats_table_t(knl_handle_t handle, dc_entity_t *ent cbo_stats_table_t *table_stats, uint32 part_id, uint32 stats_idx) { ctc_cbo_stats_table[stats_idx].estimate_rows = table_stats->rows; + ctc_cbo_stats_table[stats_idx].avg_row_len = table_stats->avg_row_len; + ctc_cbo_stats_table[stats_idx].blocks = table_stats->blocks; for (uint32 col_id = 0; col_id <= table_stats->max_col_id; col_id++) { cbo_stats_column_t *column = knl_get_cbo_part_column(handle, entity, part_id, col_id); if (fill_cbo_stats_column(column, &ctc_cbo_stats_table[stats_idx].columns[col_id], col_id, entity) != CT_SUCCESS) { @@ -123,6 +125,8 @@ status_t fill_sub_part_table_cbo_stats_table_t(knl_handle_t handle, dc_entity_t uint32 stats_idx) { ctc_cbo_stats_table[stats_idx].estimate_rows = table_stats->rows; + ctc_cbo_stats_table[stats_idx].avg_row_len = 
table_stats->avg_row_len; + ctc_cbo_stats_table[stats_idx].blocks = table_stats->blocks; for (uint32 col_id = 0; col_id <= table_stats->max_col_id; col_id++) { cbo_stats_column_t *column = knl_get_cbo_subpart_column(handle, entity, part_id, col_id, subpart_id); if (fill_cbo_stats_column(column, &ctc_cbo_stats_table[stats_idx].columns[col_id], col_id, entity) != CT_SUCCESS) { @@ -151,12 +155,15 @@ status_t get_cbo_stats(knl_handle_t handle, dc_entity_t *entity, ctc_cbo_stats_t cbo_stats_table_t *table_stats = NULL; ctc_cbo_stats_table->estimate_rows = 0; stats->records = 0; + stats->page_size = DEFAULT_PAGE_SIZE(handle); uint32 max_part_no = 0; uint32 max_sub_part_no = 0; if (!knl_is_part_table(entity)) { table_stats = knl_get_cbo_table(handle, entity); if (table_stats != NULL && table_stats->is_ready && STATS_GLOBAL_CBO_STATS_EXIST(entity)) { ctc_cbo_stats_table->estimate_rows = table_stats->rows; + ctc_cbo_stats_table->avg_row_len = table_stats->avg_row_len; + ctc_cbo_stats_table->blocks = table_stats->blocks; stats->records = table_stats->rows; stats->is_updated = CT_TRUE; ret = fill_cbo_stats_table_t(handle, entity, stats, table_stats, ctc_cbo_stats_table); @@ -175,6 +182,8 @@ status_t get_cbo_stats(knl_handle_t handle, dc_entity_t *entity, ctc_cbo_stats_t max_part_no = table_stats->max_part_no; } else { ctc_cbo_stats_table[i].estimate_rows = 0; + ctc_cbo_stats_table[i].avg_row_len = 0; + ctc_cbo_stats_table[i].blocks = 0; } stats->records += ctc_cbo_stats_table[i].estimate_rows; } @@ -196,6 +205,8 @@ status_t get_cbo_stats(knl_handle_t handle, dc_entity_t *entity, ctc_cbo_stats_t max_sub_part_no = table_stats->max_subpart_info.subpart_no; } else { ctc_cbo_stats_table[i].estimate_rows = 0; + ctc_cbo_stats_table[i].avg_row_len = 0; + ctc_cbo_stats_table[i].blocks = 0; } stats->records += ctc_cbo_stats_table[i].estimate_rows; } diff --git a/pkg/src/ctc/ctc_ddl.c b/pkg/src/ctc/ctc_ddl.c index 
45d0a318b1bf284aea7243533a048d501b517c63..e8be4d4305992d3f1f26c15a09e26e134fa2eb02 100644 --- a/pkg/src/ctc/ctc_ddl.c +++ b/pkg/src/ctc/ctc_ddl.c @@ -546,6 +546,12 @@ static void ctc_write_rd_invalid_dd_4mysql_ddl(knl_session_t *knl_session, ctc_i redo->op_type = RD_INVALID_DD_FOR_MYSQL_DDL; redo->buff_len = broadcast_req->buff_len; redo->is_dcl = broadcast_req->is_dcl; + + ctrl_version_t release_version = FLUSH_PRIVILEGES_BROADCAST_FIX_VERSION; + if (db_cur_ctrl_version_is_higher_or_equal(knl_session, release_version)) { + redo->is_flush = broadcast_req->is_flush; + } + if (broadcast_req->buff_len > 0) { ret = memcpy_sp(redo->buff, broadcast_req->buff_len, broadcast_req->buff, broadcast_req->buff_len); knl_securec_check(ret); @@ -1438,6 +1444,12 @@ status_t fill_column_default_text(session_t *session, sql_stmt_t *stmt, knl_colu int ret = sprintf_s(column->default_text.str, strlen(def->default_text) + appendLen, format, def->default_text); column->default_text.len = strlen(column->default_text.str); knl_securec_check_ss(ret); + knl_column_t column_t = { 0 }; + char column_name[CT_NAME_BUFFER_SIZE] = { 0 }; + column_t.name = column_name; + db_convert_column_def(&column_t, CT_INVALID_ID32, CT_INVALID_ID32, column, NULL, CT_INVALID_ID32); + ret = g_knl_callback.verify_default_from_text(session, &column_t, column->default_text); + CT_RETURN_IFERR_NOCLEAR(ret, ddl_ctrl); return CT_SUCCESS; } @@ -2115,6 +2127,9 @@ static int ctc_fill_def_for_create_table(sql_stmt_t *stmt, TcDb__CtcDDLCreateTab if (column->primary) { def->pk_inline = CT_TRUE; } + if (column->is_virtual) { + def->vcol_count++; + } def->rf_inline = def->rf_inline || (column->is_ref); def->uq_inline = def->uq_inline || (column->unique); def->chk_inline = def->chk_inline || (column->is_check); @@ -2134,11 +2149,10 @@ static int ctc_fill_def_for_create_table(sql_stmt_t *stmt, TcDb__CtcDDLCreateTab def->pctfree = CT_PCT_FREE; } def->cr_mode = CR_PAGE; - if (ddl_ctrl->table_flags & CTC_INTERNAL_TMP_TABLE) { + if 
(ddl_ctrl->table_flags & CTC_FLAG_INTERNAL_TMP_TABLE) { def->is_intrinsic = CT_TRUE; } - def->contains_vircol = (ddl_ctrl->table_flags & CTC_TABLE_CONTAINS_VIRCOL) ? CT_TRUE : CT_FALSE; return CT_SUCCESS; } @@ -2203,14 +2217,19 @@ static int ctc_create_table_impl(TcDb__CtcDDLCreateTableDef *req, ddl_ctrl_t *dd CT_RETURN_IFERR(sql_alloc_mem(stmt->context, sizeof(ctc_ddl_def_node_t), (void **)&def_node)); // 创建临时表 - if (ddl_ctrl->table_flags & CTC_TMP_TABLE) { + if (ddl_ctrl->table_flags & CTC_FLAG_TMP_TABLE) { + // Temp table unsupport virtual columns + if (ddl_ctrl->table_flags & CTC_FLAG_TABLE_CONTAINS_VIRCOL) { + CT_THROW_ERROR(ERR_OPERATIONS_NOT_ALLOW, "create with virtual column", "tmp table"); + return CT_ERROR; + } bool32 is_existed = CT_FALSE; cm_latch_x(&stmt->session->knl_session.ltt_latch, stmt->session->knl_session.id, NULL); def->type = TABLE_TYPE_SESSION_TEMP; status = knl_create_ltt(&stmt->session->knl_session, def, &is_existed); cm_unlatch(&stmt->session->knl_session.ltt_latch, NULL); CT_RETURN_IFERR_EX(status, stmt, ddl_ctrl); - if (!(ddl_ctrl->table_flags & CTC_INTERNAL_TMP_TABLE)) { + if (!(ddl_ctrl->table_flags & CTC_FLAG_INTERNAL_TMP_TABLE)) { ctc_ddl_clear_stmt(stmt); } return CT_SUCCESS; @@ -3017,7 +3036,6 @@ static int ctc_alter_table_atomic_impl(TcDb__CtcDDLAlterTableDef *req, ddl_ctrl_ if (status != CT_SUCCESS) { return status; } - start_def->contains_vircol = (ddl_ctrl->table_flags & CTC_TABLE_CONTAINS_VIRCOL) ? 
CT_TRUE : CT_FALSE; uint32 def_count = ((uint64)def - (uint64)start_def) / sizeof(knl_altable_def_t); ctc_context_t *ctc_context = ctc_get_ctx_by_addr(ddl_ctrl->tch.ctx_addr); @@ -3066,7 +3084,7 @@ EXTER_ATTACK int ctc_alter_table(void *alter_def, ddl_ctrl_t *ddl_ctrl) return ret; } -static void ctc_fill_column_by_dc(knl_column_def_t *column, knl_column_t *dc_column, sql_stmt_t *stmt) +static void ctc_fill_column_by_dc(knl_column_def_t *column, knl_column_t *dc_column, sql_stmt_t *stmt, knl_table_def_t *table_def) { proto_str2text(dc_column->name, &column->name); column->datatype = dc_column->datatype; @@ -3084,9 +3102,14 @@ static void ctc_fill_column_by_dc(knl_column_def_t *column, knl_column_t *dc_col column->is_default_null = KNL_COLUMN_IS_DEFAULT_NULL(dc_column); column->typmod.is_array = KNL_COLUMN_IS_ARRAY(dc_column); column->is_jsonb = KNL_COLUMN_IS_JSONB(dc_column); + column->is_virtual = KNL_COLUMN_IS_VIRTUAL(dc_column); + + if (column->is_virtual) { + table_def->vcol_count++; + } if (dc_column->default_text.len > 0) { - column->is_default = CT_TRUE; + column->is_default = !column->is_virtual; if (sql_alloc_mem(stmt->context, dc_column->default_text.len, (pointer_t *)&column->default_text.str) != CT_SUCCESS) { return; @@ -3383,11 +3406,11 @@ static status_t ctc_fill_index_by_dc(TcDb__CtcDDLRenameTableDef *req, dc_entity_ return status; } -static status_t ctc_get_auto_increment(knl_table_def_t *def, dc_entity_t *entity, - knl_session_t *knl_session, knl_column_t *dc_column) +static void ctc_get_auto_increment(knl_table_def_t *def, dc_entity_t *entity, + knl_session_t *knl_session, knl_column_t *dc_column) { if (!KNL_COLUMN_IS_SERIAL(dc_column)) { - return CT_SUCCESS; + return; } if (entity->has_serial_col) { if (!entity->table.heap.segment) { @@ -3396,6 +3419,56 @@ static status_t ctc_get_auto_increment(knl_table_def_t *def, dc_entity_t *entity def->serial_start = HEAP_SEGMENT(knl_session, entity->table.heap.entry, entity->table.heap.segment)->serial; } } 
+} + +static status_t ctc_fill_col_def_from_dc(knl_table_def_t *def, dc_entity_t *entity, knl_session_t *knl_session, + sql_stmt_t *stmt) +{ + for (uint32 i = 0; i < entity->column_count; i++) { + knl_column_def_t *column = NULL; + CT_RETURN_IFERR(cm_galist_new(&def->columns, sizeof(knl_column_def_t), (pointer_t *)&column)); + column->table = (void *)def; + knl_column_t *dc_column = dc_get_column(entity, i); + if (dc_column == NULL) { + CT_LOG_RUN_ERR("fill columns from dc failed, table_name: %s-%s, col_id: %u", + def->schema.str, def->name.str, i); + return CT_ERROR; + } + cm_galist_init(&column->ref_columns, stmt->context, sql_alloc_mem); + ctc_fill_column_by_dc(column, dc_column, stmt, def); + for (uint32 j = 0; j < entity->table.index_set.count; j++) { + knl_index_desc_t *index = &entity->table.index_set.items[j]->desc; + if (index->primary && index->columns[0] == dc_column->id) { + def->pk_inline = CT_TRUE; + } + } + def->rf_inline = def->rf_inline || column->is_ref; + def->uq_inline = def->uq_inline || column->unique; + def->chk_inline = def->chk_inline || column->is_check; + ctc_get_auto_increment(def, entity, knl_session, dc_column); + } + return CT_SUCCESS; +} + +static status_t ctc_fill_vircol_def_from_dc(knl_table_def_t *def, dc_entity_t *entity, knl_session_t *knl_session, + sql_stmt_t *stmt) +{ + for (uint32 i = 0; i < entity->vircol_count; i++) { + knl_column_def_t *column = NULL; + CT_RETURN_IFERR(cm_galist_new(&def->columns, sizeof(knl_column_def_t), (pointer_t *)&column)); + column->table = (void *)def; + knl_column_t *dc_column = entity->virtual_columns[i]; + if (dc_column == NULL) { + CT_LOG_RUN_ERR("fill columns from dc failed, table_name: %s-%s, col_id: %u", + def->schema.str, def->name.str, i + DC_VIRTUAL_COL_START); + return CT_ERROR; + } + cm_galist_init(&column->ref_columns, stmt->context, sql_alloc_mem); + ctc_fill_column_by_dc(column, dc_column, stmt, def); + def->rf_inline = def->rf_inline || column->is_ref; + def->uq_inline = 
def->uq_inline || column->unique; + def->chk_inline = def->chk_inline || column->is_check; + } return CT_SUCCESS; } @@ -3426,30 +3499,9 @@ static status_t ctc_fill_def_base_from_dc(TcDb__CtcDDLRenameTableDef *req, knl_t cm_galist_init(&def->indexs, stmt->context, sql_alloc_mem); cm_galist_init(&def->lob_stores, stmt->context, sql_alloc_mem); - for (int i = 0; i < entity->column_count; i++) { - knl_column_def_t *column = NULL; - if ((ret = cm_galist_new(&def->columns, sizeof(knl_column_def_t), (pointer_t *)&column))) { - break; - } - column->table = (void *)def; - knl_column_t *dc_column = dc_get_column(entity, i); - cm_galist_init(&column->ref_columns, stmt->context, sql_alloc_mem); - ctc_fill_column_by_dc(column, dc_column, stmt); - for (uint32 j = 0; j < entity->table.index_set.count; j++) { - if (entity->table.index_set.items[j]->desc.primary && - entity->table.index_set.items[j]->desc.columns[0] == dc_column->id) { - def->pk_inline = CT_TRUE; - } - } - def->rf_inline = def->rf_inline || (column->is_ref); - def->uq_inline = def->uq_inline || (column->unique); - def->chk_inline = def->chk_inline || (column->is_check); - ret = ctc_get_auto_increment(def, entity, knl_session, dc_column); - if (ret != CT_SUCCESS) { - break; - } - } - return ret; + CT_RETURN_IFERR(ctc_fill_col_def_from_dc(def, entity, knl_session, stmt)); + CT_RETURN_IFERR(ctc_fill_vircol_def_from_dc(def, entity, knl_session, stmt)); + return CT_SUCCESS; } static status_t ctc_fill_def_from_dc(TcDb__CtcDDLRenameTableDef *req, knl_table_def_t *def, @@ -3979,7 +4031,7 @@ static int ctc_drop_table_impl(TcDb__CtcDDLDropTableDef *req, ddl_ctrl_t *ddl_ct def->purge = true; // 删除临时表 - if (ddl_ctrl->table_flags & CTC_TMP_TABLE) { + if (ddl_ctrl->table_flags & CTC_FLAG_TMP_TABLE) { status = knl_drop_ltt(&stmt->session->knl_session, def); ctc_ddl_clear_stmt(stmt); return status; diff --git a/pkg/src/ctc/ctc_ddl.h b/pkg/src/ctc/ctc_ddl.h index 
7900e3edb475e6bce506ae545e703e9a1be79cad..83ae3ead2a2d5a6ee96edcedfe869a63c8d2c8bb 100644 --- a/pkg/src/ctc/ctc_ddl.h +++ b/pkg/src/ctc/ctc_ddl.h @@ -37,12 +37,13 @@ #define EMPTY_DATABASE "" #define INVALID_PART_ID (uint32)0xFFFFFFFF #define CTC_DROP_NO_CHECK_FK_FOR_CANTIAN_AND_BROADCAST 0x01000000 // value need equal to "ha_ctc_ddl.h" file defined +#define FLUSH_PRIVILEGES_BROADCAST_FIX_VERSION {25, 6, 0, 4} #define DDL_ATOMIC_TABLE_LOCKED 2 -#define CTC_TMP_TABLE 1 -#define CTC_INTERNAL_TMP_TABLE 2 -#define CTC_TABLE_CONTAINS_VIRCOL 4 +#define CTC_FLAG_TMP_TABLE 1 +#define CTC_FLAG_INTERNAL_TMP_TABLE 2 +#define CTC_FLAG_TABLE_CONTAINS_VIRCOL 4 #define FILL_BROADCAST_REQ(broadcast_req, _db_name, _sql_str, _user_name, _user_ip, _mysql_inst_id, _sql_command) \ do { \ diff --git a/pkg/src/ctc/ctc_ddl_broadcast.c b/pkg/src/ctc/ctc_ddl_broadcast.c index c809f13abe880644f300734f98c0a9cd8483b139..8a4d0987e1e867cc4e9027f8fcd6c708f87c538f 100644 --- a/pkg/src/ctc/ctc_ddl_broadcast.c +++ b/pkg/src/ctc/ctc_ddl_broadcast.c @@ -231,6 +231,7 @@ status_t ctc_invalid_dd_in_slave_node(knl_handle_t session, void *buff) broadcast_req.buff[invalid_info->buff_len] = '\0'; broadcast_req.buff_len = invalid_info->buff_len; broadcast_req.is_dcl = invalid_info->is_dcl; + broadcast_req.is_flush = invalid_info->is_flush; broadcast_req.mysql_inst_id = CT_INVALID_ID32 - 1; broadcast_req.err_code = 0; CT_LOG_DEBUG_INF("[ctc_invalid_dd_in_slave_node] redo op_type = %d, buff = %s, buff_len = %d, is_dcl = %u", diff --git a/pkg/src/ctc/ctc_srv.c b/pkg/src/ctc/ctc_srv.c index df4cefb8c81d88257c164cd5e929779c3d722d63..db074fba5153e93e83455557bc733be4f8352626 100644 --- a/pkg/src/ctc/ctc_srv.c +++ b/pkg/src/ctc/ctc_srv.c @@ -2270,9 +2270,11 @@ EXTER_ATTACK int ctc_trx_rollback(ctc_handler_t *tch, uint64_t *cursors, int32_t for (int32_t i = def_list->count - 1; i >= 0; i--) { ctc_ddl_dc_array_t *dc_node = &(dc_array[i]); ctc_ddl_table_after_rollback(knl_session, dc_node); + // because of copy 
algorithm and creating table with constraints of key will lock link tables, + // we need invoke unlock_table_directly in the end of trx to release the lock. if (dc_node->def_mode == CREATE_DEF) { knl_table_def_t *create_def = (knl_table_def_t *)dc_node->ddl_def; - unlock_tables = !create_def->is_mysql_copy ? CT_FALSE : CT_TRUE; + unlock_tables = create_def->is_mysql_copy || create_def->constraints.count > 0 ? CT_TRUE : CT_FALSE; } } diff --git a/pkg/src/ctc/ctc_srv.h b/pkg/src/ctc/ctc_srv.h index 886fd0b634fab800cbad2bdb4f4ac272f3200caa..f29831d0e3ee598b30385c50f00334f514f304eb 100644 --- a/pkg/src/ctc/ctc_srv.h +++ b/pkg/src/ctc/ctc_srv.h @@ -160,6 +160,8 @@ typedef struct { typedef struct { uint32_t estimate_rows; ctc_cbo_stats_column_t *columns; + uint32_t blocks; + uint32_t avg_row_len; } ctc_cbo_stats_table_t; /* @@ -176,6 +178,7 @@ typedef struct { uint32_t num_str_cols; bool *col_type; ctc_cbo_stats_table_t *ctc_cbo_stats_table; + uint32_t page_size; } ctc_cbo_stats_t; #pragma pack() @@ -245,6 +248,7 @@ typedef struct { uint32_t buff_len; uint32_t mysql_inst_id; bool is_dcl; + bool is_flush; int err_code; } ctc_invalidate_broadcast_request; @@ -268,6 +272,7 @@ typedef union { uint32_t nullable : 1; // 是否允许为空(1允许为空 0不允许) uint32_t primary : 1; // if it is a primary key (主键) uint32_t unique : 1; // 是否唯一 + uint32_t is_virtual : 1; // 是否虚拟列 uint32_t is_serial : 1; // 自增 uint32_t is_check : 1; // 检查列(取值范围)约束 uint32_t is_ref : 1; // 指定外键 参考sql_parse_column_ref @@ -282,7 +287,7 @@ typedef union { uint32_t isKey : 1; // key关键字 uint32_t is_default_func : 1; // if default value is generated by mysql functions uint32_t is_curr_timestamp : 1; // if default value is current_timestamp - uint32_t unused_ops : 15; + uint32_t unused_ops : 14; }; uint32_t is_option_set; } ctc_column_option_set_bit; diff --git a/pkg/src/ctc/ctc_srv_util.c b/pkg/src/ctc/ctc_srv_util.c index 1df099329f2cc76315c99beeb1c2e0c56f0bc4ba..ef6debbda94dc55247d8fdadc9841102652aac31 100644 --- 
a/pkg/src/ctc/ctc_srv_util.c +++ b/pkg/src/ctc/ctc_srv_util.c @@ -495,7 +495,7 @@ status_t ctc_get_new_session(session_t **session_ptr) agent->session = NULL; status_t status; do { - status = srv_create_agent_private_area(agent); + status = srv_alloc_agent_res(agent); CT_BREAK_IF_ERROR(status); status = srv_alloc_session(&session, NULL, SESSION_TYPE_USER); CT_BREAK_IF_ERROR(status); diff --git a/pkg/src/ctrst/ctrst_main.c b/pkg/src/ctrst/ctrst_main.c index 030801f54c590a458d6003888ee891dfb56c9c74..0bd95195c4eefaf169e7876a515d21cd19a3ab5f 100644 --- a/pkg/src/ctrst/ctrst_main.c +++ b/pkg/src/ctrst/ctrst_main.c @@ -1,8 +1,26 @@ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2019-2023. All rights reserved. - * Description: start server - * Author: y00188255 - * Create: 2019-04-24 +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * ctrst_main.c + * + * + * IDENTIFICATION + * src/ctrst/ctrst_main.c + * + * ------------------------------------------------------------------------- */ #include "cm_defs.h" #include "cm_kmc.h" diff --git a/pkg/src/ctsql/CMakeLists.txt b/pkg/src/ctsql/CMakeLists.txt index 08b95af12c85640555e04199528559960b8c1b53..b74cba7ad357c016fb9adcdf0961f1e9bf0da8c1 100644 --- a/pkg/src/ctsql/CMakeLists.txt +++ b/pkg/src/ctsql/CMakeLists.txt @@ -5,6 +5,7 @@ include_directories( "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/catalog" "${CMAKE_CURRENT_SOURCE_DIR}/executor" + "${CMAKE_CURRENT_SOURCE_DIR}/executor/explain" "${CMAKE_CURRENT_SOURCE_DIR}/json" "${CMAKE_CURRENT_SOURCE_DIR}/node" "${CMAKE_CURRENT_SOURCE_DIR}/function" @@ -25,6 +26,7 @@ set(LIBRARY_OUTPUT_PATH ${CANTIANDB_LIB}) aux_source_directory(. SQL_SRC) aux_source_directory(./catalog SQL_CATALOG_SRC) aux_source_directory(./executor SQL_EXECUTOR_SRC) +aux_source_directory(./executor/explain SQL_EXECUTOR_EXPLAIN_SRC) aux_source_directory(./json SQL_JSON_SRC) aux_source_directory(./node SQL_NODE_SRC) aux_source_directory(./function SQL_FUNCTION_SRC) @@ -50,6 +52,7 @@ set(SQL_ALL_SRC ${SQL_SRC} ${SQL_CATALOG_SRC} ${SQL_EXECUTOR_SRC} + ${SQL_EXECUTOR_EXPLAIN_SRC} ${SQL_NODE_SRC} ${SQL_FUNCTION_SRC} ${SQL_JSON_SRC} diff --git a/pkg/src/ctsql/ctsql_context.h b/pkg/src/ctsql/ctsql_context.h index 2db1f567f68802d2d995c9601b904fb1a7aa6c8a..a52d3db93566849970dc68363b942e6253805638 100644 --- a/pkg/src/ctsql/ctsql_context.h +++ b/pkg/src/ctsql/ctsql_context.h @@ -35,6 +35,7 @@ #include "ctsql_statistics.h" #include "cm_lex.h" #include "cm_bilist.h" +#include "plan_join_bitmap.h" #ifdef __cplusplus extern "C" { @@ -1152,6 +1153,7 @@ typedef struct st_cbo_filter_info { bool32 is_ready; } cbo_filter_info_t; + // /////////////////////////////////////////////////////////////////////////////////// typedef struct st_sql_table { uint32 id; 
@@ -1215,6 +1217,8 @@ typedef struct st_sql_table { bool32 is_jsonb_table : 1; // indicates index cond of this table has been pruned bool32 reserved : 18; + bilist_t join_info; + tf_scan_flag_t tf_scan_flag; // Parallel Scan Indicator scan_part_info_t *scan_part_info; galist_t *for_update_cols; diff --git a/pkg/src/ctsql/ctsql_stmt.c b/pkg/src/ctsql/ctsql_stmt.c index f06d2217f86163783c1de9cc5753d58689e1086a..087c487d2407e2fa0d10dc2607a40e8992669a80 100644 --- a/pkg/src/ctsql/ctsql_stmt.c +++ b/pkg/src/ctsql/ctsql_stmt.c @@ -36,6 +36,7 @@ #include "ctsql_select.h" #include "ddl_column_parser.h" #include "pl_executor.h" +#include "expl_executor.h" #ifdef TIME_STATISTIC #include "cm_statistic.h" @@ -65,6 +66,8 @@ extern "C" { (stmt)->session->sql_id = __sql_id__; \ } while (0) +#define EXPLAIN_HEAD "EXPLAIN PLAN OUTPUT" + void sql_init_stmt(session_t *session, sql_stmt_t *stmt, uint32 stmt_id) { sql_context_t *context = (session->disable_soft_parse) ? stmt->context : NULL; @@ -1112,6 +1115,21 @@ static inline status_t sql_execute_dml_and_send(sql_stmt_t *stmt) return CT_SUCCESS; } +static inline status_t sql_execute_expl_and_send(sql_stmt_t *stmt) +{ + if (my_sender(stmt)->send_exec_begin(stmt) != CT_SUCCESS) { + return CT_ERROR; + } + + if (expl_execute(stmt) != CT_SUCCESS) { + return CT_ERROR; + } + + my_sender(stmt)->send_exec_end(stmt); + + return CT_SUCCESS; +} + static status_t sql_init_trigger_list_core(sql_stmt_t *stmt) { if (vmc_alloc(&stmt->vmc, sizeof(galist_t), (void **)&stmt->trigger_list) != CT_SUCCESS) { @@ -1271,7 +1289,9 @@ status_t sql_execute(sql_stmt_t *stmt) } /* do execute */ - if (SQL_TYPE(stmt) < CTSQL_TYPE_DML_CEIL) { + if (stmt->is_explain) { + status = sql_execute_expl_and_send(stmt); + } else if (SQL_TYPE(stmt) < CTSQL_TYPE_DML_CEIL) { status = sql_execute_dml_and_send(stmt); } else if (SQL_TYPE(stmt) == CTSQL_TYPE_ANONYMOUS_BLOCK) { status = ple_exec_anonymous_block(stmt); @@ -3407,6 +3427,32 @@ static void sql_set_column_attr(rs_column_t 
*rs_col, cs_column_def_t *col_def, t } } +status_t sql_send_parsed_stmt_explain(sql_stmt_t *stmt) +{ + cs_column_def_t *col_def = NULL; + cs_packet_t *send_pack = stmt->session->send_pack; + + uint32 column_def_offset = 0; + CT_RETURN_IFERR(cs_reserve_space(send_pack, sizeof(cs_column_def_t), &column_def_offset)); + col_def = (cs_column_def_t *)CS_RESERVE_ADDR(send_pack, column_def_offset); + MEMS_RETURN_IFERR(memset_sp(col_def, sizeof(cs_column_def_t), 0, sizeof(cs_column_def_t))); + + col_def->size = CT_MAX_COLUMN_SIZE; + col_def->datatype = CT_TYPE_STRING - CT_TYPE_BASE; + col_def->name_len = strlen(EXPLAIN_HEAD); + + uint32 column_name_offset = 0; + CT_RETURN_IFERR(cs_reserve_space(send_pack, col_def->name_len, &column_name_offset)); + char *name = (char *)CS_RESERVE_ADDR(send_pack, column_name_offset); + uint32 align_len = CM_ALIGN4(col_def->name_len); + MEMS_RETURN_IFERR(memcpy_sp(name, align_len, EXPLAIN_HEAD, col_def->name_len)); + if (col_def->name_len < align_len) { + name[col_def->name_len] = '\0'; + } + + return CT_SUCCESS; +} + status_t sql_send_parsed_stmt_normal(sql_stmt_t *stmt, uint16 columnCount) { rs_column_t *rs_col = NULL; @@ -3524,14 +3570,13 @@ static status_t sql_send_parsed_stmt_pl(sql_stmt_t *stmt) return status; } -void sql_set_ack_column_count(sql_stmt_t *stmt, cs_prepare_ack_t *prepare_ack) +uint16 sql_get_ack_column_count(sql_stmt_t *stmt) { if (stmt->is_explain) { - prepare_ack->column_count = 1; - } else { - sql_context_t *ctx = (sql_context_t *)stmt->context; - prepare_ack->column_count = (ctx->rs_columns == NULL) ? 0 : ctx->rs_columns->count; + return 1; } + sql_context_t *ctx = (sql_context_t *)stmt->context; + return (ctx->rs_columns == NULL) ? 
0 : ctx->rs_columns->count; } static status_t sql_send_param_info_impl(sql_stmt_t *stmt, galist_t *params_list) @@ -3592,8 +3637,8 @@ static status_t sql_send_params_info(sql_stmt_t *stmt, cs_prepare_ack_t *prepare status_t sql_send_parsed_stmt(sql_stmt_t *stmt) { - cs_prepare_ack_t *prepare_ack = NULL; uint32 ack_offset; + cs_prepare_ack_t *prepare_ack = NULL; cs_packet_t *send_pack = stmt->session->send_pack; CT_BIT_RESET(send_pack->head->flags, CS_FLAG_WITH_TS); @@ -3605,9 +3650,8 @@ status_t sql_send_parsed_stmt(sql_stmt_t *stmt) CT_THROW_ERROR(ERR_INVALID_CURSOR); return CT_ERROR; } - prepare_ack->stmt_type = ACK_STMT_TYPE(stmt->lang_type, stmt->context->type); - sql_set_ack_column_count(stmt, prepare_ack); + prepare_ack->column_count = sql_get_ack_column_count(stmt); // Do not optimize the temporary variables column_count, // because the message expansion may cause the ack address to change, @@ -3615,7 +3659,9 @@ status_t sql_send_parsed_stmt(sql_stmt_t *stmt) uint16 column_count = prepare_ack->column_count; CT_RETURN_IFERR(sql_send_params_info(stmt, prepare_ack)); - { + if (stmt->is_explain) { + CT_RETURN_IFERR(sql_send_parsed_stmt_explain(stmt)); + } else { CT_RETURN_IFERR(sql_send_parsed_stmt_normal(stmt, column_count)); } diff --git a/pkg/src/ctsql/ctsql_stmt.h b/pkg/src/ctsql/ctsql_stmt.h index d83cfa34f0a02fcd1f14bb3978679e112418f5a8..46e37b2395625afe1a7aac08b22e6c44205edff7 100644 --- a/pkg/src/ctsql/ctsql_stmt.h +++ b/pkg/src/ctsql/ctsql_stmt.h @@ -94,6 +94,7 @@ typedef enum en_lang_type { LANG_DDL = 3, LANG_PL = 4, LANG_EXPLAIN = 5, + LANG_MAX, } lang_type_t; typedef struct st_sql_lob_info { diff --git a/pkg/src/ctsql/executor/ctsql_select.c b/pkg/src/ctsql/executor/ctsql_select.c index cae4c126959be29dfba8cd9e99a597ff8ed074d9..e8ff602bcfbf8d2dfb4b4e143b98d11e578a646d 100644 --- a/pkg/src/ctsql/executor/ctsql_select.c +++ b/pkg/src/ctsql/executor/ctsql_select.c @@ -602,7 +602,7 @@ static status_t sql_open_table_cursor(sql_stmt_t *stmt, sql_cursor_t 
*cursor, sq bool32 scn_type = (table->version.type == SCN_VERSION) ? CT_TRUE : CT_FALSE; CT_RETURN_IFERR(sql_convert_to_scn(stmt, table->version.expr, scn_type, &table_cur->scn)); } - + is_select = is_select && cursor->select_ctx; if (CT_IS_SUBSELECT_TABLE(table->type)) { CT_RETURN_IFERR(sql_alloc_cursor(stmt, &table_cur->sql_cur)); table_cur->sql_cur->scn = table_cur->scn; diff --git a/pkg/src/ctsql/executor/explain/expl_common.h b/pkg/src/ctsql/executor/explain/expl_common.h new file mode 100644 index 0000000000000000000000000000000000000000..d5f09ab31a169979c167dd534bf6d49b38615634 --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_common.h @@ -0,0 +1,107 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * expl_common.h + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_common.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef __EXPL_COMMON_H__ +#define __EXPL_COMMON_H__ + +#include "cm_memory.h" +#include "cm_row.h" +#include "cm_defs.h" +#include "cm_list.h" +#include "ctsql_plan_defs.h" +#include "ctsql_stmt.h" +#include "srv_instance.h" + +typedef enum { + EXPL_COL_TYPE_ID, + EXPL_COL_TYPE_OPERATION, + EXPL_COL_TYPE_OWNER, + EXPL_COL_TYPE_TABLE, + EXPL_COL_TYPE_ROWS, + EXPL_COL_TYPE_COST, + EXPL_COL_TYPE_BYTES, + EXPL_COL_TYPE_REMARK, + EXPL_COL_TYPE_MAX +} expl_col_type_t; + +typedef struct st_row_helper { + int32 id; + text_t *operation; + text_t *owner; + text_t *name; // Table name + text_t *alias; // Table name alias + int64 rows; + double cost; + int64 bytes; + int64 remark; +} row_helper_t; + +typedef enum { + PREDICATE_FILTER = 0, + PREDICATE_ACCESS, + PREDICATE_JOIN_FILTER, +} predicate_type; +struct st_expl_helper; + +typedef struct st_pred_helper { + mtrl_rowid_t row_id; + uint32 mtrl_id; + + row_assist_t ra; + char *row_buf; + var_text_t content; // for description + + struct st_expl_helper *parent; + + bool32 is_enabled; + bool32 is_start_with; + predicate_type type; + + sql_query_t *query; // reference sql query context +} pred_helper_t; + +typedef struct st_expl_helper { + mtrl_rowid_t row_id; + uint32 mtrl_id; + + row_assist_t ra; + char *row_buf; + text_t content; // for operation + + row_helper_t row_helper; // for format; + uint32 fmt_sizes[EXPL_COL_TYPE_MAX]; // format sizes for every column + int32 depth; // depth + + pred_helper_t pred_helper; // predicate explain helper + + sql_cursor_t *cursor; + sql_query_t *query; // reference sql query context + sql_array_t *ssa; // SubSelect Array for subselect expr + + // format + uint32 width; +} expl_helper_t; + +#endif diff --git 
a/pkg/src/ctsql/executor/explain/expl_executor.c b/pkg/src/ctsql/executor/explain/expl_executor.c new file mode 100644 index 0000000000000000000000000000000000000000..4984ae43119b12376e005554c37f58e59793ac22 --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_executor.c @@ -0,0 +1,379 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * expl_executor.c + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_executor.c + * + * ------------------------------------------------------------------------- + */ + +#include "ctsql_stmt.h" +#include "dml_executor.h" + +#include "expl_plan.h" +#include "expl_executor.h" + +#define EXPL_FMT_ALIGN_SIZE 3 // eg. "ID | Operation | ..." +#define EXPL_FMT_COL_OFFSET 2 // eg. 
'| ' +#define EXPLAIN_PREDICATE_HEAD "Predicate Information (identified by id):" + +static status_t expl_open_segment(sql_stmt_t *stmt, sql_cursor_t *cursor) +{ + CT_RETURN_IFERR(mtrl_create_segment(&stmt->mtrl, MTRL_SEGMENT_RS, NULL, &cursor->mtrl.rs.sid)); + CT_RETURN_IFERR(mtrl_open_segment(&stmt->mtrl, cursor->mtrl.rs.sid)); + CT_RETURN_IFERR(mtrl_create_segment(&stmt->mtrl, MTRL_SEGMENT_RS, NULL, &cursor->mtrl.predicate.sid)); + CT_RETURN_IFERR(mtrl_open_segment(&stmt->mtrl, cursor->mtrl.predicate.sid)); + return CT_SUCCESS; +} + +static void expl_close_segment(sql_stmt_t *stmt, sql_cursor_t *cursor) +{ + mtrl_close_segment(&stmt->mtrl, cursor->mtrl.rs.sid); + mtrl_close_segment(&stmt->mtrl, cursor->mtrl.predicate.sid); +} + +static status_t expl_pre_execute(sql_stmt_t *stmt, sql_cursor_t **cursor) +{ + sql_mtrl_handler_t *mtrl = NULL; + + CT_RETURN_IFERR(sql_alloc_cursor(stmt, cursor)); + (*cursor)->is_open = true; + CT_RETURN_IFERR(SQL_CURSOR_PUSH(stmt, *cursor)); + // TODO why ?? 
+ status_t ret = expl_open_segment(stmt, *cursor); + if (ret != CT_SUCCESS) { + sql_free_cursor(stmt, *cursor); + } + return ret; +} + +status_t expl_send_explain_row(sql_stmt_t *stmt, sql_cursor_t *cursor, char *row_buf, char *info, bool32 *is_full) +{ + row_assist_t ra; + MEMS_RETURN_IFERR(memset_s(row_buf, CT_MAX_ROW_SIZE, 0, CT_MAX_ROW_SIZE)); + + row_init(&ra, row_buf, CT_MAX_ROW_SIZE, 1); + CT_RETURN_IFERR(row_put_str(&ra, info)); + CT_RETURN_IFERR(my_sender(stmt)->send_row_data(stmt, row_buf, is_full)); + + sql_inc_rows(stmt, cursor); + return CT_SUCCESS; +} + +static uint32 expl_get_explain_width(expl_helper_t *helper) +{ + uint32 width = 1; + for (uint32 i = 0; i < EXPL_COL_TYPE_MAX; i++) { + width += helper->fmt_sizes[i] + EXPL_FMT_ALIGN_SIZE; + } + + if (width > CT_MAX_ROW_SIZE - 1) { + width = CT_MAX_ROW_SIZE - 1; + } + helper->width = width; + + return width; +} + +static status_t expl_send_explain_divider(sql_stmt_t *stmt, sql_cursor_t *cursor, uint32 width, bool32 *is_full) +{ + char *row_buf = NULL; + char *divider = NULL; + + CTSQL_SAVE_STACK(stmt); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&row_buf)); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)÷r)); + + MEMS_RETURN_IFERR(memset_s(divider, CT_MAX_ROW_SIZE, '-', width)); + divider[width + 1] = '\0'; + + CT_RETURN_IFERR(expl_send_explain_row(stmt, cursor, row_buf, divider, is_full)); + CTSQL_RESTORE_STACK(stmt); + + return CT_SUCCESS; +} + +static status_t expl_send_explain_head(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper) +{ + char *info = NULL; + bool32 is_full = CT_FALSE; + text_t *col_name = NULL; + + CTSQL_SAVE_STACK(stmt); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&info)); + uint32 width = expl_get_explain_width(helper); + // send divider + CT_RETURN_IFERR(expl_send_explain_divider(stmt, cursor, width, &is_full)); + + // send column info + uint32 offset = 0; + MEMS_RETURN_IFERR(memset_s(info, CT_MAX_ROW_SIZE, ' ', width - 1)); 
+ info[offset++] = '|'; + for (int32 i = 0; i < EXPL_COL_TYPE_MAX; i++) { + offset++; + if (offset + helper->fmt_sizes[i] >= CT_MAX_ROW_SIZE - 1) { + break; + } + col_name = expl_get_explcol_name(i); + MEMS_RETURN_IFERR(memcpy_s(&info[offset], CT_MAX_ROW_SIZE - offset, col_name->str, col_name->len)); + offset += helper->fmt_sizes[i] + 1; + info[offset++] = '|'; + } + info[offset] = '\0'; + CT_RETURN_IFERR(expl_send_explain_row(stmt, cursor, helper->row_buf, info, &is_full)); + + // send divider + CT_RETURN_IFERR(expl_send_explain_divider(stmt, cursor, width, &is_full)); + CTSQL_RESTORE_STACK(stmt); + + return CT_SUCCESS; +} + +static status_t expl_send_explain_tail(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper) +{ + bool32 is_full = CT_FALSE; + + CTSQL_SAVE_STACK(stmt); + // send divider + CT_RETURN_IFERR(expl_send_explain_divider(stmt, cursor, helper->width, &is_full)); + CTSQL_RESTORE_STACK(stmt); + + return CT_SUCCESS; +} + +bool32 check_rs_page_in_segment(mtrl_context_t *mtrl, mtrl_segment_t *segment, uint32 vmid) +{ + if (segment->vm_list.count <= 2) { + return (vmid == segment->vm_list.first || vmid == segment->vm_list.last); + } + + vm_ctrl_t *ctrl = NULL; + uint32 cur_id = segment->vm_list.first; + for (int32 i = 2; i < segment->vm_list.count; i++) { + ctrl = vm_get_ctrl(mtrl->pool, cur_id); + if (ctrl->next == vmid) { + return CT_TRUE; + } + cur_id = ctrl->next; + } + return CT_FALSE; +} + +status_t expl_fmt_column_content(mtrl_row_t *row, char *content, uint32 offset, uint16 fmt_size, uint32 col_id) +{ + uint16 col_len = row->lens[col_id]; + char *col_data = row->data + row->offsets[col_id]; + + content[offset] = '|'; + if (col_len == CT_NULL_VALUE_LEN || col_len == 0) { + return CT_SUCCESS; + } + col_len = (col_len > fmt_size) ? 
fmt_size : col_len; + + MEMS_RETURN_IFERR(memcpy_s(content + offset + EXPL_FMT_COL_OFFSET, fmt_size, col_data, col_len)); + return CT_SUCCESS; +} + +status_t expl_fmt_plan_content(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper, char *content) +{ + uint32 offset = 0; + uint32 *fmt_sizes = helper->fmt_sizes; + mtrl_row_t *row = &cursor->mtrl.cursor.row; + + (void)memset_s(content, CT_MAX_ROW_SIZE, ' ', CT_MAX_ROW_SIZE); + for (uint32 i = 0; i < EXPL_COL_TYPE_MAX; i++) { + if (offset + fmt_sizes[i] + EXPL_FMT_ALIGN_SIZE > CT_MAX_ROW_SIZE - 2) { + break; + } + + if (expl_fmt_column_content(row, content, offset, (uint16)fmt_sizes[i], i) != CT_SUCCESS) { + break; + } + offset += fmt_sizes[i] + EXPL_FMT_ALIGN_SIZE; + } + content[offset] = '|'; + content[offset + 1] = '\0'; + + return CT_SUCCESS; +} + +status_t expl_fmt_explain_content(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper, + expl_fmt_func_t fmt_func) +{ + char *row_buf = NULL; + char *content = NULL; + bool32 is_full = CT_FALSE; + mtrl_cursor_t *mtrl_cursor = &cursor->mtrl.cursor; + + CTSQL_SAVE_STACK(stmt); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&row_buf)); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&content)); + + while (1) { + CT_RETURN_IFERR(mtrl_fetch_rs(&stmt->mtrl, mtrl_cursor, CT_TRUE)); + if (mtrl_cursor->eof) { + break; + } + + CT_RETURN_IFERR(fmt_func(stmt, cursor, helper, content)); + CT_RETURN_IFERR(expl_send_explain_row(stmt, cursor, row_buf, content, &is_full)); + if (is_full) { + break; + } + } + CTSQL_RESTORE_STACK(stmt); + + return CT_SUCCESS; +} + +status_t expl_send_plan_info(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper) +{ + CT_RETURN_IFERR(mtrl_open_rs_cursor(&stmt->mtrl, cursor->mtrl.rs.sid, &cursor->mtrl.cursor)); + // header + CT_RETURN_IFERR(expl_send_explain_head(stmt, cursor, helper)); + if (!check_rs_page_in_segment(&stmt->mtrl, stmt->mtrl.segments[cursor->mtrl.rs.sid], cursor->mtrl.cursor.rs_vmid)) { + 
return CT_SUCCESS; + } + // content + CT_RETURN_IFERR(expl_fmt_explain_content(stmt, cursor, helper, expl_fmt_plan_content)); + if (!cursor->mtrl.cursor.eof) { + return CT_SUCCESS; + } + // tail + CT_RETURN_IFERR(expl_send_explain_tail(stmt, cursor, helper)); + return CT_SUCCESS; +} + +status_t expl_send_predicate_head(sql_stmt_t *stmt, sql_cursor_t *cursor, pred_helper_t *helper) +{ + char *info = NULL; + bool32 is_full = CT_FALSE; + int32 width = strlen(EXPLAIN_PREDICATE_HEAD); + + CTSQL_SAVE_STACK(stmt); + // predicate header + CT_RETURN_IFERR(sql_push(stmt, width + 1, (void **)&info)); + MEMS_RETURN_IFERR(memcpy_s(info, width + 1, EXPLAIN_PREDICATE_HEAD, width)); + info[width] = '\0'; + CT_RETURN_IFERR(expl_send_explain_row(stmt, cursor, helper->row_buf, info, &is_full)); + + // send divider + CT_RETURN_IFERR(expl_send_explain_divider(stmt, cursor, width, &is_full)); + CTSQL_RESTORE_STACK(stmt); + + return CT_SUCCESS; +} + +status_t expl_fmt_predicate_content(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper, char *content) +{ + mtrl_row_t *row = &cursor->mtrl.cursor.row; + char *data = row->data + row->offsets[0]; + uint32 len = row->lens[0]; + + MEMS_RETURN_IFERR(memcpy_s(content, CT_MAX_ROW_SIZE, data, len)); + content[len] = '\0'; + + return CT_SUCCESS; +} + +status_t expl_send_predicate_info(sql_stmt_t *stmt, sql_cursor_t *cursor, pred_helper_t *helper) +{ + if (!helper->is_enabled) { + return CT_SUCCESS; + } + + // segment of other expl helper + if (!cursor->mtrl.cursor.eof && !check_rs_page_in_segment(&stmt->mtrl, + stmt->mtrl.segments[cursor->mtrl.predicate.sid], cursor->mtrl.cursor.rs_vmid)) { + return CT_SUCCESS; + } + + if (cursor->mtrl.cursor.eof) { + CT_RETURN_IFERR(mtrl_open_rs_cursor(&stmt->mtrl, cursor->mtrl.predicate.sid, &cursor->mtrl.cursor)); + if (cursor->mtrl.cursor.rs_page->rows != 0) { + CT_RETURN_IFERR(expl_send_predicate_head(stmt, cursor, helper)); + } + } + + CT_RETURN_IFERR(expl_fmt_explain_content(stmt, cursor, 
helper->parent, expl_fmt_predicate_content)); + return CT_SUCCESS; +} + +status_t expl_init_executors(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper) +{ + CT_RETURN_IFERR(expl_helper_init(stmt, helper, cursor->mtrl.rs.sid)); + CT_RETURN_IFERR(expl_pred_helper_init(stmt, &helper->pred_helper, cursor->mtrl.predicate.sid)); + helper->pred_helper.parent = helper; + + return CT_SUCCESS; +} + +status_t expl_execute_executors(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan) +{ + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan, 0)); + return CT_SUCCESS; +} + +status_t expl_send_explain_rows(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper) +{ + CT_RETURN_IFERR(expl_send_plan_info(stmt, cursor, helper)); + CT_RETURN_IFERR(expl_send_predicate_info(stmt, cursor, &helper->pred_helper)); + // TODO how client<->server + if (stmt->batch_rows < stmt->prefetch_rows) { + stmt->eof = CT_TRUE; + cursor->eof = CT_TRUE; + } + return CT_SUCCESS; +} + +static status_t expl_execute_explain_plan(sql_stmt_t *stmt, sql_cursor_t *cursor, plan_node_t *plan) +{ + expl_helper_t helper = {0}; + + CTSQL_SAVE_STACK(stmt); + // explain-executors init + CT_RETURN_IFERR(expl_init_executors(stmt, cursor, &helper)); + + // explain-executors execute + CT_RETURN_IFERR(expl_execute_executors(stmt, &helper, plan)); + expl_close_segment(stmt, cursor); + + // get explain result and write to response package + CT_RETURN_IFERR(expl_send_explain_rows(stmt, cursor, &helper)); + CTSQL_RESTORE_STACK(stmt); + + return CT_SUCCESS; +} + +status_t expl_execute(sql_stmt_t *stmt) +{ + plan_node_t *node = (plan_node_t *)sql_get_plan(stmt); + CT_RETVALUE_IFTRUE(node == NULL, CT_ERRNO); + + sql_cursor_t *cursor = NULL; + CT_RETURN_IFERR(expl_pre_execute(stmt, &cursor)); + sql_init_ssa_cursor_maps(cursor, CT_MAX_SUBSELECT_EXPRS); + + CTSQL_SAVE_STACK(stmt); + // TODO: Exception handling + expl_execute_explain_plan(stmt, cursor, node); + CTSQL_RESTORE_STACK(stmt); + + return 
CT_SUCCESS; +} diff --git a/pkg/src/ctsql/executor/explain/expl_executor.h b/pkg/src/ctsql/executor/explain/expl_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..7186fafa7d7a7c17da91a1cb47ddb398afa268e4 --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_executor.h @@ -0,0 +1,38 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * expl_executor.h + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_executor.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef __EXPL_EXECUTOR_H__ +#define __EXPL_EXECUTOR_H__ + +#include "cm_defs.h" +#include "cm_memory.h" +#include "cm_row.h" + +#include "cm_list.h" +#include "expl_plan.h" + +status_t expl_execute(sql_stmt_t *stmt); + +#endif diff --git a/pkg/src/ctsql/executor/explain/expl_plan.c b/pkg/src/ctsql/executor/explain/expl_plan.c new file mode 100644 index 0000000000000000000000000000000000000000..668132e31e5a6ec73a005b0941bd14dfe7ac9205 --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_plan.c @@ -0,0 +1,1309 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * expl_plan.h + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_plan.h + * + * ------------------------------------------------------------------------- + */ + +#include "cm_row.h" +#include "cm_text.h" +#include "plan_rbo.h" +#include "ctsql_scan.h" +#include "expl_plan.h" +#include "expl_predicate.h" + +#define EXPL_SGL_INDENT_SIZE 2 +#define EXPL_DEPTH_CALC_LEVEL 2 +#define SORT_ORDER_BY_ROWNUM(plan) (g_instance->sql.topn_threshold !=0 && \ + (plan)->query_sort.rownum_upper <= g_instance->sql.topn_threshold) + +typedef enum { + INDEX_MODE_MULTI_PARTS_SCAN = 0, + INDEX_MODE_FAST_FULL_SCAN, + INDEX_MODE_FULL_SCAN, + INDEX_MODE_OPTIMIZED_RANGE_SCAN, + INDEX_MODE_UNIQUE_SCAN, + INDEX_MODE_SKIP_SCAN, + INDEX_MODE_RANGE_SCAN +} index_mode_type_t; + +typedef struct { + bool32 idx_cond; + index_mode_type_t idx_mode; +} index_mode_t; + +static char* g_index_mode_oper[] = { + "INDEX MULTI PARTS SCAN", + "INDEX FAST FULL SCAN", + "INDEX FULL SCAN", + "OPTIMIZED INDEX RANGE SCAN", + "INDEX UNIQUE SCAN", + "INDEX SKIP SCAN", + "INDEX RANGE SCAN" +}; + +static char* g_minus_names[] = { + "MINUS", + "INTERSECT", + "INTERSECT ALL", + "EXCEPT ALL" +}; + +static char* g_group_by_names[] = { + "SORT GROUP BY", + "MERGE SORT GROUP BY", + "HASH GROUP BY", + "INDEX GROUP BY" +}; + +static char *g_distinct_names[] = { + "SORT DISTINCT", + "HASH DISTINCT", + "INDEX DISTINCT" +}; + +static char* g_join_oper[][2] = { + { "", "" }, + { "NESTED LOOPS" , "NESTED 
LOOPS" }, + { "NESTED LOOPS" , "NESTED LOOPS" }, + { "NESTED LOOPS OUTER" , "NESTED LOOPS OUTER ANTI" }, + { "NESTED LOOPS FULL" , "NESTED LOOPS FULL" }, + { "HASH JOIN(R)" , "HASH JOIN(L)" }, + { "HASH JOIN OUTER(R)" , "HASH JOIN OUTER(L)" }, + { "HASH JOIN FULL(R)" , "HASH JOIN FULL(L)" }, + { "HASH JOIN OUTER(R)" , "HASH JOIN OUTER(L)" }, + { "MERGE JOIN" , "MERGE JOIN" }, + { "MERGE JOIN OUTER" , "MERGE JOIN OUTER" }, + { "MERGE JOIN FULL" , "MERGE JOIN FULL" }, + { "HASH JOIN SEMI(R)" , "HASH JOIN SEMI(L)" }, + { "HASH JOIN ANTI(R)" , "HASH JOIN ANTI(L)" }, + { "HASH JOIN ANTI NA(R)" , "HASH JOIN ANTI NA(L)" }, + { "HASH JOIN RIGHT SEMI(R)" , "HASH JOIN RIGHT SEMI(L)" }, + { "HASH JOIN RIGHT ANTI(R)" , "HASH JOIN RIGHT ANTI(L)" }, + { "HASH JOIN RIGHT ANTI NA(R)" , "HASH JOIN RIGHT ANTI NA(L)" }, + { "HASH JOIN PAR(R)" , "HASH JOIN PAR(L)" }, +}; + +status_t expl_format_plan_node(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth); +static status_t expl_format_join_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth); + +static inline status_t expl_row_put_text_data(expl_helper_t *helper, expl_col_type_t type, text_t *row_data) +{ + helper->fmt_sizes[type] = MAX(helper->fmt_sizes[type], row_data->len); + return row_put_text(&helper->ra, row_data); +} + +static status_t expl_format_plan_id(expl_helper_t *helper) +{ + char buff[CT_MAX_INT32_STRLEN + 1] = {0}; + int32 row_id = helper->row_helper.id++; + int32 len = snprintf_s(buff, CT_MAX_INT32_STRLEN + 1, CT_MAX_INT32_STRLEN, "%lld", row_id); + if (SECUREC_UNLIKELY(len == -1)) { + return CT_ERROR; + } + + text_t rowid_str = {buff, len}; + return expl_row_put_text_data(helper, EXPL_COL_TYPE_ID, &rowid_str); +} + +static status_t expl_expand_text(text_t *in, text_t *out, uint32 depth) +{ + uint32 indent_size = MIN(CT_MAX_DFLT_VALUE_LEN - 1, depth * EXPL_SGL_INDENT_SIZE); + if (indent_size != 0) { + (void)memset_s(out->str, CT_MAX_DFLT_VALUE_LEN - 1, ' ', 
indent_size); + } + + uint32 text_size = MIN(CT_MAX_DFLT_VALUE_LEN - indent_size - 1, in->len); + if (text_size != 0) { + (void)memcpy_s(out->str + indent_size, CT_MAX_DFLT_VALUE_LEN - indent_size - 1, in->str, text_size); + } + + out->len = indent_size + text_size; + + return CT_SUCCESS; +} + +static status_t expl_format_plan_operation(expl_helper_t *helper) +{ + status_t ret = expl_expand_text(helper->row_helper.operation, &helper->content, helper->depth); + if (ret != CT_SUCCESS) { + // todo : errlog... + return ret; + } + + return expl_row_put_text_data(helper, EXPL_COL_TYPE_OPERATION, &helper->content); +} + +static status_t expl_format_plan_owner(expl_helper_t *helper) +{ + if (helper->row_helper.owner == NULL) { + return row_put_null(&helper->ra); + } + + return expl_row_put_text_data(helper, EXPL_COL_TYPE_OWNER, helper->row_helper.owner); +} + +static status_t expl_format_plan_name(expl_helper_t *helper) +{ + row_helper_t *row_helper = &helper->row_helper; + if (row_helper->name == NULL && row_helper->alias == NULL) { + return row_put_null(&helper->ra); + } + + char buff[CT_MAX_DFLT_VALUE_LEN] = {0}; + text_t *name = row_helper->name; + uint32 offset = 0; + if (name != NULL && name->str != NULL && name->len > 0) { + MEMS_RETURN_IFERR(memcpy_s(buff, CT_MAX_DFLT_VALUE_LEN, name->str, name->len)); + offset = name->len; + } + + text_t *alias = row_helper->alias; + if (alias != NULL && alias->str != NULL && alias->len > 0) { + if (offset != 0) { + MEMS_RETURN_IFERR(memcpy_s(buff + offset, CT_MAX_DFLT_VALUE_LEN - offset, " ", 1)); + offset++; + } + MEMS_RETURN_IFERR(memcpy_s(buff + offset, CT_MAX_DFLT_VALUE_LEN - offset, alias->str, alias->len)); + offset += alias->len; + } + + text_t new_name = {buff, offset}; + return expl_row_put_text_data(helper, EXPL_COL_TYPE_TABLE, &new_name); +} + +static status_t expl_format_plan_rows(expl_helper_t *helper) +{ + if (!(CBO_ON)) { + return row_put_null(&helper->ra); + } + + char buff[CT_MAX_INT64_STRLEN + 1] = {0}; + int32 
len = snprintf_s(buff, CT_MAX_INT64_STRLEN + 1, CT_MAX_INT64_STRLEN, "%lld", helper->row_helper.rows); + if (SECUREC_UNLIKELY(len == -1)) { + return CT_ERROR; + } + + text_t rows_str = {buff, len}; + return expl_row_put_text_data(helper, EXPL_COL_TYPE_ROWS, &rows_str); +} + +static status_t expl_format_plan_cost(expl_helper_t *helper) +{ + if (!(CBO_ON)) { + return row_put_null(&helper->ra); + } + + char buff[CT_MAX_INT64_STRLEN + 1] = {0}; + int64 cost = (int64)helper->row_helper.cost; + int32 len = snprintf_s(buff, CT_MAX_INT64_STRLEN + 1, CT_MAX_INT64_STRLEN, "%lld", cost); + if (SECUREC_UNLIKELY(len == -1)) { + return CT_ERROR; + } + + text_t cost_str = {buff, len}; + return expl_row_put_text_data(helper, EXPL_COL_TYPE_COST, &cost_str); +} + +static status_t expl_format_plan_bytes(expl_helper_t *helper) +{ + return row_put_null(&helper->ra); +} + +static status_t expl_format_plan_remarks(expl_helper_t *helper) +{ + return row_put_null(&helper->ra); +} + +expl_column_t g_expl_columns[] = {{EXPL_COL_TYPE_ID, {"Id", 2}, 4, expl_format_plan_id}, + {EXPL_COL_TYPE_OPERATION, {"Operation", 9}, 20, expl_format_plan_operation}, + {EXPL_COL_TYPE_OWNER, {"Owner", 5}, 10, expl_format_plan_owner}, + {EXPL_COL_TYPE_TABLE, {"Name", 4}, 10, expl_format_plan_name}, + {EXPL_COL_TYPE_ROWS, {"Rows", 4}, 10, expl_format_plan_rows}, + {EXPL_COL_TYPE_COST, {"Cost", 4}, 10, expl_format_plan_cost}, + {EXPL_COL_TYPE_BYTES, {"Bytes", 5}, 10, expl_format_plan_bytes}, + {EXPL_COL_TYPE_REMARK, {"Remark", 6}, 10, expl_format_plan_remarks}}; + +void expl_row_helper_init(row_helper_t *helper, plan_node_t *plan, text_t *operation, text_t *owner, text_t *name, + text_t *alias) +{ + helper->operation = operation; + helper->owner = owner; + helper->name = name; + helper->alias = alias; + + if (plan != NULL) { + helper->rows = plan->rows; + helper->cost = plan->cost; + } +} + +status_t expl_helper_init(sql_stmt_t *stmt, expl_helper_t *helper, uint32 mtrl_id) +{ + (void)memset_s(helper, 
sizeof(expl_helper_t), 0, sizeof(expl_helper_t)); + for (int32 i = 0; i < EXPL_COL_TYPE_MAX; i++) { + helper->fmt_sizes[i] = g_expl_columns[i].fmt_size; + } + + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&(helper->row_buf))); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&(helper->content.str))); + helper->mtrl_id = mtrl_id; + + return CT_SUCCESS; +} + +text_t *expl_get_explcol_name(uint32 idx) +{ + if (idx >= EXPL_COL_TYPE_MAX) { + return NULL; + } + return &g_expl_columns[idx].name; +} + +status_t expl_format_plan_node_row(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth, + char *oper_str, text_t *owner, text_t *name, text_t *alias) +{ + text_t oper = { oper_str, strlen(oper_str) }; + helper->depth = depth; + expl_row_helper_init(&helper->row_helper, plan, &oper, owner, name, alias); + row_init(&helper->ra, helper->row_buf, CT_MAX_ROW_SIZE, EXPL_COL_TYPE_MAX); + + for (int32 i = 0; i < EXPL_COL_TYPE_MAX; i++) { + expl_column_func_t expl_column_func = g_expl_columns[i].expl_column_func; + CT_RETURN_IFERR(expl_column_func(helper)); + } + return mtrl_insert_row(&stmt->mtrl, helper->mtrl_id, helper->row_buf, &helper->row_id); +} + +status_t expl_format_expr_node_plan(visit_assist_t *va, expr_node_t **node) +{ + expl_helper_t *helper = (expl_helper_t *)va->param0; + if ((*node)->type != EXPR_NODE_SELECT) { + return CT_SUCCESS; + } + + va->result0 = CT_TRUE; + // Just to obtain whether there is a selectstatement in the expr + if (va->result1 == CT_FALSE) { + return CT_SUCCESS; + } + uint32 ssa_id = (*node)->value.v_obj.id; + sql_select_t *select = (sql_select_t *)sql_array_get(helper->ssa, ssa_id); + plan_node_t *plan = select->plan; + + if (plan->type != PLAN_NODE_SELECT || plan == sql_get_plan(va->stmt)) { + return CT_SUCCESS; + } + return expl_format_plan_node(va->stmt, helper, plan->select_p.next, va->result2); +} + +status_t expl_format_aggr_node_plan(sql_stmt_t *stmt, expl_helper_t *helper, expr_node_t *node, 
uint32 depth) +{ + visit_assist_t va = {0}; + sql_init_visit_assist(&va, stmt, NULL); + va.excl_flags = VA_EXCL_PRIOR; + va.param0 = (void *)helper; + va.result0 = CT_FALSE; + va.result1 = CT_TRUE; + va.result2 = depth; + return visit_expr_node(&va, &node, expl_format_expr_node_plan); +} + +status_t expl_format_expr_tree_plan(sql_stmt_t *stmt, expl_helper_t *helper, expr_tree_t *expr, uint32 depth) +{ + visit_assist_t va = {0}; + sql_init_visit_assist(&va, stmt, NULL); + va.excl_flags = VA_EXCL_PRIOR; + va.param0 = (void *)helper; + va.result0 = CT_FALSE; // selectstatement in the expr + va.result1 = CT_TRUE; // TRUE: execute, FALSE: not execute + va.result2 = depth; + + return visit_expr_tree(&va, expr, expl_format_expr_node_plan); +} + +status_t expl_format_cond_node_plan(sql_stmt_t *stmt, expl_helper_t *helper, cond_node_t *cond, uint32 depth, + bool32 *has_select) +{ + visit_assist_t va = {0}; + sql_init_visit_assist(&va, stmt, NULL); + va.excl_flags = VA_EXCL_PRIOR; + va.param0 = (void *)helper; + va.result0 = CT_FALSE; // selectstatement in the expr + va.result1 = (has_select == NULL); // TRUE: execute, FALSE: not execute; + va.result2 = depth; + + CT_RETURN_IFERR(visit_cond_node(&va, cond, expl_format_expr_node_plan)); + if (has_select != NULL) { + *has_select = va.result0; + } + + return CT_SUCCESS; +} + +status_t expl_format_default_plan_node(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return CT_SUCCESS; +} + +status_t expl_format_rs_col_plan(sql_stmt_t *stmt, expl_helper_t *helper, sql_query_t *query, uint32 depth) +{ + rs_column_t *rs_col = NULL; + for (uint32 i = 0; i < query->rs_columns->count; i++) { + rs_col = (rs_column_t *)cm_galist_get(query->rs_columns, i); + if (rs_col->type != RS_COL_CALC) { + continue; + } + CT_RETURN_IFERR(expl_format_expr_tree_plan(stmt, helper, rs_col->expr, depth)); + } + return CT_SUCCESS; +} + +status_t expl_format_query_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, 
uint32 depth) +{ + // store + sql_query_t *query = helper->query; + sql_array_t *ssa = helper->ssa; + + // current node iter info + helper->query = plan->query.ref; + helper->ssa = &plan->query.ref->ssa; + + CT_RETURN_IFERR(expl_format_rs_col_plan(stmt, helper, plan->query.ref, depth)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->query.next, depth)); + + // restore + helper->ssa = ssa; + helper->query = query; + + return CT_SUCCESS; +} + +static inline bool32 expl_format_withas_has_mtrl(sql_withas_t *withas_plan) +{ + if (withas_plan == NULL || withas_plan->withas_factors->count == 0) { + return CT_FALSE; + } + uint32 i = 0; + sql_withas_factor_t *factor = NULL; + while (i < withas_plan->withas_factors->count) { + factor = (sql_withas_factor_t *)cm_galist_get(withas_plan->withas_factors, i++); + if (factor->is_mtrl) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +static status_t expl_format_withas_plan_node(sql_stmt_t *stmt, expl_helper_t *helper, sql_withas_t *withas_plan, + uint32 depth) +{ + uint32 i = 0; + sql_withas_factor_t *factor = NULL; + while (i < withas_plan->withas_factors->count) { + factor = (sql_withas_factor_t *)cm_galist_get(withas_plan->withas_factors, i++); + if (!factor->is_mtrl) { + continue; + } + plan_node_t *ws_plan = ((sql_select_t *)factor->subquery_ctx)->plan->select_p.next; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, ws_plan, depth, "LOAD AS SELECT", + NULL, &ws_plan->withas_p.name, NULL)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, ws_plan->withas_p.next, depth + 1)); + } + return CT_SUCCESS; +} + +static status_t expl_format_withas_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_withas_t *withas_plan = (sql_withas_t *)stmt->context->withas_entry; + if (!expl_format_withas_has_mtrl(withas_plan)) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "TEMP TABLE TRANSFORMATION", + NULL, NULL, NULL)); + 
CT_RETURN_IFERR(expl_format_withas_plan_node(stmt, helper, withas_plan, depth + 1)); + return CT_SUCCESS; +} + +status_t expl_format_select_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + bool32 is_subselect = (sql_get_plan(stmt) != plan); + char *oper = is_subselect ? "SUBSELECT" : "SELECT STATEMENT"; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + if (!is_subselect) { + CT_RETURN_IFERR(expl_format_withas_plan(stmt, helper, plan, depth + 1)); + } + + return expl_format_plan_node(stmt, helper, plan->select_p.next, depth + 1); +} + +status_t expl_format_user_rowid_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + // todo + sql_table_t *table = plan->scan_p.table; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "TABLE ACCESS BY ROWID", &table->user.value, + &table->name.value, &table->alias.value)); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth + 1, "ROWID SCAN", &table->user.value, + &table->name.value, &table->alias.value)); + // todo + return CT_SUCCESS; +} + +static bool32 expl_format_has_optimize(scan_list_array_t *arr) +{ + if (!can_use_point_scan(arr)) { + return CT_FALSE; + } + uint32 i = 0; + while (i < arr->count) { + if (arr->items[i].count > 1) { + return CT_TRUE; + } + if (arr->items[i].type == RANGE_LIST_FULL) { + return CT_FALSE; + } + i++; + } + return CT_FALSE; +} + +static inline index_mode_type_t expl_format_get_index_mode(sql_table_t *table, scan_list_array_t *arr) +{ + knl_index_desc_t *index = table->index; + index_mode_type_t index_mode = INDEX_MODE_RANGE_SCAN; + const index_mode_t g_index_mode[] = { + { table->multi_parts_scan, INDEX_MODE_MULTI_PARTS_SCAN }, + { table->index_ffs, INDEX_MODE_FAST_FULL_SCAN }, + { table->index_full_scan, INDEX_MODE_FULL_SCAN }, + { expl_format_has_optimize(arr), INDEX_MODE_OPTIMIZED_RANGE_SCAN }, + { table->idx_equal_to == index->column_count 
&& (index->primary || index->unique), INDEX_MODE_UNIQUE_SCAN }, + { table->index_skip_scan, INDEX_MODE_SKIP_SCAN } + }; + for (uint32 i = 0; i < sizeof(g_index_mode) / sizeof(g_index_mode[0]); i++) { + if (g_index_mode[i].idx_cond) { + index_mode = g_index_mode[i].idx_mode; + break; + } + } + return index_mode; +} + +static status_t expl_format_index_scan_mode(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char oper[CT_MAX_DFLT_VALUE_LEN] = { 0 }; + sql_table_t *scan_tbl = plan->scan_p.table; + sql_array_t *index_arr = &plan->scan_p.index_array; + scan_list_array_t arr = { 0 }; + arr.count = scan_tbl->index->column_count; + CT_RETURN_IFERR(sql_finalize_scan_range(stmt, index_arr, &arr, scan_tbl, NULL, NULL, CALC_IN_PLAN)); + + index_mode_type_t idx_mode = expl_format_get_index_mode(scan_tbl, &arr); + char *idx_oper = g_index_mode_oper[idx_mode]; + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, idx_oper, strlen(idx_oper))); + uint32 offset = (uint32)strlen(idx_oper); + text_t idx_name = { .str = scan_tbl->index->name, .len = (uint32)strlen(scan_tbl->index->name) }; + if (scan_tbl->index_dsc) { + MEMS_RETURN_IFERR(memcpy_s(oper + offset, CT_MAX_DFLT_VALUE_LEN - offset, " DESCENDING", + strlen(" DESCENDING"))); + } + return expl_format_plan_node_row(stmt, helper, plan, depth, oper, &scan_tbl->user.value, &idx_name, NULL); +} + +static status_t expl_format_user_index_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth) +{ + char oper[CT_MAX_DFLT_VALUE_LEN] = { 0 }; + int len = 0; + sql_table_t *scan_tbl = plan->scan_p.table; + if (!INDEX_ONLY_SCAN(scan_tbl->scan_flag)) { + len = snprintf_s(oper, CT_MAX_DFLT_VALUE_LEN, CT_MAX_DFLT_VALUE_LEN -1, "TABLE ACCESS BY INDEX ROWID "); + } else { + len = snprintf_s(oper, CT_MAX_DFLT_VALUE_LEN, CT_MAX_DFLT_VALUE_LEN -1, "TABLE ACCESS BY INDEX ONLY "); + } + if (SECUREC_UNLIKELY(len == -1)) { + return CT_ERROR; + } + uint32 offset = (uint32)len; + if 
(knl_is_part_table(scan_tbl->entry->dc.handle)) { + sql_part_get_print(stmt, &plan->scan_p, oper + offset, CT_MAX_DFLT_VALUE_LEN - offset); + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, &scan_tbl->user.value, + &scan_tbl->name.value, &scan_tbl->alias.value)); + CT_RETURN_IFERR(expl_format_index_scan_mode(stmt, helper, plan, depth + 1)); + return CT_SUCCESS; +} + +status_t expl_format_normal_table_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + text_t owner = {0}; + text_t name = {0}; + sql_table_t *table = plan->scan_p.table; + knl_dictionary_t *dc = &table->entry->dc; + + if ((dc->is_sysnonym == CT_TRUE) && + (dc->type == DICT_TYPE_TABLE || dc->type == DICT_TYPE_TEMP_TABLE_TRANS || + dc->type == DICT_TYPE_TEMP_TABLE_SESSION || dc->type == DICT_TYPE_TABLE_NOLOGGING)) { + dc_entry_t *entry = DC_ENTRY(dc); + owner.str = entry->user->desc.name; + owner.len = (uint32)strlen(owner.str); + name.str = entry->name; + name.len = (uint32)strlen(name.str); + } else { + owner = *(text_t *)&plan->scan_p.table->user; + name = *(text_t *)&plan->scan_p.table->name; + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "TABLE ACCESS FULL", &table->user.value, + &table->name.value, &table->alias.value)); + return CT_SUCCESS; +} + +status_t expl_format_view_as_table_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_table_t *table = plan->scan_p.table; + plan_node_t *select_plan = table->select_ctx->plan; + plan_node_t *next_plan = select_plan->select_p.next; + + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "VIEW", &table->user.value, &table->name.value, + &table->alias.value)); + + return expl_format_plan_node(stmt, helper, next_plan, depth + 1); +} + +status_t expl_format_subselect_as_table_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth) +{ + char *oper = NULL; + plan_node_t 
*next_plan = NULL; + sql_table_t *table = plan->scan_p.table; + plan_node_t *select_plan = table->select_ctx->plan; + + if (select_plan->select_p.next->type == PLAN_NODE_VM_VIEW_MTRL) { + oper = "VN VIEW"; + next_plan = select_plan->select_p.next->vm_view_p.next; + } else { + oper = "SUBSELECT"; + next_plan = select_plan->select_p.next; + } + + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, &table->user.value, &table->name.value, + &table->alias.value)); + + return expl_format_plan_node(stmt, helper, next_plan, depth + 1); +} + +status_t expl_format_func_as_table_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_table_t *table = plan->scan_p.table; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "TABLE ACCESS FULL", &table->user.value, + &table->name.value, &table->alias.value)); + return CT_SUCCESS; +} + +status_t expl_format_with_as_table_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_table_t *table = plan->scan_p.table; + plan_node_t *select_plan = table->select_ctx->plan; + + if (select_plan->select_p.next->type == PLAN_NODE_WITHAS_MTRL) { + withas_mtrl_plan_t *withas_p = &select_plan->select_p.next->withas_p; + return expl_format_plan_node_row(stmt, helper, plan, depth, "TABLE ACCESS FULL", NULL, &withas_p->name, + &table->alias.value); + } + + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "TABLE ACCESS FULL", &table->user.value, + &table->name.value, &table->alias.value)); + return CT_SUCCESS; +} + +status_t expl_format_scan_plan_deep(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_table_t *table = plan->scan_p.table; + + if (plan->scan_p.rowid_set != NULL && plan->scan_p.rowid_set->type == RANGE_LIST_NORMAL) { + return expl_format_user_rowid_scan_plan(stmt, helper, plan, depth); + } else if (plan->scan_p.table->index != NULL) { + return 
expl_format_user_index_scan_plan(stmt, helper, plan, depth); + } + + sql_table_type_t type = table->type; + switch (type) { + case NORMAL_TABLE: + return expl_format_normal_table_scan_plan(stmt, helper, plan, depth); + case VIEW_AS_TABLE: + return expl_format_view_as_table_scan_plan(stmt, helper, plan, depth); + case SUBSELECT_AS_TABLE: + return expl_format_subselect_as_table_scan_plan(stmt, helper, plan, depth); + case FUNC_AS_TABLE: + return expl_format_func_as_table_scan_plan(stmt, helper, plan, depth); + case JOIN_AS_TABLE: + return expl_format_normal_table_scan_plan(stmt, helper, plan, depth); + case WITH_AS_TABLE: + return expl_format_with_as_table_scan_plan(stmt, helper, plan, depth); + case JSON_TABLE: + return expl_format_func_as_table_scan_plan(stmt, helper, plan, depth); + } + return CT_SUCCESS; +} + +status_t expl_format_scan_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_query_t *query = helper->query; + sql_table_t *table = plan->scan_p.table; + bool32 has_subselect = CT_FALSE; + bool32 has_indx_subselect = CT_FALSE; + + if (query != NULL && query->cond != NULL) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, query->cond->root, depth, &has_subselect)); + } + if (table != NULL && table->index_cond_pruning) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, table->cond->root, depth, &has_indx_subselect)); + } + if (has_subselect || has_indx_subselect) { + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth++, "KERNEL FILTER", NULL, NULL, NULL)); + } + + CT_RETURN_IFERR(expl_format_scan_plan_deep(stmt, helper, plan, depth)); + + if (has_subselect) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, query->cond->root, depth, NULL)); + } + if (has_indx_subselect) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, table->cond->root, depth, NULL)); + } + + return CT_SUCCESS; +} + +status_t expl_format_union_plan(sql_stmt_t *stmt, expl_helper_t *helper, 
plan_node_t *plan, uint32 depth) +{ + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "HASH UNION", NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->set_p.left, depth + 1)); + return expl_format_plan_node(stmt, helper, plan->set_p.right, depth + 1); +} + +static status_t expl_format_union_all_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + plan_node_t *sub_node = NULL; + char oper[CT_MAX_DFLT_VALUE_LEN] = {0}; + uint32 offset = 0; + int32 len = 0; + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, "UNION ALL", strlen("UNION ALL"))); + if (g_instance->sql.parallel_policy && plan->set_p.union_all_p.par_exec) { + offset = (uint32)strlen("UNION ALL"); + len = snprintf_s(oper + offset, CT_MAX_DFLT_VALUE_LEN - offset, CT_MAX_DFLT_VALUE_LEN - offset- 1, + "(p %3u)", stmt->context->parallel); + } + if (SECUREC_UNLIKELY(len == -1)) { + return CT_ERROR; + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + for (uint32 i = 0; i < plan->set_p.list->count; i++) { + sub_node = (plan_node_t *)cm_galist_get(plan->set_p.list, i); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, sub_node, depth + 1)); + } + return CT_SUCCESS; +} + +static status_t expl_format_minus_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char oper[CT_MAX_DFLT_VALUE_LEN] = {0}; + uint32 offset = 0; + char *name = g_minus_names[plan->set_p.minus_p.minus_type]; + if (plan->type == PLAN_NODE_HASH_MINUS) { + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, "HASH ", strlen("HASH "))); + offset = (uint32)strlen("HASH "); + } + MEMS_RETURN_IFERR(memcpy_s(oper + offset, CT_MAX_DFLT_VALUE_LEN - offset, name, strlen(name))); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->set_p.left, depth + 1)); + return 
expl_format_plan_node(stmt, helper, plan->set_p.right, depth + 1); +} + +static status_t expl_format_aggr_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char oper[CT_MAX_DFLT_VALUE_LEN] = {0}; + uint32 offset = 0; + expr_node_t *aggr_node = NULL; + if (plan->type == PLAN_NODE_INDEX_AGGR) { + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, "INDEX ", strlen("INDEX "))); + offset = (uint32)strlen("INDEX "); + } + MEMS_RETURN_IFERR(memcpy_s(oper + offset, CT_MAX_DFLT_VALUE_LEN - offset, "AGGR", strlen("AGGR"))); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + for (uint32 i = 0; i < plan->aggr.items->count; i++) { + aggr_node = (expr_node_t *)cm_galist_get(plan->aggr.items, i); + CT_RETURN_IFERR(expl_format_aggr_node_plan(stmt, helper, aggr_node, depth + 1)); + } + return expl_format_plan_node(stmt, helper, plan->aggr.next, depth + 1); +} + +static status_t expl_format_nl_full_opt_row(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + plan_node_t l_drive_plan = *plan; + l_drive_plan.join_p.oper = JOIN_OPER_NL_LEFT; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "UNION ALL", NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_join_plan(stmt, helper, &l_drive_plan, depth + 1)); + return expl_format_plan_node(stmt, helper, plan->join_p.r_drive_plan, depth + 1); +} + +static status_t expl_format_join_exists_subselect(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth, cond_node_t **subselect_cond) +{ + bool32 has_subselect = CT_FALSE; + if (plan->join_p.filter != NULL) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, plan->join_p.filter->root, + depth + 1, &has_subselect)); + if (has_subselect) { + *subselect_cond = plan->join_p.filter->root; + } + return CT_SUCCESS; + } + if (plan->join_p.cond != NULL) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, plan->join_p.cond->root, + depth 
+ 1, &has_subselect)); + if (has_subselect) { + *subselect_cond = plan->join_p.cond->root; + } + return CT_SUCCESS; + } + return CT_SUCCESS; +} + +static status_t expl_format_join_fill_oper(plan_node_t *plan, char *oper) +{ + char *join_oper = NULL; + if (plan->join_p.oper != JOIN_OPER_NL_LEFT) { + join_oper = g_join_oper[plan->join_p.oper][plan->join_p.hash_left]; + } else { + join_oper = g_join_oper[plan->join_p.oper][plan->join_p.nl_full_r_drive]; + } + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, join_oper, strlen(join_oper))); + return CT_SUCCESS; +} + +static status_t expl_format_join_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + if (plan->join_p.oper == JOIN_OPER_NL_FULL && plan->join_p.nl_full_opt_type != NL_FULL_OPT_NONE) { + return expl_format_nl_full_opt_row(stmt, helper, plan, depth); + } + + cond_node_t *subselect_cond = NULL; + char oper[CT_MAX_DFLT_VALUE_LEN] = {0}; + CT_RETURN_IFERR(expl_format_join_exists_subselect(stmt, helper, plan, depth, &subselect_cond)); + if (subselect_cond != NULL) { + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth++, "KERNEL FILTER", NULL, NULL, NULL)); + } + CT_RETURN_IFERR(expl_format_join_fill_oper(plan, oper)); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->join_p.left, depth + 1)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->join_p.right, depth + 1)); + if (subselect_cond == NULL) { + return CT_SUCCESS; + } + return expl_format_cond_node_plan(stmt, helper, subselect_cond, depth, NULL); +} + +static status_t expl_format_insert_print_oper(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth, bool32 insert_all) +{ + char oper[CT_MAX_DFLT_VALUE_LEN] = {0}; + sql_table_t *table = plan->insert_p.table; + if (insert_all) { + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, "MULTI TABLE INSERT", 
strlen("MULTI TABLE INSERT"))); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + return CT_SUCCESS; + } + uint32 offset = (uint32)strlen("LOAD TABLE CONVENTIONAL "); + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, "LOAD TABLE CONVENTIONAL ", offset)); + if (stmt->context->type != CTSQL_TYPE_REPLACE && knl_is_part_table(table->entry->dc.handle)) { + CT_RETURN_IFERR(sql_calc_part_print(stmt, oper + offset, CT_MAX_DFLT_VALUE_LEN - offset)); + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, + &table->user.value, &table->name.value, + &table->alias.value)); + return CT_SUCCESS; +} + +static status_t expl_format_insert_expr_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth, sql_insert_t *insert_ctx, bool32 insert_all) +{ + uint32 i = 0; + column_value_pair_t *pair = NULL; + expr_tree_t *expr = NULL; + sql_table_t *table = plan->insert_p.table; + if (insert_all) { + while (i < insert_ctx->pairs_count) { + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "INTO", + &table->user.value, &table->name.value, + &table->alias.value)); + i++; + } + return CT_SUCCESS; + } + while (i < insert_ctx->pairs_count) { + pair = (column_value_pair_t *)cm_galist_get(insert_ctx->pairs, i++); + if (pair->exprs == NULL) { + continue; + } + for(uint32 j = 0; j < pair->exprs->count; j++) { + expr = (expr_tree_t *)cm_galist_get(pair->exprs, j); + CT_RETURN_IFERR(expl_format_expr_tree_plan(stmt, helper, expr, depth)); + } + } + return CT_SUCCESS; +} + +static status_t expl_format_insert_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + sql_insert_t *insert_ctx = NULL; + sql_array_t *ssa = helper->ssa; + char *oper = NULL; + if (stmt->context->type != CTSQL_TYPE_REPLACE) { + insert_ctx = (sql_insert_t *)stmt->context->entry; + oper = "INSERT STATEMENT"; + } else { + insert_ctx = &((sql_replace_t 
*)stmt->context->entry)->insert_ctx; + oper = "REPLACE STATEMENT"; + } + helper->ssa = &insert_ctx->ssa; + bool32 insert_all = CT_BIT_TEST(insert_ctx->syntax_flag, INSERT_IS_ALL); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + if (sql_get_plan(stmt) == plan) { + CT_RETURN_IFERR(expl_format_withas_plan(stmt, helper, plan, depth + 1)); + } + CT_RETURN_IFERR(expl_format_insert_print_oper(stmt, helper, plan, depth + 1, insert_all)); + if (insert_ctx->select_ctx != NULL) { + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, insert_ctx->select_ctx->plan, + depth + EXPL_DEPTH_CALC_LEVEL)); + } + CT_RETURN_IFERR(expl_format_insert_expr_plan(stmt, helper, plan, depth + EXPL_DEPTH_CALC_LEVEL, + insert_ctx, insert_all)); + helper->ssa = ssa; + return CT_SUCCESS; +} + +static status_t expl_format_merge_insert_plan(sql_stmt_t *stmt, expl_helper_t *helper, uint32 depth) +{ + sql_merge_t *merge_ctx = (sql_merge_t *)stmt->context->entry; + if (merge_ctx->insert_ctx == NULL) { + return CT_SUCCESS; + } + if (merge_ctx->insert_filter_cond == NULL) { + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, merge_ctx->insert_ctx->plan, depth, "INSERT STATEMENT", + NULL, NULL, NULL)); + return CT_SUCCESS; + } + sql_array_t *ssa = helper->ssa; + helper->ssa = &merge_ctx->query->ssa; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, NULL, depth, "FILTER", NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, merge_ctx->insert_ctx->plan, depth + 1, "INSERT STATEMENT", + NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, merge_ctx->insert_filter_cond->root, + depth + EXPL_DEPTH_CALC_LEVEL, NULL)); + helper->ssa = ssa; + return CT_SUCCESS; +} + +static status_t expl_format_merge_update_expr_plan(sql_stmt_t *stmt, expl_helper_t *helper, uint32 depth) +{ + sql_merge_t *merge_ctx = (sql_merge_t *)stmt->context->entry; + galist_t *update_pairs = merge_ctx->update_ctx->pairs; + 
expr_tree_t *expr = NULL; + column_value_pair_t *update_pair = NULL; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, merge_ctx->update_ctx->plan, depth, "UPDATE STATEMENT", + NULL, NULL, NULL)); + for (uint32 i = 0; i < update_pairs->count; i++) { + update_pair = (column_value_pair_t *)cm_galist_get(update_pairs, i); + expr = (expr_tree_t *)cm_galist_get(update_pair->exprs, 0); + CT_RETURN_IFERR(expl_format_expr_tree_plan(stmt, helper, expr, depth + 1)); + } + return CT_SUCCESS; +} + +static status_t expl_format_merge_update_plan(sql_stmt_t *stmt, expl_helper_t *helper, uint32 depth) +{ + sql_merge_t *merge_ctx = (sql_merge_t *)stmt->context->entry; + if (merge_ctx->update_ctx == NULL) { + return CT_SUCCESS; + } + sql_array_t *ssa = helper->ssa; + helper->ssa = &merge_ctx->query->ssa; + if (merge_ctx->update_filter_cond == NULL) { + CT_RETURN_IFERR(expl_format_merge_update_expr_plan(stmt, helper, depth)); + return CT_SUCCESS; + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, NULL, depth, "FILTER", NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_merge_update_expr_plan(stmt, helper, depth + 1)); + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, merge_ctx->update_filter_cond->root, + depth + EXPL_DEPTH_CALC_LEVEL, NULL)); + helper->ssa = ssa; + return CT_SUCCESS; +} + +static status_t expl_format_merge_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char oper[CT_MAX_DFLT_VALUE_LEN] = {0}; + uint32 offset = (uint32)strlen("MERGE STATEMENT"); + MEMS_RETURN_IFERR(memcpy_s(oper, CT_MAX_DFLT_VALUE_LEN, "MERGE STATEMENT", offset)); + if (plan->merge_p.merge_keys == NULL || plan->merge_p.merge_keys->count == 0) { + MEMS_RETURN_IFERR(memcpy_s(oper + offset, CT_MAX_DFLT_VALUE_LEN - offset, "(NESTED LOOPS)", + strlen("(NESTED LOOPS)"))); + } else { + MEMS_RETURN_IFERR(memcpy_s(oper + offset, CT_MAX_DFLT_VALUE_LEN - offset, "(HASH JOIN)", + strlen("(HASH JOIN)"))); + } + 
CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + if (sql_get_plan(stmt) == plan) { + CT_RETURN_IFERR(expl_format_withas_plan(stmt, helper, plan, depth + 1)); + } + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan->merge_p.using_table_scan_p, depth + 1, + "USING TABLE", NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->merge_p.using_table_scan_p, + depth + EXPL_DEPTH_CALC_LEVEL)); + helper->query = NULL; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan->merge_p.merge_into_scan_p, depth + 1, + "MERGE TABLE", NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->merge_p.merge_into_scan_p, + depth + EXPL_DEPTH_CALC_LEVEL)); + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, NULL, depth + 1, "ON CONDITION", NULL, NULL, NULL)); + + CT_RETURN_IFERR(expl_format_merge_insert_plan(stmt, helper, depth + EXPL_DEPTH_CALC_LEVEL)); + return expl_format_merge_update_plan(stmt, helper, depth +EXPL_DEPTH_CALC_LEVEL); +} + +static status_t expl_format_delete_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "DELETE STATEMENT", NULL, NULL, NULL)); + if (sql_get_plan(stmt) == plan) { + CT_RETURN_IFERR(expl_format_withas_plan(stmt, helper, plan, depth + 1)); + } + return expl_format_plan_node(stmt, helper, plan->delete_p.next, depth + 1); +} + +static bool32 expl_format_update_expr_exists(expr_tree_t **expr_arr, expr_tree_t *expr, uint32 count) +{ + for (uint32 i = 0; i < count; i++) { + if (expr_arr[i] == expr) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +static status_t expl_format_update_expr_plan(sql_stmt_t *stmt, expl_helper_t *helper, update_plan_t *upd_plan, + uint32 depth) +{ + sql_update_t *update_ctx = (sql_update_t *)stmt->context->entry; + sql_array_t *ssa = helper->ssa; + expr_tree_t **expr_arr; + upd_object_t *object = NULL; + 
column_value_pair_t *pair = NULL; + expr_tree_t *expr = NULL; + for (uint32 i = 0; i < upd_plan->objects->count; i++) { + object = (upd_object_t *)cm_galist_get(upd_plan->objects, i); + if (object->pairs->count == 0) { + continue; + } + CT_RETURN_IFERR(sql_push(stmt, object->pairs->count * sizeof(pointer_t), (void **)&expr_arr)); + uint32 expr_count = 0; + for (uint32 j = 0; j < object->pairs->count; j++) { + pair = (column_value_pair_t *)cm_galist_get(object->pairs, j); + expr = (expr_tree_t *)cm_galist_get(pair->exprs, 0); + if (expr_count == 0 || !expl_format_update_expr_exists(expr_arr, expr, expr_count)) { + expr_arr[expr_count] = expr; + expr_count++; + } else { + continue; + } + helper->ssa = &update_ctx->query->ssa; + if (expl_format_expr_tree_plan(stmt, helper, expr, depth) != CT_SUCCESS) { + CTSQL_POP(stmt); + return CT_ERROR; + } + } + CTSQL_POP(stmt); + } + helper->ssa = ssa; + return CT_SUCCESS; +} + +static status_t expl_format_update_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "UPDATE STATEMENT", NULL, NULL, NULL)); + if (sql_get_plan(stmt) == plan) { + CT_RETURN_IFERR(expl_format_withas_plan(stmt, helper, plan, depth + 1)); + } + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, plan->update_p.next, depth + 1)); + return expl_format_update_expr_plan(stmt, helper, &plan->update_p, depth + 1); +} + +static status_t expl_format_sort_distinct_expr(sql_stmt_t *stmt, expl_helper_t *helper, galist_t *columns, uint32 depth) +{ + rs_column_t *col = NULL; + for (uint32 i = 0; i < columns->count; i++) { + col = (rs_column_t *)cm_galist_get(columns, i); + if (col->type == RS_COL_CALC) { + CT_RETURN_IFERR(expl_format_expr_tree_plan(stmt, helper, col->expr, depth)); + } + } + return CT_SUCCESS; +} + +static status_t expl_format_sort_plan_data(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth, + char *oper) +{ + galist_t *columns = 
plan->query_sort.select_columns; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_sort_distinct_expr(stmt, helper, columns, depth + 1)); + return expl_format_plan_node(stmt, helper, plan->query_sort.next, depth + 1); +} + +static status_t expl_format_sort_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char *oper = NULL; + if (plan->type == PLAN_NODE_QUERY_SORT_PAR) { + oper = "PAR QUERY SORT ORDER BY"; + } else if (plan->type == PLAN_NODE_QUERY_SIBL_SORT) { + oper = "QUERY SORT SIBLINGS ORDER BY"; + } else if (SORT_ORDER_BY_ROWNUM(plan)) { + oper = "QUERY SORT ORDER BY ROWNUM"; + } else { + oper = "QUERY SORT ORDER BY"; + } + return expl_format_sort_plan_data(stmt, helper, plan, depth, oper); +} + +static status_t expl_format_distinct_plan_data(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth, char *oper) +{ + galist_t *columns = plan->distinct.columns; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + CT_RETURN_IFERR(expl_format_sort_distinct_expr(stmt, helper, columns, depth + 1)); + return expl_format_plan_node(stmt, helper, plan->distinct.next, depth + 1); +} + +static status_t expl_format_distinct_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char *oper = g_distinct_names[plan->type - PLAN_NODE_SORT_DISTINCT]; + return expl_format_distinct_plan_data(stmt, helper, plan, depth, oper); +} + +static status_t expl_format_next_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + plan_node_t *next_plan, uint32 depth, char *oper) +{ + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, oper, NULL, NULL, NULL)); + return expl_format_plan_node(stmt, helper, next_plan, depth + 1); +} + +static status_t expl_format_group_by_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + char *oper 
= g_group_by_names[plan->type - PLAN_NODE_SORT_GROUP]; + return expl_format_next_plan(stmt, helper, plan, plan->group.next, depth, oper); +} + +static status_t expl_format_select_sort_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->select_sort.next, depth, "SELECT SORT ORDER BY"); +} + +static status_t expl_format_having_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + CT_RETURN_IFERR(expl_format_next_plan(stmt, helper, plan, plan->having.next, depth, "HAVING")); + if (plan->having.cond == NULL) { + return CT_SUCCESS; + } + return expl_format_cond_node_plan(stmt, helper, plan->having.cond->root, depth, NULL); +} + +static status_t expl_format_query_limit_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->limit.next, depth, "QUERY LIMIT"); +} + +static status_t expl_format_select_limit_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->limit.next, depth, "SELECT LIMIT"); +} + +static status_t expl_format_connect_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + connect_plan_t *cb_plan = &plan->connect; + bool32 is_next_cb_type_mtrl = (cb_plan->next_connect_by->type == PLAN_NODE_CONNECT_MTRL); + + // start with condition + cond_tree_t *sw_cond = (cb_plan->s_query != NULL) ? cb_plan->s_query->cond : + (is_next_cb_type_mtrl ? 
NULL : cb_plan->start_with_cond); + if (cb_plan->s_query || (!is_next_cb_type_mtrl && cb_plan->start_with_cond)) { + CT_RETURN_IFERR(expl_format_next_plan(stmt, helper, plan, cb_plan->next_start_with, depth, "START WITH")); + if (sw_cond) { + CT_RETURN_IFERR(expl_format_cond_node_plan(stmt, helper, sw_cond->root, depth, NULL)); + } + } + + // connect by condition + if (is_next_cb_type_mtrl) { + return expl_format_plan_node(stmt, helper, cb_plan->next_connect_by, depth); + } + if (cb_plan->connect_by_cond == NULL) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(expl_format_next_plan(stmt, helper, plan, cb_plan->next_connect_by, depth, "CONNECT BY")); + return expl_format_cond_node_plan(stmt, helper, cb_plan->connect_by_cond->root, depth, NULL); +} + +static status_t expl_format_filter_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + CT_RETURN_IFERR(expl_format_next_plan(stmt, helper, plan, plan->filter.next, depth, "FILTER")); + if (plan->filter.cond == NULL) { + return CT_SUCCESS; + } + return expl_format_cond_node_plan(stmt, helper, plan->filter.cond->root, depth, NULL); +} + +static status_t expl_format_window_sort_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->winsort_p.next, depth, "WINDOW SORT"); +} + +static status_t expl_format_group_merge_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->group.next, depth, "MERGE GROUP BY"); +} + +static status_t expl_format_parallel_group_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, + uint32 depth) +{ + group_plan_t group = plan->group; + if (group.multi_prod) { + return expl_format_next_plan(stmt, helper, plan, group.next, depth, "PARALLEL HASH GROUP BY (M-M)"); + } + return expl_format_next_plan(stmt, helper, plan, group.next, depth, "PARALLEL HASH GROUP BY (S-S)"); +} + +static status_t 
expl_format_hash_mtrl_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->hash_mtrl.group.next, depth, "HASH MATERIALIZE"); +} + +static status_t expl_format_concate_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + uint32 i = 0; + plan_node_t *sub_plan = NULL; + CT_RETURN_IFERR(expl_format_plan_node_row(stmt, helper, plan, depth, "CONCATENATION", NULL, NULL, NULL)); + while (i < plan->cnct_p.plans->count) { + sub_plan = (plan_node_t *)cm_galist_get(plan->cnct_p.plans, i++); + CT_RETURN_IFERR(expl_format_plan_node(stmt, helper, sub_plan, depth + 1)); + } + return CT_SUCCESS; +} + +static status_t expl_format_cube_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->cube.next, depth, "GENERATE CUBE"); +} + +static status_t expl_format_pivot_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->group.next, depth, "HASH GROUP PIVOT"); +} + +static status_t expl_format_unpivot_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->unpivot_p.next, depth, "UNPIVOT"); +} + +static status_t expl_format_rownum_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + if (helper->query->incl_flags & COND_INCL_ROWNUM) { + return expl_format_next_plan(stmt, helper, plan, plan->rownum_p.next, depth, "ROWNUM FILTER"); + } + return expl_format_next_plan(stmt, helper, plan, plan->rownum_p.next, depth, "ROWNUM COUNT"); +} + +static status_t expl_format_for_update_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->for_update.next, depth, "FOR UPDATE"); +} + +static status_t 
expl_format_connect_mtrl_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->cb_mtrl.next, depth, "CONNECT BY MATERIALIZE"); +} + +static status_t expl_format_vm_view_mtrl_plan(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + return expl_format_next_plan(stmt, helper, plan, plan->vm_view_p.next, depth, "VM VIEW"); +} + +static expl_plan_t g_expl_plan_funcs[] = {{PLAN_NODE_QUERY, expl_format_query_plan}, + {PLAN_NODE_UNION, expl_format_union_plan}, + {PLAN_NODE_UNION_ALL, expl_format_union_all_plan}, + {PLAN_NODE_MINUS, expl_format_minus_plan}, + {PLAN_NODE_HASH_MINUS, expl_format_minus_plan}, + {PLAN_NODE_MERGE, expl_format_merge_plan}, + {PLAN_NODE_INSERT, expl_format_insert_plan}, + {PLAN_NODE_DELETE, expl_format_delete_plan}, + {PLAN_NODE_UPDATE, expl_format_update_plan}, + {PLAN_NODE_SELECT, expl_format_select_plan}, + {PLAN_NODE_JOIN, expl_format_join_plan}, + {PLAN_NODE_SORT_GROUP, expl_format_group_by_plan}, + {PLAN_NODE_MERGE_SORT_GROUP, expl_format_group_by_plan}, + {PLAN_NODE_HASH_GROUP, expl_format_group_by_plan}, + {PLAN_NODE_INDEX_GROUP, expl_format_group_by_plan}, + {PLAN_NODE_QUERY_SORT, expl_format_sort_plan}, + {PLAN_NODE_SELECT_SORT, expl_format_select_sort_plan}, + {PLAN_NODE_AGGR, expl_format_aggr_plan}, + {PLAN_NODE_INDEX_AGGR, expl_format_aggr_plan}, + {PLAN_NODE_SORT_DISTINCT, expl_format_distinct_plan}, + {PLAN_NODE_HASH_DISTINCT, expl_format_distinct_plan}, + {PLAN_NODE_INDEX_DISTINCT, expl_format_distinct_plan}, + {PLAN_NODE_HAVING, expl_format_having_plan}, + {PLAN_NODE_SCAN, expl_format_scan_plan}, + {PLAN_NODE_QUERY_LIMIT, expl_format_query_limit_plan}, + {PLAN_NODE_SELECT_LIMIT, expl_format_select_limit_plan}, + {PLAN_NODE_CONNECT, expl_format_connect_plan}, + {PLAN_NODE_FILTER, expl_format_filter_plan}, + {PLAN_NODE_WINDOW_SORT, expl_format_window_sort_plan}, + {PLAN_NODE_REMOTE_SCAN, expl_format_default_plan_node}, + 
{PLAN_NODE_GROUP_MERGE, expl_format_group_merge_plan}, + {PLAN_NODE_HASH_GROUP_PAR, expl_format_parallel_group_plan}, + {PLAN_NODE_HASH_MTRL, expl_format_hash_mtrl_plan}, + {PLAN_NODE_CONCATE, expl_format_concate_plan}, + {PLAN_NODE_QUERY_SORT_PAR, expl_format_sort_plan}, + {PLAN_NODE_QUERY_SIBL_SORT, expl_format_sort_plan}, + {PLAN_NODE_GROUP_CUBE, expl_format_cube_plan}, + {PLAN_NODE_HASH_GROUP_PIVOT, expl_format_pivot_plan}, + {PLAN_NODE_UNPIVOT, expl_format_unpivot_plan}, + {PLAN_NODE_ROWNUM, expl_format_rownum_plan}, + {PLAN_NODE_FOR_UPDATE, expl_format_for_update_plan}, + {PLAN_NODE_WITHAS_MTRL, expl_format_default_plan_node}, + {PLAN_NODE_CONNECT_MTRL, expl_format_connect_mtrl_plan}, + {PLAN_NODE_CONNECT_HASH, expl_format_connect_plan}, + {PLAN_NODE_VM_VIEW_MTRL, expl_format_vm_view_mtrl_plan}}; + +status_t expl_format_plan_node(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth) +{ + CM_ASSERT(plan->type <= sizeof(g_expl_plan_funcs) / sizeof(expl_plan_t)); + CM_ASSERT(plan->type == g_expl_plan_funcs[plan->type - PLAN_NODE_QUERY].type); + CM_ASSERT(g_expl_plan_funcs[plan->type - PLAN_NODE_QUERY].explain_plan_func != NULL); + + return g_expl_plan_funcs[plan->type - PLAN_NODE_QUERY].explain_plan_func(stmt, helper, plan, depth); +} diff --git a/pkg/src/ctsql/executor/explain/expl_plan.h b/pkg/src/ctsql/executor/explain/expl_plan.h new file mode 100644 index 0000000000000000000000000000000000000000..e080c19ef93aab453e4996210d02384695d61b29 --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_plan.h @@ -0,0 +1,56 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * expl_plan.h + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_plan.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef __EXPL_PLAN_H__ +#define __EXPL_PLAN_H__ + +#include "expl_common.h" +#include "expl_predicate.h" + +typedef status_t (*expl_column_func_t)(expl_helper_t *helper); + +typedef struct st_expl_column { + expl_col_type_t type; + text_t name; + uint32 fmt_size; // default format size + expl_column_func_t expl_column_func; +} expl_column_t; + +typedef status_t (*expl_plan_func_t)(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth); + +typedef struct st_expl_plan { + plan_node_type_t type; + expl_plan_func_t explain_plan_func; +} expl_plan_t; + +typedef status_t (*expl_fmt_func_t)(sql_stmt_t *stmt, sql_cursor_t *cursor, expl_helper_t *helper, char *content); + +void expl_row_helper_init(row_helper_t *helper, plan_node_t *plan, text_t *operation, text_t *owner, text_t *name, + text_t *alias); +status_t expl_helper_init(sql_stmt_t *stmt, expl_helper_t *helper, uint32 mtrl_id); +text_t *expl_get_explcol_name(uint32 idx); +status_t expl_format_plan_node(sql_stmt_t *stmt, expl_helper_t *helper, plan_node_t *plan, uint32 depth); + +#endif diff --git a/pkg/src/ctsql/executor/explain/expl_predicate.c b/pkg/src/ctsql/executor/explain/expl_predicate.c new file mode 100644 index 0000000000000000000000000000000000000000..d1eef8a2230cf3fd3d2e9ca817baaf625e65c3c8 --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_predicate.c @@ -0,0 +1,238 @@ +/* 
------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * expl_predicate.c + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_predicate.c + * + * ------------------------------------------------------------------------- + */ + +#include "expl_predicate.h" + +#define EXPL_SIGN_JOIN " * " +#define EXPL_SIGN_NOJOIN " - " +#define EXPL_SIGN_LEN 3 +#define EXPL_COND_HEAD " " +#define EXPL_COND_HEAD_LEN 4 + +bool32 expl_pred_explain_enabled(sql_stmt_t *stmt) +{ + if (stmt->hide_plan_extras || stmt->context->has_dblink) { + return CT_FALSE; + } + if (stmt->session->plan_display_format != 0) { + return CT_BIT_TEST(stmt->session->plan_display_format, PLAN_FORMAT_PREDICATE); + } + return CT_BIT_TEST(g_instance->sql.plan_display_format, PLAN_FORMAT_PREDICATE); +} + +status_t expl_pred_helper_init(sql_stmt_t *stmt, pred_helper_t *helper, uint32 mtrl_id) +{ + (void)memset_s(helper, sizeof(pred_helper_t), 0, sizeof(pred_helper_t)); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&(helper->row_buf))); + CT_RETURN_IFERR(sql_push(stmt, CT_MAX_ROW_SIZE, (void **)&(helper->content.str))); + helper->content.cap = CT_MAX_ROW_SIZE; + + helper->mtrl_id = mtrl_id; + helper->is_start_with = CT_FALSE; + helper->type = PREDICATE_FILTER; + helper->is_enabled = CT_TRUE; + + return CT_SUCCESS; 
+} + +status_t expl_put_pred_data(sql_stmt_t *stmt, pred_helper_t *helper, var_text_t *content) +{ + text_t row_data = {.str = content->str, .len = content->len}; + row_init(&helper->ra, helper->row_buf, CT_MAX_ROW_SIZE, EXPL_PRED_COL_NUM); + CT_RETURN_IFERR(row_put_text(&helper->ra, &row_data)); + return mtrl_insert_row(&stmt->mtrl, helper->mtrl_id, helper->row_buf, &helper->row_id); +} + +status_t expl_format_cond_head(var_text_t *content, int32 row_id, bool32 is_join) +{ + char row_id_str[CT_MAX_INT32_STRLEN + 1] = {0}; + row_id--; + int32 row_id_len = snprintf_s(row_id_str, CT_MAX_INT32_STRLEN + 1, CT_MAX_INT32_STRLEN, "%d", row_id); + PRTS_RETURN_IFERR(row_id_len); + + CT_RETURN_IFERR(cm_concat_n_var_string(content, EXPL_COND_HEAD, EXPL_COND_HEAD_LEN)); + CT_RETURN_IFERR(cm_concat_n_var_string(content, row_id_str, row_id_len)); + char *sign = is_join ? EXPL_SIGN_JOIN : EXPL_SIGN_NOJOIN; + return cm_concat_n_var_string(content, sign, EXPL_SIGN_LEN); +} + +const char *expl_get_pred_type(predicate_type type) +{ + if (type == PREDICATE_FILTER) { + return "filter: "; + } + return "access: "; +} + +status_t expl_put_pred_info(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, cond_tree_t *cond) +{ + if (cond == NULL || cond->root == NULL || stmt->context->has_dblink) { + return CT_SUCCESS; + } + + int row_id = helper->parent->row_helper.id; + var_text_t *content = &helper->content; + CM_TEXT_CLEAR(content); + CT_RETURN_IFERR(expl_format_cond_head(content, row_id, CT_FALSE)); + CT_RETURN_IFERR(cm_concat_var_string(content, expl_get_pred_type(helper->type))); + // format condition data; + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, cond->root, CT_FALSE, content)); + CT_RETURN_IFERR(expl_put_pred_data(stmt, helper, content)); + + return CT_SUCCESS; +} + +status_t expl_format_node_join(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + return CT_SUCCESS; +} + +status_t expl_format_node_having(sql_stmt_t *stmt, sql_query_t 
*query, pred_helper_t *helper, plan_node_t *plan) +{ + cond_tree_t *cond = plan->having.cond; + return expl_put_pred_info(stmt, query, helper, cond); +} + +status_t expl_format_node_scan(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + cond_tree_t *cond = plan->scan_p.table->cond; + return expl_put_pred_info(stmt, query, helper, cond); +} + +status_t expl_format_node_connect(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + cond_tree_t *cond = NULL; + if (helper->is_start_with) { + cond = plan->connect.start_with_cond; + } else { + cond = plan->connect.connect_by_cond; + } + + return expl_put_pred_info(stmt, query, helper, cond); +} + +status_t expl_format_node_filter(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + cond_tree_t *cond = plan->filter.cond; + return expl_put_pred_info(stmt, query, helper, cond); +} + +status_t expl_format_node_hash_mtrl(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + int row_id = helper->parent->row_helper.id; + var_text_t *content = &helper->content; + + CT_RETURN_IFERR(expl_format_cond_head(content, row_id, CT_FALSE)); + CT_RETURN_IFERR(cm_concat_var_string(content, expl_get_pred_type(PREDICATE_ACCESS))); + CT_RETURN_IFERR(ctsql_unparse_hash_mtrl_node(query, plan, content)); + CT_RETURN_IFERR(expl_put_pred_data(stmt, helper, content)); + CM_TEXT_CLEAR(content); + + return CT_SUCCESS; +} + +status_t expl_format_node_rownum(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + return CT_SUCCESS; +} + +status_t expl_format_node_connect_mtrl(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan) +{ + int row_id = helper->parent->row_helper.id; + text_t *content = &helper->content; + + CM_TEXT_CLEAR(content); + CT_RETURN_IFERR(expl_format_cond_head(content, row_id, CT_FALSE)); + CT_RETURN_IFERR(cm_concat_var_string(content, 
expl_get_pred_type(PREDICATE_ACCESS))); + CT_RETURN_IFERR(ctsql_unparse_connect_mtrl_join_node(query, plan, content)); + CT_RETURN_IFERR(expl_put_pred_data(stmt, helper, content)); + + cond_tree_t *start_cond = plan->cb_mtrl.start_with_cond; + if (start_cond == NULL || start_cond->root->type == COND_NODE_TRUE) { + return CT_SUCCESS; + } + helper->type = PREDICATE_FILTER; + return expl_put_pred_info(stmt, query, helper, start_cond); +} + +static expl_pred_t g_expl_pred_funcs[] = {{PLAN_NODE_JOIN, expl_format_node_join}, + {PLAN_NODE_HAVING, expl_format_node_having}, + {PLAN_NODE_SCAN, expl_format_node_scan}, + {PLAN_NODE_CONNECT, expl_format_node_connect}, + {PLAN_NODE_FILTER, expl_format_node_filter}, + {PLAN_NODE_HASH_MTRL, expl_format_node_hash_mtrl}, + {PLAN_NODE_ROWNUM, expl_format_node_rownum}, + {PLAN_NODE_CONNECT_MTRL, expl_format_node_connect_mtrl}, + {PLAN_NODE_CONNECT_HASH, expl_format_node_connect}}; + +static inline bool32 expl_is_support_predicate(plan_node_type_t type) +{ + for (int32 i = 0; i < sizeof(g_expl_pred_funcs) / sizeof(expl_pred_t); i++) { + if (type == g_expl_pred_funcs[i].type) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +static status_t expl_format_pred_row_by_type(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, + plan_node_t *plan) +{ + expl_pred_t *pred_func = NULL; + for (int32 i = 0; i < sizeof(g_expl_pred_funcs)/sizeof(expl_pred_t); i++) { + if (plan->type == g_expl_pred_funcs[i].type) { + pred_func = &g_expl_pred_funcs[i]; + break; + } + } + + if (pred_func == NULL) { + return CT_SUCCESS; + } + + return pred_func->expl_pred_func(stmt, query, helper, plan); +} + +status_t expl_format_predicate_row(sql_stmt_t *stmt, pred_helper_t *helper, plan_node_t *plan) +{ + sql_query_t *query = NULL; + if (!helper->is_enabled || !expl_is_support_predicate(plan->type)) { + return CT_SUCCESS; + } + + if (helper->is_start_with && helper->parent->query != NULL && helper->parent->query->s_query != NULL) { + query = 
helper->parent->query->s_query; + } else if (helper->parent->query != NULL) { + query = helper->parent->query; + } else if (stmt->context->type == CTSQL_TYPE_MERGE) { + query = ((sql_merge_t *)stmt->context->entry)->query; + } + + if (query == NULL) { + return CT_SUCCESS; + } + + return expl_format_pred_row_by_type(stmt, query, helper, plan); +} diff --git a/pkg/src/ctsql/executor/explain/expl_predicate.h b/pkg/src/ctsql/executor/explain/expl_predicate.h new file mode 100644 index 0000000000000000000000000000000000000000..cdbf497c357d98a8fe1c5fea5b04c6fd7c34f1cf --- /dev/null +++ b/pkg/src/ctsql/executor/explain/expl_predicate.h @@ -0,0 +1,44 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * expl_predicate.h + * + * + * IDENTIFICATION + * src/ctsql/executor/explain/expl_predicate.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef __EXPL_PREDICATE_H__ +#define __EXPL_PREDICATE_H__ + +#include "expl_common.h" +#include "ctsql_unparser.h" + +#define EXPL_PRED_COL_NUM 1 + +typedef status_t (*expl_pred_func_t)(sql_stmt_t *stmt, sql_query_t *query, pred_helper_t *helper, plan_node_t *plan); + +typedef struct st_expl_pred { + plan_node_type_t type; + expl_pred_func_t expl_pred_func; +} expl_pred_t; + +status_t expl_pred_helper_init(sql_stmt_t *stmt, pred_helper_t *helper, uint32 mtrl_id); +status_t expl_format_predicate_row(sql_stmt_t *stmt, pred_helper_t *helper, plan_node_t *plan); + +#endif \ No newline at end of file diff --git a/pkg/src/ctsql/node/ctsql_cond.c b/pkg/src/ctsql/node/ctsql_cond.c index f15f2af720ba01815fc6c5e8271bedc739897b11..bbf7e09562c5695d89176be9c4d584e3c6d46e89 100644 --- a/pkg/src/ctsql/node/ctsql_cond.c +++ b/pkg/src/ctsql/node/ctsql_cond.c @@ -832,6 +832,55 @@ status_t sql_match_pivot_list(sql_stmt_t *stmt, expr_tree_t *for_expr, expr_tree return status; } +status_t cond_collector_init(cond_collect_helper_t *cond_context, sql_stmt_t *stmt, + void *owner, ga_alloc_func_t alloc_func) +{ + cond_context->stmt = stmt; + cond_context->p_arg0 = NULL; + cond_context->p_arg1 = NULL; + cond_context->p_arg2 = NULL; + cond_context->pp_arg0 = NULL; + cond_context->is_stoped = CT_FALSE; + cond_context->cptr_false = CT_FALSE; + cond_context->type = COLL_TYPE_TRAVERSAL; + cond_context->arg0 = CT_INVALID_ID32; + cond_context->arg1 = CT_INVALID_ID32; + CT_RETURN_IFERR(alloc_func(owner, sizeof(galist_t), (void **)&cond_context->cond)); + cm_galist_init(cond_context->cond, owner, alloc_func); + return CT_SUCCESS; +} + +status_t traverse_and_collect_conds(cond_collect_helper_t *cond_collector, cond_node_t *node) +{ + 
CT_RETURN_IFERR(sql_stack_safe(cond_collector->stmt)); + if (!node) { + return CT_SUCCESS; + } + switch (node->type) { + case COND_NODE_COMPARE: + return cm_galist_insert(cond_collector->cond, node); + case COND_NODE_AND: + CT_RETURN_IFERR(traverse_and_collect_conds(cond_collector, node->left)); + return traverse_and_collect_conds(cond_collector, node->right); + case COND_NODE_OR: + if (cond_collector->type == COLL_TYPE_OVERALL) { + return cm_galist_insert(cond_collector->cond, node); + } else if (cond_collector->type == COLL_TYPE_TRAVERSAL) { + CT_RETURN_IFERR(traverse_and_collect_conds(cond_collector, node->left)); + return traverse_and_collect_conds(cond_collector, node->right); + } + break; + case COND_NODE_FALSE: + if (cond_collector->cptr_false) { + return cm_galist_insert(cond_collector->cond, node); + } + break; + default: + break; + } + return CT_SUCCESS; +} + static inline bool32 variant_list_has_null(variant_t *vars, uint32 key_count, bool32 *is_null) { bool32 has_null = CT_FALSE; diff --git a/pkg/src/ctsql/node/ctsql_cond.h b/pkg/src/ctsql/node/ctsql_cond.h index c6936a32e3b95cba37f7d475c12d01bc29e4feb0..22d3f48b0f2aca9cdd79c14d45da5a8f2a1c164a 100644 --- a/pkg/src/ctsql/node/ctsql_cond.h +++ b/pkg/src/ctsql/node/ctsql_cond.h @@ -119,6 +119,26 @@ typedef struct st_join_cond { } join_cond_t; /* * Evaluate an expression tree of a compare node */ +typedef enum en_collect_type { + COLL_TYPE_IGNORE = 0, // Ignore Mode: Ignore specific conditions + COLL_TYPE_TRAVERSAL, // Traversal Mode: Collect each condition individually + COLL_TYPE_OVERALL, // Overall Mode: Collect every condition +} collect_type_t; + +typedef struct st_cond_collect_helper { + sql_stmt_t *stmt; + void *p_arg0; + void *p_arg1; + void *p_arg2; + void **pp_arg0; + uint32 arg0; + uint32 arg1; + galist_t *cond; + collect_type_t type; + bool32 is_stoped; + bool32 cptr_false; +} cond_collect_helper_t; + #define SQL_EXEC_CMP_OPERAND(expr, var, res, pending, stmt) \ do { \ if (sql_exec_expr((stmt), 
(expr), (var)) != CT_SUCCESS) { \ @@ -196,6 +216,9 @@ status_t visit_join_node_cond(visit_assist_t *va, sql_join_node_t *join_node, vi bool32 sql_is_join_node(cond_node_t *cond_node, uint32 table1, uint32 table2); status_t sql_exec_escape_character(expr_tree_t *expr, variant_t *var, char *escape); status_t sql_try_simplify_new_cond(sql_stmt_t *stmt, cond_node_t *cond); +status_t cond_collector_init(cond_collect_helper_t *cond_context, sql_stmt_t *stmt, + void *owner, ga_alloc_func_t alloc_func); +status_t traverse_and_collect_conds(cond_collect_helper_t *cond_collector, cond_node_t *node); // compare node can be pushed up and used as join condition: // 1.expr1 (flag==HAS_PARENT_COLS) = expr2(flag==HAS_SELF_COLS) diff --git a/pkg/src/ctsql/node/ctsql_expr.c b/pkg/src/ctsql/node/ctsql_expr.c index c9d57934b2fe2ca4a41f41289596c431491599fc..903691314025cf114523f7871573980db77a5039 100644 --- a/pkg/src/ctsql/node/ctsql_expr.c +++ b/pkg/src/ctsql/node/ctsql_expr.c @@ -3434,7 +3434,7 @@ status_t visit_func_node(visit_assist_t *visit_ass, expr_node_t *node, visit_fun return CT_SUCCESS; } -static status_t visit_case_node(visit_assist_t *visit_ass, expr_node_t *node, visit_func_t visit_func) +status_t visit_case_node(visit_assist_t *visit_ass, expr_node_t *node, visit_func_t visit_func) { case_expr_t *case_expr = (case_expr_t *)node->value.v_pointer; @@ -3531,6 +3531,9 @@ status_t visit_expr_node(visit_assist_t *visit_ass, expr_node_t **node, visit_fu return visit_func_node(visit_ass, *node, visit_func); case EXPR_NODE_CASE: + if (visit_ass->excl_flags & VA_EXCL_CASE) { + return visit_func(visit_ass, node); + } return visit_case_node(visit_ass, *node, visit_func); case EXPR_NODE_OVER: diff --git a/pkg/src/ctsql/node/ctsql_expr.h b/pkg/src/ctsql/node/ctsql_expr.h index 105e526131b073db11a630b92b366389feba0c18..372ea1fe9b0675133c6453b124c43ee5ecf7cce3 100644 --- a/pkg/src/ctsql/node/ctsql_expr.h +++ b/pkg/src/ctsql/node/ctsql_expr.h @@ -55,6 +55,7 @@ typedef status_t 
(*visit_func_t)(visit_assist_t *va, expr_node_t **node); status_t visit_expr_node(visit_assist_t *visit_ass, expr_node_t **node, visit_func_t visit_func); status_t visit_expr_tree(visit_assist_t *visit_ass, expr_tree_t *tree, visit_func_t visit_func); status_t visit_func_node(visit_assist_t *visit_ass, expr_node_t *node, visit_func_t visit_func); +status_t visit_case_node(visit_assist_t *visit_ass, expr_node_t *node, visit_func_t visit_func); bool32 sql_expr_tree_equal(sql_stmt_t *stmt, expr_tree_t *tree1, expr_tree_t *tree2, uint32 *tab_map); status_t sql_get_reserved_value(sql_stmt_t *stmt, expr_node_t *node, variant_t *val); diff --git a/pkg/src/ctsql/node/ctsql_expr_def.h b/pkg/src/ctsql/node/ctsql_expr_def.h index 879c4770d543397aa9a76bc1791fc4870d9359b9..84c562157e22a9cbdcc369c9129ab864f8223442 100644 --- a/pkg/src/ctsql/node/ctsql_expr_def.h +++ b/pkg/src/ctsql/node/ctsql_expr_def.h @@ -578,6 +578,7 @@ typedef struct st_cols_used { #define VA_EXCL_WIN_SORT 0x00000002 #define VA_EXCL_FUNC 0x00000004 #define VA_EXCL_PROC 0x00000008 +#define VA_EXCL_CASE 0x00000010 #define ANCESTOR_IDX 0 #define PARENT_IDX 1 diff --git a/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.c b/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.c index f6f682eed2ceebc4b6041a66602408ca4591627f..7e87ea6e6e6f03529ddc313136807e4b4819e8d8 100644 --- a/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.c +++ b/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.c @@ -29,11 +29,43 @@ #include "dml_parser.h" #include "plan_rbo.h" #include "plan_range.h" +#include "cm_log.h" #ifdef __cplusplus extern "C" { #endif +#define COND_PRIORITY_UNKNOWN 6 +#define COND_PRIORITY_COMPARE 1 +#define COND_PRIORITY_OR 3 +#define COND_PRIORITY_AND 2 +#define COND_PRIORITY_NOT 5 +#define COND_PRIORITY_TRUE 4 +#define COND_PRIORITY_FALSE 0 + +#define STACK_INIT_CAPACITY 32 +#define STACK_EXPAND_FACTOR 2 +#define STACK_LEVEL_NODES 2 +#define STACK_EXPAND_THRESHOLD 0.8f // stack expand threshold + +typedef struct { + cond_node_t **nodes; + 
size_t capacity; + size_t top; +} dynamic_stack_t; + +// Used for conditional reorganisation sorting. +// FALSE > CMP > AND > OR > TRUE > NOT > UNKNOWN +static const int32 g_cond_priority[] = { + COND_PRIORITY_UNKNOWN, // COND_NODE_UNKNOWN + COND_PRIORITY_COMPARE, // COND_NODE_COMPARE + COND_PRIORITY_OR, // COND_NODE_OR + COND_PRIORITY_AND, // COND_NODE_AND + COND_PRIORITY_NOT, // COND_NODE_NOT + COND_PRIORITY_TRUE, // COND_NODE_TRUE + COND_PRIORITY_FALSE, // COND_NODE_FALSE +}; + static inline status_t replace_group_node(visit_assist_t *visit_ass, expr_node_t **node) { if ((*node)->type != EXPR_NODE_GROUP || NODE_VM_ANCESTOR(*node) > 0) { @@ -178,25 +210,6 @@ static uint32 sql_get_table_column_count(sql_stmt_t *stmt, sql_table_t *table) return knl_get_column_count(table->entry->dc.handle); } } -/* *******************predicate deliver************************ */ -static inline status_t sql_init_dlvr_pair(sql_stmt_t *stmt, sql_query_t *query, dlvr_pair_t **dlvr_pair, - galist_t *pairs) -{ - uint32 mem_size; - sql_table_t *table = NULL; - - CT_RETURN_IFERR(cm_galist_new(pairs, sizeof(dlvr_pair_t), (void **)dlvr_pair)); - cm_galist_init(&(*dlvr_pair)->cols, stmt->session->stack, cm_stack_alloc); - cm_galist_init(&(*dlvr_pair)->values, stmt->session->stack, cm_stack_alloc); - - for (uint32 i = 0; i < query->tables.count; i++) { - table = (sql_table_t *)sql_array_get(&query->tables, i); - mem_size = sql_get_table_column_count(stmt, table) * sizeof(uint32); - CT_RETURN_IFERR(cm_stack_alloc(stmt->session->stack, mem_size, (void **)&(*dlvr_pair)->col_map[i])); - MEMS_RETURN_IFERR(memset_s((*dlvr_pair)->col_map[i], mem_size, 0, mem_size)); - } - return CT_SUCCESS; -} static inline bool32 if_dlvr_border_equal(sql_stmt_t *stmt, plan_border_t *border1, plan_border_t *border2) { @@ -229,45 +242,6 @@ static bool32 if_dlvr_range_equal(sql_stmt_t *stmt, plan_range_t *range1, plan_r return CT_FALSE; } -static inline bool32 sql_dlvr_pair_exists_col(expr_tree_t *col, dlvr_pair_t 
*dlvr_pair) -{ - return dlvr_pair->col_map[EXPR_TAB(col)][EXPR_COL(col)]; -} - -static inline status_t sql_dlvr_pair_add_col(expr_tree_t *column, dlvr_pair_t *dlvr_pair) -{ - uint16 tab = EXPR_TAB(column); - uint16 col = EXPR_COL(column); - if (dlvr_pair->col_map[tab][col]) { - return CT_SUCCESS; - } - dlvr_pair->col_map[tab][col] = CT_TRUE; - return cm_galist_insert(&dlvr_pair->cols, column); -} - -static status_t sql_dlvr_pair_try_add_ff(expr_tree_t *left, expr_tree_t *right, dlvr_pair_t *dlvr_pair, bool32 *is_found) -{ - if (sql_dlvr_pair_exists_col(left, dlvr_pair)) { - *is_found = CT_TRUE; - return sql_dlvr_pair_add_col(right, dlvr_pair); - } - - if (sql_dlvr_pair_exists_col(right, dlvr_pair)) { - *is_found = CT_TRUE; - return sql_dlvr_pair_add_col(left, dlvr_pair); - } - return CT_SUCCESS; -} - -static status_t sql_dlvr_pairs_add_ff_pair(sql_stmt_t *stmt, sql_query_t *query, expr_tree_t *left, expr_tree_t *right, - galist_t *pairs) -{ - dlvr_pair_t *dlvr_pair = NULL; - CT_RETURN_IFERR(sql_init_dlvr_pair(stmt, query, &dlvr_pair, pairs)); - CT_RETURN_IFERR(sql_dlvr_pair_add_col(left, dlvr_pair)); - return sql_dlvr_pair_add_col(right, dlvr_pair); -} - bool32 get_specified_level_query(sql_query_t *curr_query, uint32 level, sql_query_t **query, sql_select_t **subslct) { uint32 depth = 0; @@ -288,7 +262,7 @@ bool32 get_specified_level_query(sql_query_t *curr_query, uint32 level, sql_quer return CT_TRUE; } -static inline bool32 if_cond_can_be_pulled(expr_node_t *node) +static inline bool32 if_expr_node_can_be_pulled(expr_node_t *node) { switch (node->type) { case EXPR_NODE_CONST: @@ -304,6 +278,18 @@ static inline bool32 if_cond_can_be_pulled(expr_node_t *node) } } +static inline bool32 if_value_can_be_pulled(expr_tree_t *val) +{ + while (val != NULL) { + if (!if_expr_node_can_be_pulled(val->root)) { + return CT_FALSE; + } + val = val->next; + } + + return CT_TRUE; +} + static inline bool32 if_range_need_merge(plan_range_t *range) { if (range->type != RANGE_SECTION 
&& range->type != RANGE_POINT) { @@ -332,7 +318,7 @@ static inline bool32 sql_dlvr_inter_border(sql_stmt_t *stmt, plan_border_t *bord return sql_inter_const_range(stmt, border1, border2, is_left, result); } -static inline bool32 sql_dlvr_inter_range(sql_stmt_t *stmt, plan_range_t *range1, plan_range_t *range2, +static inline bool32 ctsql_dlvr_inter_range(sql_stmt_t *stmt, plan_range_t *range1, plan_range_t *range2, plan_range_t *result) { if (!sql_dlvr_inter_border(stmt, &range1->left, &range2->left, BORDER_INFINITE_LEFT, &result->left, CT_TRUE)) { @@ -353,41 +339,6 @@ static inline bool32 sql_dlvr_inter_range(sql_stmt_t *stmt, plan_range_t *range1 return CT_TRUE; } -static inline status_t sql_dlvr_pair_add_range(sql_stmt_t *stmt, dlvr_pair_t *pair, plan_range_t *new_range, - bool32 *is_false) -{ - plan_range_t result; - plan_range_t *range = NULL; - - for (uint32 i = 0; i < pair->values.count; i++) { - range = (plan_range_t *)cm_galist_get(&pair->values, i); - if (if_range_need_merge(range) && if_range_need_merge(new_range) && - sql_dlvr_inter_range(stmt, range, new_range, &result)) { - if (result.type == RANGE_EMPTY) { - *is_false = CT_TRUE; - } else { - *range = result; - } - return CT_SUCCESS; - } - if (if_dlvr_range_equal(stmt, range, new_range)) { - return CT_SUCCESS; - } - } - return cm_galist_insert(&pair->values, new_range); -} - -static inline bool32 sql_dlvr_pair_exists_value(sql_stmt_t *stmt, plan_range_t *new_range, dlvr_pair_t *pair) -{ - for (uint32 i = 0; i < pair->values.count; i++) { - plan_range_t *range = (plan_range_t *)cm_galist_get(&pair->values, i); - if (range->type == RANGE_POINT && if_dlvr_range_equal(stmt, range, new_range)) { - return CT_TRUE; - } - } - return CT_FALSE; -} - static inline status_t sql_dlvr_make_range(sql_stmt_t *stmt, ct_type_t col_datatype, cmp_type_t cmp_type, expr_tree_t *val, plan_range_t **range) { @@ -400,20 +351,6 @@ static inline status_t sql_dlvr_make_range(sql_stmt_t *stmt, ct_type_t col_datat return 
CT_SUCCESS; } -static inline bool32 if_cond_num_exceed_max(sql_query_t *query, galist_t *pairs) -{ - dlvr_pair_t *dlvr_pair = NULL; - - for (uint32 i = 0; i < pairs->count; i++) { - dlvr_pair = (dlvr_pair_t *)cm_galist_get(pairs, i); - if (dlvr_pair->cols.count > CT_MAX_DLVR_COLS_COUNT) { - return CT_TRUE; - } - } - - return CT_FALSE; -} - static inline bool32 if_cond_dlvr_support(cond_node_t *cond) { cols_used_t cols_used; @@ -426,8 +363,33 @@ static inline bool32 if_cond_dlvr_support(cond_node_t *cond) return (cond->cmp->type == CMP_TYPE_EQUAL); } -static status_t dlvr_pull_range_with_cmp(sql_stmt_t *stmt, cond_node_t *cond, dlvr_pair_t *dlvr_pair, - expr_tree_t *ancestor_col, bool32 *is_false) +status_t ctsql_pred_add_range(sql_stmt_t *stmt, pred_node_t *pred, plan_range_t *new_range, + bool32 *is_conflict) +{ + plan_range_t result; + plan_range_t *range = NULL; + + for (uint32 i = 0; i < pred->values.count; i++) { + range = (plan_range_t *)cm_galist_get(&pred->values, i); + if (if_range_need_merge(range) && if_range_need_merge(new_range) && + ctsql_dlvr_inter_range(stmt, range, new_range, &result)) { + if (result.type == RANGE_EMPTY) { + *is_conflict = CT_TRUE; + } else { + *range = result; + } + return CT_SUCCESS; + } + if (if_dlvr_range_equal(stmt, range, new_range)) { + return CT_SUCCESS; + } + } + return cm_galist_insert(&pred->values, new_range); +} + + +static status_t dlvr_pull_range_with_cmp(sql_stmt_t *stmt, cond_node_t *cond, pred_node_t *pred, + expr_tree_t *ancestor_col, bool32 *is_conflict) { expr_tree_t *val = NULL; expr_tree_t *col = NULL; @@ -454,36 +416,34 @@ static status_t dlvr_pull_range_with_cmp(sql_stmt_t *stmt, cond_node_t *cond, dl return CT_SUCCESS; } - if (!if_cond_can_be_pulled(val->root)) { + if (!if_value_can_be_pulled(val)) { return CT_SUCCESS; } CT_RETURN_IFERR(sql_dlvr_make_range(stmt, TREE_DATATYPE(col), cmp_type, val, &new_range)); - return sql_dlvr_pair_add_range(stmt, dlvr_pair, new_range, is_false); + return 
ctsql_pred_add_range(stmt, pred, new_range, is_conflict); } -static status_t dlvr_pull_ancestor_range(sql_stmt_t *stmt, cond_node_t *cond, dlvr_pair_t *dlvr_pair, - expr_tree_t *ancestor_col, bool32 *is_false) +static status_t dlvr_pull_ancestor_range(sql_stmt_t *stmt, cond_node_t *cond, pred_node_t *pred, + expr_tree_t *ancestor_col, bool32 *is_conflict) { CT_RETURN_IFERR(sql_stack_safe(stmt)); - switch (cond->type) { - case COND_NODE_AND: - CT_RETURN_IFERR(dlvr_pull_ancestor_range(stmt, cond->left, dlvr_pair, ancestor_col, is_false)); - if (*is_false) { - return CT_SUCCESS; - } - return dlvr_pull_ancestor_range(stmt, cond->right, dlvr_pair, ancestor_col, is_false); - - case COND_NODE_COMPARE: - return dlvr_pull_range_with_cmp(stmt, cond, dlvr_pair, ancestor_col, is_false); - - default: + cond_node_type_t cond_type = cond->type; + if (cond_type == COND_NODE_AND) { + CT_RETURN_IFERR(dlvr_pull_ancestor_range(stmt, cond->left, pred, ancestor_col, is_conflict)); + if (*is_conflict) { return CT_SUCCESS; + } + return dlvr_pull_ancestor_range(stmt, cond->right, pred, ancestor_col, is_conflict); + } else if (cond_type == COND_NODE_COMPARE) { + return dlvr_pull_range_with_cmp(stmt, cond, pred, ancestor_col, is_conflict); } + + return CT_SUCCESS; } -static inline status_t sql_dlvr_pull_ancestor_cond(sql_stmt_t *stmt, sql_query_t *query, dlvr_pair_t *dlvr_pair, - plan_range_t *range, bool32 *is_false) +static inline status_t ctsql_dlvr_pull_ancestor_conds(sql_stmt_t *stmt, sql_query_t *query, pred_node_t *pred, + plan_range_t *range, bool32 *is_conflict) { if (range->type != RANGE_POINT) { return CT_SUCCESS; @@ -502,97 +462,7 @@ static inline status_t sql_dlvr_pull_ancestor_cond(sql_stmt_t *stmt, sql_query_t return CT_SUCCESS; } - return dlvr_pull_ancestor_range(stmt, ancestor_query->cond->root, dlvr_pair, val, is_false); -} - -static inline status_t sql_dlvr_pair_add_values(sql_stmt_t *stmt, sql_query_t *query, dlvr_pair_t *dlvr_pair, - plan_range_t *new_range, bool32 
*is_false) -{ - CT_RETURN_IFERR(sql_dlvr_pair_add_range(stmt, dlvr_pair, new_range, is_false)); - if (*is_false) { - return CT_SUCCESS; - } - return sql_dlvr_pull_ancestor_cond(stmt, query, dlvr_pair, new_range, is_false); -} - -static inline status_t sql_dlvr_merge_pair_values(sql_stmt_t *stmt, dlvr_pair_t *src, dlvr_pair_t *dst, - bool32 *is_false) -{ - for (uint32 i = 0; i < src->values.count; i++) { - plan_range_t *range = (plan_range_t *)cm_galist_get(&src->values, i); - CT_RETURN_IFERR(sql_dlvr_pair_add_range(stmt, dst, range, is_false)); - if (*is_false) { - break; - } - } - return CT_SUCCESS; -} - -static inline status_t sql_dlvr_merge_pair_columns(dlvr_pair_t *src, dlvr_pair_t *dst) -{ - for (uint32 i = 0; i < src->cols.count; i++) { - expr_tree_t *col = (expr_tree_t *)cm_galist_get(&src->cols, i); - CT_RETURN_IFERR(sql_dlvr_pair_add_col(col, dst)); - } - return CT_SUCCESS; -} - -static inline status_t sql_dlvr_merge_pair(sql_stmt_t *stmt, dlvr_pair_t *src, dlvr_pair_t *dst, bool32 *is_false) -{ - CT_RETURN_IFERR(sql_dlvr_merge_pair_values(stmt, src, dst, is_false)); - if (*is_false) { - return CT_SUCCESS; - } - return sql_dlvr_merge_pair_columns(src, dst); -} - -static inline status_t sql_dlvr_try_merge_pairs(sql_stmt_t *stmt, uint32 start_pos, expr_tree_t *left, - expr_tree_t *right, dlvr_pair_t *merge_pair, galist_t *pairs, bool32 *is_false) -{ - dlvr_pair_t *dlvr_pair = NULL; - - for (uint32 i = start_pos; i < pairs->count;) { - dlvr_pair = (dlvr_pair_t *)cm_galist_get(pairs, i); - if (sql_dlvr_pair_exists_col(left, dlvr_pair) || (right != NULL && - sql_dlvr_pair_exists_col(right, dlvr_pair))) { - CT_RETURN_IFERR(sql_dlvr_merge_pair(stmt, dlvr_pair, merge_pair, is_false)); - if (*is_false) { - break; - } - cm_galist_delete(pairs, i); - continue; - } - i++; - } - return CT_SUCCESS; -} - -static inline status_t sql_dlvr_pairs_add_ff(sql_stmt_t *stmt, sql_query_t *query, expr_tree_t *left, - expr_tree_t *right, galist_t *pairs, bool32 *is_false) -{ - 
bool32 is_found = CT_FALSE; - dlvr_pair_t *pair = NULL; - - for (uint32 i = 0; i < pairs->count; i++) { - pair = (dlvr_pair_t *)cm_galist_get(pairs, i); - CT_RETURN_IFERR(sql_dlvr_pair_try_add_ff(left, right, pair, &is_found)); - if (is_found) { - return sql_dlvr_try_merge_pairs(stmt, i + 1, left, right, pair, pairs, is_false); - } - } - return sql_dlvr_pairs_add_ff_pair(stmt, query, left, right, pairs); -} - -static inline status_t sql_dlvr_pairs_add_fv_pair(sql_stmt_t *stmt, sql_query_t *query, expr_tree_t *col, - plan_range_t *new_range, galist_t *pairs) -{ - bool32 is_false = CT_FALSE; - dlvr_pair_t *pair = NULL; - - CT_RETURN_IFERR(sql_init_dlvr_pair(stmt, query, &pair, pairs)); - CT_RETURN_IFERR(sql_dlvr_pair_add_col(col, pair)); - CT_RETURN_IFERR(cm_galist_insert(&pair->values, new_range)); - return sql_dlvr_pull_ancestor_cond(stmt, query, pair, new_range, &is_false); + return dlvr_pull_ancestor_range(stmt, ancestor_query->cond->root, pred, val, is_conflict); } static inline bool32 has_semi_in_expr(sql_query_t *query, expr_tree_t *expr) @@ -644,7 +514,7 @@ static status_t expr_node_is_dlvr_value(visit_assist_t *visit_ass, expr_node_t * return CT_SUCCESS; } -static inline status_t expr_tree_is_dlvr_value(sql_stmt_t *stmt, expr_tree_t *expr_tree, bool32 *is_dlvr) +static inline status_t is_val_can_dlvr(sql_stmt_t *stmt, expr_tree_t *expr_tree, bool32 *is_dlvr) { visit_assist_t visit_ass; sql_init_visit_assist(&visit_ass, stmt, NULL); @@ -662,7 +532,7 @@ static inline status_t pre_generate_dlvr_cond(sql_stmt_t *stmt, expr_tree_t *col return sql_clone_expr_tree(stmt->context, column, &(*node)->cmp->left, sql_alloc_mem); } -static inline status_t sql_generate_ff_cond(sql_stmt_t *stmt, expr_tree_t *left, expr_tree_t *right, cond_tree_t *cond, +static inline status_t sql_generate_join_cond(sql_stmt_t *stmt, expr_tree_t *left, expr_tree_t *right, cond_tree_t *cond, bool32 has_filter_cond) { cond_node_t *node = NULL; @@ -741,7 +611,7 @@ static inline status_t 
generate_range_section_cond(sql_stmt_t *stmt, expr_tree_t return sql_add_cond_node_left(cond, node); } -static inline status_t sql_generate_fv_cond(sql_stmt_t *stmt, expr_tree_t *left, plan_range_t *range, cond_tree_t *cond) +static inline status_t sql_generate_filter_cond(sql_stmt_t *stmt, expr_tree_t *left, plan_range_t *range, cond_tree_t *cond) { switch (range->type) { case RANGE_LIST: @@ -762,114 +632,53 @@ static inline status_t sql_generate_fv_cond(sql_stmt_t *stmt, expr_tree_t *left, } } -static inline status_t sql_generate_fv_dlvr_conds(sql_stmt_t *stmt, cond_tree_t *cond, dlvr_pair_t *dlvr_pair) +static inline status_t sql_pred_rebuild_filter_conds(sql_stmt_t *stmt, cond_tree_t *cond, pred_node_t *pred) { expr_tree_t *col = NULL; plan_range_t *range = NULL; + uint32 i = 0; - for (uint32 i = 0; i < dlvr_pair->cols.count; i++) { - col = (expr_tree_t *)cm_galist_get(&dlvr_pair->cols, i); - for (uint32 j = 0; j < dlvr_pair->values.count; j++) { - range = (plan_range_t *)cm_galist_get(&dlvr_pair->values, j); - CT_RETURN_IFERR(sql_generate_fv_cond(stmt, col, range, cond)); + while (i < pred->cols.count) { + col = (expr_tree_t *)cm_galist_get(&pred->cols, i++); + for (uint32 j = 0; j < pred->values.count; j++) { + range = (plan_range_t *)cm_galist_get(&pred->values, j); + CT_RETURN_IFERR(sql_generate_filter_cond(stmt, col, range, cond)); } } + return CT_SUCCESS; } -static status_t sql_generate_ff_dlvr_conds(sql_stmt_t *stmt, cond_tree_t *cond, dlvr_pair_t *dlvr_pair) +static status_t sql_pred_rebuild_join_conds(sql_stmt_t *stmt, cond_tree_t *cond, pred_node_t *pred) { expr_tree_t *left = NULL; expr_tree_t *right = NULL; plan_range_t *range = NULL; bool32 has_filter_cond = CT_FALSE; - for (uint32 j = 0; j < dlvr_pair->values.count; j++) { - range = (plan_range_t *)cm_galist_get(&dlvr_pair->values, j); + + for (uint32 j = 0; j < pred->values.count; j++) { + range = (plan_range_t *)cm_galist_get(&pred->values, j); if (range->type == RANGE_POINT) { has_filter_cond = 
CT_TRUE; break; } } - for (uint32 i = 0; i < dlvr_pair->cols.count - 1; i++) { - left = (expr_tree_t *)cm_galist_get(&dlvr_pair->cols, i); - for (uint32 j = i + 1; j < dlvr_pair->cols.count; j++) { - right = (expr_tree_t *)cm_galist_get(&dlvr_pair->cols, j); - CT_RETURN_IFERR(sql_generate_ff_cond(stmt, left, right, cond, has_filter_cond)); + for (uint32 i = 0; i < pred->cols.count - 1; i++) { + left = (expr_tree_t *)cm_galist_get(&pred->cols, i); + for (uint32 j = i + 1; j < pred->cols.count; j++) { + right = (expr_tree_t *)cm_galist_get(&pred->cols, j); + CT_RETURN_IFERR(sql_generate_join_cond(stmt, left, right, cond, has_filter_cond)); } } return CT_SUCCESS; } -static inline status_t sql_generate_dlvr_cond(sql_stmt_t *stmt, cond_tree_t *cond, dlvr_pair_t *dlvr_pair) +static inline status_t sql_pred_rebuild_conds(sql_stmt_t *stmt, cond_tree_t *cond, pred_node_t *pred) { // generate join condition - CT_RETURN_IFERR(sql_generate_ff_dlvr_conds(stmt, cond, dlvr_pair)); + CT_RETURN_IFERR(sql_pred_rebuild_join_conds(stmt, cond, pred)); // generate filter condition - return sql_generate_fv_dlvr_conds(stmt, cond, dlvr_pair); -} - -static inline status_t sql_generate_dlvr_conds(sql_stmt_t *stmt, cond_tree_t *cond, galist_t *pairs) -{ - dlvr_pair_t *dlvr_pair = NULL; - - for (uint32 i = 0; i < pairs->count; i++) { - dlvr_pair = (dlvr_pair_t *)cm_galist_get(pairs, i); - CT_RETURN_IFERR(sql_generate_dlvr_cond(stmt, cond, dlvr_pair)); - } - return CT_SUCCESS; -} - -static status_t sql_generate_dlvr_pairs(sql_stmt_t *stmt, sql_query_t *query, expr_tree_t *l_col, galist_t *values, - galist_t *ff_pairs, galist_t *dlvr_pairs) -{ - bool32 is_found = CT_FALSE; - dlvr_pair_t *ff_pair = NULL; - dlvr_pair_t *dlvr_pair = NULL; - - for (uint32 i = 0; i < ff_pairs->count; i++) { - ff_pair = (dlvr_pair_t *)cm_galist_get(ff_pairs, i); - if (sql_dlvr_pair_exists_col(l_col, ff_pair)) { - is_found = CT_TRUE; - break; - } - } - - if (!is_found) { - return CT_SUCCESS; - } - - 
CT_RETURN_IFERR(sql_init_dlvr_pair(stmt, query, &dlvr_pair, dlvr_pairs)); - - for (uint32 i = 0; i < ff_pair->cols.count; i++) { - expr_tree_t *r_col = (expr_tree_t *)cm_galist_get(&ff_pair->cols, i); - if (EXPR_TAB(r_col) == EXPR_TAB(l_col) && EXPR_COL(r_col) == EXPR_COL(l_col)) { - continue; - } - CT_RETURN_IFERR(sql_dlvr_pair_add_col(r_col, dlvr_pair)); - } - - for (uint32 i = 0; i < values->count; i++) { - plan_range_t *range = (plan_range_t *)cm_galist_get(values, i); - CT_RETURN_IFERR(cm_galist_insert(&dlvr_pair->values, range)); - } - return CT_SUCCESS; -} - -static inline status_t sql_try_generate_dlvr_pairs(sql_stmt_t *stmt, sql_query_t *query, galist_t *ff_pairs, - galist_t *fv_pairs, galist_t *dlvr_pairs) -{ - for (uint32 i = 0; i < fv_pairs->count; i++) { - dlvr_pair_t *pair = (dlvr_pair_t *)cm_galist_get(fv_pairs, i); - if (pair->values.count == 0) { - continue; - } - - for (uint32 j = 0; j < pair->cols.count; j++) { - expr_tree_t *col = (expr_tree_t *)cm_galist_get(&pair->cols, j); - CT_RETURN_IFERR(sql_generate_dlvr_pairs(stmt, query, col, &pair->values, ff_pairs, dlvr_pairs)); - } - } - return CT_SUCCESS; + return sql_pred_rebuild_filter_conds(stmt, cond, pred); } static inline bool32 sink_oper_remove_node(sql_stmt_t *stmt, cond_node_t *cond, biqueue_t *cond_que) @@ -1180,6 +989,950 @@ static inline bool32 check_exists_subslct_with_index(const cmp_node_t *cmp, cons return CT_FALSE; } +static status_t ct_cond_list_init(sql_stmt_t *stmt, galist_t **cmp_list_ptr, galist_t **and_list_ptr) +{ + if (cmp_list_ptr == NULL || and_list_ptr == NULL) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] cmp_list_ptr or and_list_ptr is invalid."); + return CT_ERROR; + } + if (sql_push(stmt, sizeof(galist_t), (void **)cmp_list_ptr) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Cmp_list push failed."); + return CT_ERROR; + } + cm_galist_init(*cmp_list_ptr, stmt, sql_stack_alloc); + + if (sql_push(stmt, sizeof(galist_t), (void **)and_list_ptr) != CT_SUCCESS) { + 
CT_LOG_RUN_ERR("[CONDITION_REWRITE] And_list push failed."); + return CT_ERROR; + } + cm_galist_init(*and_list_ptr, stmt, sql_stack_alloc); + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Cond_list init success."); + return CT_SUCCESS; +} + +// stack dynamic expansion +static status_t dynamic_stack_expand(sql_stmt_t *stmt, dynamic_stack_t *stack) +{ + if (stack == NULL) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Stack is NULL."); + return CT_ERROR; + } + + size_t new_cap = stack->capacity * STACK_EXPAND_FACTOR; + cond_node_t **new_nodes = NULL; + cond_node_t **old_nodes = stack->nodes; + + new_nodes = (cond_node_t **)malloc(sizeof(cond_node_t *) * new_cap); + if (new_nodes == NULL) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Stack malloc failed."); + return CT_ERROR; + } + + MEMS_RETURN_IFERR( + memcpy_s(new_nodes, sizeof(cond_node_t *) * new_cap, stack->nodes, sizeof(cond_node_t *) * stack->top)); + + stack->nodes = new_nodes; + stack->capacity = new_cap; + if (old_nodes != NULL) { + free(old_nodes); + } + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Stack expanded to capacity:%zu", new_cap); + return CT_SUCCESS; +} + +// flatten the condition tree to a list(Preorder traversal) +static inline status_t ctsql_flatten_cond_node(sql_stmt_t *stmt, cond_node_t *cond, galist_t *cond_list, + galist_t *and_list) +{ + CT_RETURN_IFERR(sql_stack_safe(stmt)); + + // Initialize queue + dynamic_stack_t stack = { 0 }; + stack.capacity = STACK_INIT_CAPACITY; + + stack.nodes = (cond_node_t **)malloc(sizeof(cond_node_t *) * stack.capacity); + if (stack.nodes == NULL) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Stack malloc failed."); + return CT_ERROR; + } + + if (cond == NULL) { + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] NULL condition, condition flatten is invalid."); + return CT_SUCCESS; + } + + // Enqueue initial condition + stack.nodes[stack.top++] = cond; + + while (stack.top > 0) { + if (stack.top > stack.capacity) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition stack out of range."); + return 
CT_ERROR; + } + --stack.top; + cond_node_t *current = stack.nodes[stack.top]; + + if (current->type == COND_NODE_AND) { + // Process AND node + if ((cm_galist_insert(and_list, current)) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] AND list insert error."); + return CT_ERROR; + } + + // Expand queue capacity(reserve 20% free space) + if ((float)stack.top + STACK_LEVEL_NODES > (float)stack.capacity * STACK_EXPAND_THRESHOLD) { + CT_RETURN_IFERR(dynamic_stack_expand(stmt, &stack)); + } + + if (current->right != NULL) { + stack.nodes[stack.top++] = current->right; + } + if (current->left != NULL) { + stack.nodes[stack.top++] = current->left; + } + + } else { + if (cm_galist_insert(cond_list, current) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition list insert error."); + return CT_ERROR; + } + } + } + + if (stack.nodes!= NULL) { + free(stack.nodes); + } + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Flattened condition tree SUCCESS"); + return CT_SUCCESS; +} + +static bool32 check_expr_node_4_reorder(expr_node_t *expr_node) +{ + if (expr_node->type == EXPR_NODE_COLUMN || expr_node->type == EXPR_NODE_TRANS_COLUMN || + expr_node->type == EXPR_NODE_CONST || expr_node->type == EXPR_NODE_PARAM || + expr_node->type == EXPR_NODE_CSR_PARAM || expr_node->type == EXPR_NODE_RESERVED || + expr_node->type == EXPR_NODE_SELECT || expr_node->type == EXPR_NODE_PL_ATTR) { + return CT_TRUE; + } + if (expr_node->type == EXPR_NODE_V_ADDR) { + return sql_pair_type_is_plvar(expr_node); + } + return CT_FALSE; +} + +static bool32 check_expr_tree_4_reorder(expr_tree_t *expr_tree) +{ + for (; expr_tree != NULL; expr_tree = expr_tree->next) { + if (!check_expr_node_4_reorder(expr_tree->root)) { + return CT_FALSE; + } + } + return CT_TRUE; +} + +static bool32 check_cond_node(const cond_node_t *cond_node) +{ + if (cond_node->type != COND_NODE_COMPARE) { + return CT_FALSE; + } + if (check_expr_tree_4_reorder(cond_node->cmp->left) == CT_TRUE && + 
check_expr_tree_4_reorder(cond_node->cmp->right) == CT_TRUE) { + return CT_FALSE; + } + return CT_TRUE; +} + +// result: +// >=0: cond1 is better +// <0: cond2 is better +static status_t compare_cond_types(const cond_node_t *cond1, const cond_node_t *cond2, int32 *result) +{ + if (check_cond_node(cond1) == CT_TRUE) { + *result = 0; + return CT_SUCCESS; + } + if (check_cond_node(cond2) == CT_TRUE) { + *result = 0; + return CT_SUCCESS; + } + + // Compare different condition types + if (cond1->type != cond2->type) { + *result = g_cond_priority[(uint32)cond1->type] - g_cond_priority[(uint32)cond2->type]; + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Different condition types, result: %d", *result); + return CT_SUCCESS; + } + // Same condition types (non-CMP) + if (cond1->type != COND_NODE_COMPARE) { + *result = 0; + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Same condition types(Non-CMP), result: %d", *result); + return CT_SUCCESS; + } + + return CT_ERROR; +} + +static inline bool32 ctsql_is_simple_expr(expr_tree_t *expr) +{ + if (expr->next != NULL) { + return CT_FALSE; + } + + if (expr->root->type == EXPR_NODE_COLUMN || expr->root->type == EXPR_NODE_CONST || + expr->root->type == EXPR_NODE_PARAM || expr->root->type == EXPR_NODE_CSR_PARAM || + expr->root->type == EXPR_NODE_RESERVED || expr->root->type == EXPR_NODE_TRANS_COLUMN) { + return CT_TRUE; + } + return CT_FALSE; +} + +// Check if left side is a simple expression and right side is NULL or constant/bound parameter +static inline bool32 check_single_col(const cmp_node_t *cmp, const expr_tree_t *left_expr, const expr_tree_t *right_expr) +{ + if (left_expr == NULL) { + return CT_FALSE; + } + if (!ctsql_is_simple_expr(cmp->left)) { + return CT_FALSE; + } + return right_expr == NULL || sql_is_single_const_or_param(right_expr->root); +} + +static inline bool32 ctsql_check_single_col_filter_cond(const cmp_node_t *cmp1, const cmp_node_t *cmp2, + const expr_tree_t *l1, const expr_tree_t *l2, + const expr_tree_t *r1, const 
expr_tree_t *r2, int32 *result) +{ + if (check_single_col(cmp1, l1, r1)) { + *result = 0; + return CT_TRUE; + } + if (check_single_col(cmp2, l2, r2)) { + *result = 1; + return CT_TRUE; + } + + return CT_FALSE; +} + +// Equality condition with both sides as simple columns +static inline bool32 ctsql_check_cmp_join_cond(const cmp_node_t *cmp1, const cmp_node_t *cmp2, const expr_tree_t *l1, + const expr_tree_t *l2, const expr_tree_t *r1, const expr_tree_t *r2, + int32 *result) +{ + if (cmp1->type == CMP_TYPE_EQUAL && l1->root->type == EXPR_NODE_COLUMN && r1->root->type == EXPR_NODE_COLUMN) { + *result = 0; + return CT_TRUE; + } + if (cmp2->type == CMP_TYPE_EQUAL && l2->root->type == EXPR_NODE_COLUMN && r2->root->type == EXPR_NODE_COLUMN) { + *result = 1; + return CT_TRUE; + } + + return CT_FALSE; +} + +static inline bool32 is_single_select_expr(const expr_tree_t *expr) +{ + return (expr != NULL) && (expr->root->type == EXPR_NODE_SELECT) && (expr->next == NULL); +} + +static inline bool32 is_valid_index_condition(sql_table_t *table) +{ + return (INDEX_ONLY_SCAN(table->scan_flag) || + ((table->index->primary || table->index->unique) && table->idx_equal_to == table->index->column_count)); +} + +static inline bool32 ctsql_check_exists_subslct_with_index(const expr_tree_t *l_expr, const expr_tree_t *r_expr, + int32 *result) +{ + if (l_expr != NULL || !is_single_select_expr(r_expr)) { + return CT_FALSE; + } + + sql_select_t *subslct = (sql_select_t *)r_expr->root->value.v_obj.ptr; + if (subslct->root->type != SELECT_NODE_QUERY) { + return CT_FALSE; + } + + if (subslct->first_query->tables.count > 1) { + return CT_FALSE; + } + sql_table_t *table = (sql_table_t *)sql_array_get(&subslct->first_query->tables, 0); + if (table->index != NULL && is_valid_index_condition(table)) { + *result = 1; + return CT_TRUE; + } + return CT_FALSE; +} + +bool32 is_cmp_same_datatype(ct_type_t l_type, ct_type_t r_type) +{ + if (CT_IS_NUMERIC_TYPE2(l_type, r_type) + || CT_IS_STRING_TYPE2(l_type, 
r_type) + || CT_IS_BINARY_TYPE2(l_type, r_type) + || CT_IS_DATETIME_TYPE2(l_type, r_type)) { + return CT_TRUE; + } + + return l_type == r_type; +} + +// create a new predicates node in list +status_t sql_init_pred_node(sql_stmt_t *stmt, sql_query_t *query, pred_node_t **node, galist_t *nodes) +{ + uint32 col_size; + sql_table_t *table = NULL; + uint32 tab_count = query->tables.count; + + CT_RETURN_IFERR(cm_galist_new(nodes, sizeof(pred_node_t), (void **)node)); + cm_galist_init(&(*node)->cols, stmt->session->stack, cm_stack_alloc); + cm_galist_init(&(*node)->values, stmt->session->stack, cm_stack_alloc); + CT_RETURN_IFERR(sql_stack_alloc(stmt, tab_count * sizeof(int16 *), (void **)&(*node)->col_map)); + + for (uint32 i = 0; i < tab_count; i++) { + table = (sql_table_t *)sql_array_get(&query->tables, i); + col_size = sql_get_table_column_count(stmt, table) * sizeof(int16); + CT_RETURN_IFERR(sql_stack_alloc(stmt, col_size, (void **)&(*node)->col_map[i])); + MEMS_RETURN_IFERR(memset_s((*node)->col_map[i], col_size, 0, col_size)); + } + + return CT_SUCCESS; +} + +status_t ctsql_pred_add_col(pred_node_t *pred, expr_tree_t *column) +{ + int16 tab_id = EXPR_TAB(column); + int16 col_id = EXPR_COL(column); + if (pred->col_map[tab_id][col_id] == 1) { + return CT_SUCCESS; + } + pred->col_map[tab_id][col_id] = 1; + return cm_galist_insert(&pred->cols, column); +} + +bool32 ctsql_pred_exist_col(pred_node_t *pred, expr_tree_t *column) +{ + return pred->col_map[EXPR_TAB(column)][EXPR_COL(column)] == 1; +} + +bool32 ctsql_pred_exist_range(sql_stmt_t *stmt, pred_node_t *pred, plan_range_t *new_range) +{ + plan_range_t *range = NULL; + if (new_range->type != RANGE_POINT) { + return CT_FALSE; + } + + for (uint32 i = 0; i < pred->values.count; i++) { + range = (plan_range_t *)cm_galist_get(&pred->values, i); + if (range->type == RANGE_POINT + && if_dlvr_border_equal(stmt, &range->left, &new_range->left)) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +status_t 
ctsql_pred_merge_two(sql_stmt_t *stmt, pred_node_t *src_pred, pred_node_t *dst_pred, bool32 *is_conflict) +{ + plan_range_t *range = NULL; + expr_tree_t *col = NULL; + + for (uint32 i = 0; i < src_pred->values.count; i++) { + range = (plan_range_t *)cm_galist_get(&src_pred->values, i); + CT_RETURN_IFERR(ctsql_pred_add_range(stmt, dst_pred, range, is_conflict)); + if (*is_conflict) { + return CT_SUCCESS; + } + } + + for (uint32 i = 0; i < src_pred->cols.count; i++) { + col = (expr_tree_t *)cm_galist_get(&src_pred->cols, i); + CT_RETURN_IFERR(ctsql_pred_add_col(dst_pred, col)); + } + + return CT_SUCCESS; +} + +status_t ctsql_try_dlvr_preds(sql_stmt_t *stmt, sql_query_t *query, expr_tree_t *l_expr, expr_tree_t *r_expr, pred_node_t *dst_pred, + galist_t *preds, plan_range_t *new_range, uint32 start, bool32 *is_conflict) +{ + pred_node_t *pred = NULL; + + while (start < preds->count) { + pred = (pred_node_t *)cm_galist_get(preds, start); + if (ctsql_pred_exist_col(pred, l_expr) + || (r_expr != NULL && ctsql_pred_exist_col(pred, r_expr)) + || (new_range != NULL && ctsql_pred_exist_range(stmt, pred, new_range))) { + CT_RETURN_IFERR(ctsql_pred_merge_two(stmt, pred, dst_pred, is_conflict)); + if (*is_conflict) { + return CT_SUCCESS; + } + cm_galist_delete(preds, start); + continue; + } + start++; + } + return CT_SUCCESS; +} + +status_t ctsql_collect_join_preds(sql_stmt_t *stmt, sql_query_t *query, cond_node_t *cond, dlvr_info_t *dlvr_info, + bool32 *is_conflict) +{ + galist_t *preds = &dlvr_info->pred_nodes; + expr_tree_t *l_expr = cond->cmp->left; + expr_tree_t *r_expr = cond->cmp->right; + pred_node_t *pred = NULL; + // t1.a = t1.a => t1.a IS NOT NULL + if (EXPR_TAB(l_expr) == EXPR_TAB(r_expr) && EXPR_COL(l_expr) == EXPR_COL(r_expr)) { + cond->cmp->type = CMP_TYPE_IS_NOT_NULL; + cond->cmp->right = NULL; + return cm_galist_insert(&dlvr_info->graft_nodes, cond); + } + /* + If the current condition can be merged into the current node, then this condition may cause all + the 
subsequent nodes to be merged. Therefore, after inserting the current condition into the + current node, it will iterate through the conditions in the subsequent nodes and attempt to merge them. + */ + for (uint32 i = 0; i < preds->count; ++i) { + pred = (pred_node_t *)cm_galist_get(preds, i); + if (ctsql_pred_exist_col(pred, l_expr) || ctsql_pred_exist_col(pred, r_expr)) { + CT_RETURN_IFERR(ctsql_pred_add_col(pred, l_expr)); + CT_RETURN_IFERR(ctsql_pred_add_col(pred, r_expr)); + return ctsql_try_dlvr_preds(stmt, query, l_expr, r_expr, pred, preds, NULL, i + 1, is_conflict); + } + } + // add new pred node + pred = NULL; + CT_RETURN_IFERR(sql_init_pred_node(stmt, query, &pred, preds)); + CT_RETURN_IFERR(ctsql_pred_add_col(pred, l_expr)); + return ctsql_pred_add_col(pred, r_expr); +} + +// filter condition +status_t ctsql_collect_filter_preds(sql_stmt_t *stmt, sql_query_t *query, cond_node_t *cond, dlvr_info_t *dlvr_info, + bool32 *is_conflict) +{ + galist_t *preds = &dlvr_info->pred_nodes; + expr_tree_t *col = NULL; + expr_tree_t *val = NULL; + plan_range_t *new_range = NULL; + pred_node_t *pred = NULL; + bool32 is_dlvr = CT_FALSE; + bool32 is_merge = CT_FALSE; + bool32 is_col_exist; + bool32 is_range_exist; + + if (!IS_LOCAL_COLUMN(cond->cmp->left)) { + col = cond->cmp->right; + val = cond->cmp->left; + cond->cmp->type = sql_reverse_cmp(cond->cmp->type); + } else { + col = cond->cmp->left; + val = cond->cmp->right; + } + + CT_RETURN_IFERR(is_val_can_dlvr(stmt, val, &is_dlvr)); + if (!is_dlvr) { + return cm_galist_insert(&dlvr_info->graft_nodes, cond); + } + + CT_RETURN_IFERR(sql_dlvr_make_range(stmt, TREE_DATATYPE(col), cond->cmp->type, val, &new_range)); + + for (uint32 i = 0; i < preds->count; ++i) { + pred = (pred_node_t *)cm_galist_get(preds, i); + is_col_exist = ctsql_pred_exist_col(pred, col); + is_range_exist = ctsql_pred_exist_range(stmt, pred, new_range); + // current cond already exist in pred_node, do nothing + if (is_col_exist && is_range_exist) { + 
return CT_SUCCESS; + } else if (is_col_exist) { + is_merge = CT_TRUE; + CT_RETURN_IFERR(ctsql_pred_add_range(stmt, pred, new_range, is_conflict)); + if (*is_conflict) { + return CT_SUCCESS; + } + // have repeat pull a = c and a = 1 and b = c , c is ancestor column + CT_RETURN_IFERR(ctsql_dlvr_pull_ancestor_conds(stmt, query, pred, new_range, is_conflict)); + if (*is_conflict) { + return CT_SUCCESS; + } + } else if (is_range_exist) { + is_merge = CT_TRUE; + CT_RETURN_IFERR(ctsql_pred_add_col(pred, col)); + } + if (is_merge) { + CT_RETURN_IFERR(ctsql_try_dlvr_preds(stmt, query, col, NULL, pred, preds, new_range, i + 1, is_conflict)); + return CT_SUCCESS; + } + } + // add new pred node + pred = NULL; + CT_RETURN_IFERR(sql_init_pred_node(stmt, query, &pred, preds)); + CT_RETURN_IFERR(cm_galist_insert(&pred->values, new_range)); + CT_RETURN_IFERR(ctsql_pred_add_col(pred, col)); + + return ctsql_dlvr_pull_ancestor_conds(stmt, query, pred, new_range, is_conflict); +} + +bool32 is_support_dlvr_cmp_node(sql_query_t *query, cmp_node_t *cmp) +{ + cols_used_t l_used, r_used; + if (cmp->left == NULL || cmp->right == NULL) { + return CT_FALSE; + } + // left and right nodes both have ancestor column + if (!IS_LOCAL_COLUMN(cmp->left) && !IS_LOCAL_COLUMN(cmp->right)) { + return CT_FALSE; + } + + if (!is_cmp_same_datatype(TREE_DATATYPE(cmp->left), TREE_DATATYPE(cmp->right))) { + return CT_FALSE; + } + // a = b and a LIKE "%f" x=> b = LIKE "%f" + if ((IS_LOCAL_COLUMN(cmp->left) && TREE_DATATYPE(cmp->left) == CT_TYPE_CHAR) || + (IS_LOCAL_COLUMN(cmp->right) && TREE_DATATYPE(cmp->right) == CT_TYPE_CHAR)) { + return CT_FALSE; + } + + if (has_semi_in_cmp_node(query, cmp)) { + return CT_FALSE; + } + init_cols_used(&l_used); + init_cols_used(&r_used); + sql_collect_cols_in_expr_tree(cmp->left, &l_used); + sql_collect_cols_in_expr_tree(cmp->right, &r_used); + // subselect can't be deliver + if (HAS_SUBSLCT(&l_used) || HAS_SUBSLCT(&r_used)) { + return CT_FALSE; + } + + // TODO: add more 
condition check + return cmp->type == CMP_TYPE_EQUAL; +} + +status_t ctsql_collect_preds_in_cmp(sql_stmt_t *stmt, sql_query_t *query, cond_node_t *cond, dlvr_info_t *dlvr_info, + bool32 *is_conflict) +{ + if (!is_support_dlvr_cmp_node(query, cond->cmp)) { + return cm_galist_insert(&dlvr_info->graft_nodes, cond); + } + // at least one expr is LOCAL_COLUMN + if (IS_LOCAL_COLUMN(cond->cmp->left) && IS_LOCAL_COLUMN(cond->cmp->right)) { + CT_RETSUC_IFTRUE(dlvr_info->dlvr_mode == DLVR_FILTER_COND); + return ctsql_collect_join_preds(stmt, query, cond, dlvr_info, is_conflict); + } else { + CT_RETSUC_IFTRUE(dlvr_info->dlvr_mode == DLVR_JOIN_COND); + return ctsql_collect_filter_preds(stmt, query, cond, dlvr_info, is_conflict); + } + + return CT_SUCCESS; +} + +status_t ctsql_get_pred_dlvr_info(sql_stmt_t *stmt, sql_query_t *query, cond_node_t *cond, dlvr_info_t *dlvr_info, + bool32 *is_conflict) +{ + CT_RETURN_IFERR(sql_stack_safe(stmt)); + + if (cond->type == COND_NODE_AND) { + CT_RETURN_IFERR(ctsql_get_pred_dlvr_info(stmt, query, cond->left, dlvr_info, is_conflict)); + // if current cond is conflict means whole condition is FALSE. 
+ if (*is_conflict) { + return CT_SUCCESS; + } + return ctsql_get_pred_dlvr_info(stmt, query, cond->right, dlvr_info, is_conflict); + } else if (cond->type == COND_NODE_OR) { + return cm_galist_insert(&dlvr_info->graft_nodes, cond); + } else if (cond->type == COND_NODE_COMPARE) { + return ctsql_collect_preds_in_cmp(stmt, query, cond, dlvr_info, is_conflict); + } + return CT_SUCCESS; +} + +status_t sql_preds_rebuild_cond_tree(sql_stmt_t *stmt, sql_query_t *query, cond_tree_t *cond, dlvr_info_t *dlvr_info) +{ + galist_t *preds = &dlvr_info->pred_nodes; + galist_t *graft = &dlvr_info->graft_nodes; + pred_node_t *pred = NULL; + cond_node_t *node = NULL; + + if (preds->count == 0) { + return CT_SUCCESS; + } + cond->root = NULL; + // rebuild cond tree + for (uint32 i = 0; i < preds->count; i++) { + pred = (pred_node_t *)cm_galist_get(preds, i); + CT_RETURN_IFERR(sql_pred_rebuild_conds(stmt, cond, pred)); + } + // graft to current tree + for (uint32 i = 0; i < graft->count; ++i) { + node = (cond_node_t *)cm_galist_get(graft, i); + CT_RETURN_IFERR(sql_add_cond_node_left(cond, node)); + } + return CT_SUCCESS; +} + +static inline void ctsql_init_dlvr_info(sql_stmt_t *stmt, dlvr_info_t *dlvr_info, dlvr_mode_t mode) +{ + cm_galist_init(&dlvr_info->pred_nodes, stmt->session->stack, cm_stack_alloc); + cm_galist_init(&dlvr_info->graft_nodes, stmt->session->stack, cm_stack_alloc); + dlvr_info->dlvr_mode = mode; +} + +static status_t ctsql_try_add_value_in_join_pred(sql_stmt_t *stmt, pred_node_t *dlvr_pred, galist_t *filter_preds, expr_tree_t *col, + bool32 *is_conflict) +{ + pred_node_t *pred = NULL; + plan_range_t *range = NULL; + for (uint32 i = 0; i < filter_preds->count; i++) { + pred = (pred_node_t *)cm_galist_get(filter_preds, i); + if (!ctsql_pred_exist_col(pred, col)) { + continue; + } + for (uint32 j = 0; j < pred->values.count; j++) { + range = (plan_range_t *)cm_galist_get(&pred->values, j); + CT_RETURN_IFERR(ctsql_pred_add_range(stmt, dlvr_pred, range, is_conflict)); + 
//TODO: consider is_conflict + } + } + + return CT_SUCCESS; +} + +static status_t ctsql_dlvr_predicates_on_join_inter(sql_stmt_t *stmt, sql_query_t *query, sql_join_node_t *join_node) +{ + CT_RETSUC_IFTRUE(join_node->join_cond == NULL || query->cond == NULL); + dlvr_info_t filter_info; + dlvr_info_t dlvr_info; + cond_tree_t *cond = NULL; + pred_node_t *pred; + expr_tree_t *col; + bool32 is_conflict = CT_FALSE; + + CTSQL_SAVE_STACK(stmt); + ctsql_init_dlvr_info(stmt, &filter_info, DLVR_FILTER_COND); + cond = query->cond; + CT_RETURN_IFERR(ctsql_get_pred_dlvr_info(stmt, query, cond->root, &filter_info, &is_conflict)); + // TODO: consider is_conflict + if (join_node->filter != NULL) { + cond = join_node->filter; + CT_RETURN_IFERR(ctsql_get_pred_dlvr_info(stmt, query, cond->root, &filter_info, &is_conflict)); + // TODO: consider is_conflict + } + // if there has no filter, nothing could de delivered + if (filter_info.pred_nodes.count == 0) { + CTSQL_RESTORE_STACK(stmt); + return CT_SUCCESS; + } + + ctsql_init_dlvr_info(stmt, &dlvr_info, DLVR_JOIN_COND); + cond = join_node->join_cond; + CT_RETURN_IFERR(ctsql_get_pred_dlvr_info(stmt, query, cond->root, &dlvr_info, &is_conflict)); + // move all connect filter condition in join pred + for (uint32 i = 0; i < dlvr_info.pred_nodes.count; i++) { + pred = (pred_node_t *)cm_galist_get(&dlvr_info.pred_nodes, i); + for (uint32 j = 0; j < pred->cols.count; j++) { + col = (expr_tree_t *)cm_galist_get(&pred->cols, j); + CT_RETURN_IFERR(ctsql_try_add_value_in_join_pred(stmt, pred, &filter_info.pred_nodes, + col, &is_conflict)); + // TODO: consider join condition + } + } + + CT_RETURN_IFERR(sql_preds_rebuild_cond_tree(stmt, query, join_node->join_cond, &dlvr_info)); + + CTSQL_RESTORE_STACK(stmt); + return CT_SUCCESS; +} + +status_t ctsql_dlvr_predicates_on_join_iter(sql_stmt_t *stmt, sql_query_t *query, sql_join_node_t *join_node) +{ + CT_RETSUC_IFTRUE(join_node->type == JOIN_TYPE_NONE); + + 
CT_RETURN_IFERR(ctsql_dlvr_predicates_on_join_iter(stmt, query, join_node->left)); + CT_RETURN_IFERR(ctsql_dlvr_predicates_on_join_iter(stmt, query, join_node->right)); + + return ctsql_dlvr_predicates_on_join_inter(stmt, query, join_node); +} + +status_t ct_transf_predicate_delivery(sql_stmt_t *stmt, sql_query_t *query) +{ + // TODO : add hint operation + CT_LOG_DEBUG_INF("start predicates delivery."); + dlvr_info_t dlvr_info; + bool32 is_conflict = CT_FALSE; + cond_tree_t *cond = query->cond; + // The CONNECT BY statement has a hierarchical query relationship ,can't deliver + if (cond == NULL || query->connect_by_cond != NULL) { + return CT_SUCCESS; + } + CTSQL_SAVE_STACK(stmt); + SET_NODE_STACK_CURR_QUERY(stmt, query); + ctsql_init_dlvr_info(stmt, &dlvr_info, DLVR_ALL); + CT_RETURN_IFERR(ctsql_get_pred_dlvr_info(stmt, query, cond->root, &dlvr_info, &is_conflict)); + // all collected cond is AND, if has confilict condition means the whole tree is FALSE + if (is_conflict) { + CT_LOG_DEBUG_INF("current condition is confilict.line:%d, column:%d", query->loc.line, query->loc.column); + cond->root->type = COND_NODE_FALSE; + cond->rownum_upper = 0; + SQL_RESTORE_NODE_STACK(stmt); + CTSQL_RESTORE_STACK(stmt); + return CT_SUCCESS; + } + + CT_RETURN_IFERR(sql_preds_rebuild_cond_tree(stmt, query, cond, &dlvr_info)); + + SQL_RESTORE_NODE_STACK(stmt); + CTSQL_RESTORE_STACK(stmt); + return CT_SUCCESS; +} + +typedef bool32 (*cond_check_func_t)(const cmp_node_t *, const cmp_node_t *, const expr_tree_t *, const expr_tree_t *, + const expr_tree_t *, const expr_tree_t *, int32 *); + +static bool32 ctsql_check_exists_subslct_adapter(const cmp_node_t *c1, const cmp_node_t *c2, const expr_tree_t *l1, + const expr_tree_t *l2, const expr_tree_t *r1, const expr_tree_t *r2, + int32 *res) +{ + return ctsql_check_exists_subslct_with_index(l2, r2, res); +} + +static cond_check_func_t s_check_funcs[] = { + ctsql_check_single_col_filter_cond, // Single-column filter condition check + 
ctsql_check_cmp_join_cond, // Join condition check + ctsql_check_exists_subslct_adapter // Subquery index scan check +}; + +static status_t compare_cmp_nodes(const cmp_node_t *cmp1, const cmp_node_t *cmp2, int32 *result) +{ + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Same condition types(CMP)"); + + const expr_tree_t *l1 = cmp1->left; + const expr_tree_t *l2 = cmp2->left; + const expr_tree_t *r1 = cmp1->right; + const expr_tree_t *r2 = cmp2->right; + + for (uint32 i = 0; i < sizeof(s_check_funcs) / sizeof(s_check_funcs[0]); ++i) { + if (s_check_funcs[i](cmp1, cmp2, l1, l2, r1, r2, result) == CT_TRUE) { + return CT_SUCCESS; + } + } + return CT_ERROR; +} + +// Condition priority comparison function +static status_t ctsql_cond_cmp_func(const void *item1, const void *item2, int32 *result) +{ + const cond_node_t *cond1 = (const cond_node_t *)item1; + const cond_node_t *cond2 = (const cond_node_t *)item2; + + status_t status = compare_cond_types(cond1, cond2, result); + if (status == CT_SUCCESS) { + return status; + } + + status = compare_cmp_nodes(cond1->cmp, cond2->cmp, result); + if (status == CT_SUCCESS) { + return status; + } + + *result = 0; + return CT_SUCCESS; +} + +// Merge sort implementation for condition ordering +static status_t merge_sort(sql_stmt_t *stmt, galist_t *list, int32 l, int32 r, galist_cmp_func_t cmp_func) +{ + if (l >= r) { + return CT_SUCCESS; + } + + int32 mid = l + (r - l) / 2; + if (merge_sort(stmt, list, l, mid, cmp_func) != CT_SUCCESS) { + return CT_ERROR; + } + if (merge_sort(stmt, list, mid + 1, r, cmp_func) != CT_SUCCESS) { + return CT_ERROR; + } + + int k = 0, i = l, j = mid + 1; + void **tmp_arr = NULL; + int32 arr_size = r - l + 1; + + tmp_arr = (void **)malloc(sizeof(void *) * arr_size); + if (tmp_arr == NULL) { + CT_THROW_ERROR(ERR_ALLOC_MEMORY, sizeof(void *) * arr_size, "merge sort temp array"); + return CT_ERROR; + } + + while (i <= mid && j <= r) { + void *item1 = cm_galist_get(list, i); + void *item2 = cm_galist_get(list, j); + 
int32 cmp_result; + + if (cmp_func(item1, item2, &cmp_result) != CT_SUCCESS) { + return CT_ERROR; + } + + if (cmp_result <= 0) { + tmp_arr[k++] = item1; + i++; + } else { + tmp_arr[k++] = item2; + j++; + } + } + + while (i <= mid) { + tmp_arr[k++] = cm_galist_get(list, i++); + } + while (j <= r) { + tmp_arr[k++] = cm_galist_get(list, j++); + } + + for (int m = 0; m < k; m++) { + cm_galist_set(list, l + m, tmp_arr[m]); + } + + if (tmp_arr != NULL) { + free(tmp_arr); + } + return CT_SUCCESS; +} + +// Condition sorting (merge sort) +static inline status_t ct_galist_sort(sql_stmt_t *stmt, galist_t *list, galist_cmp_func_t cmp_func) +{ + if (list->count <= 1) { + return CT_SUCCESS; + } + + int32 l = 0; + int32 r = (int32)list->count - 1; + + if (merge_sort(stmt, list, l, r, cmp_func) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Merge sort failed."); + return CT_ERROR; + } + + return CT_SUCCESS; +} + +static status_t reorganise_recursive(uint32 index, cond_tree_t *cond_tree, cond_node_t *prev_and, cond_node_t *prev_cmp, + galist_t *cmp_list, galist_t *and_list) +{ + if (index >= cmp_list->count) { + return CT_SUCCESS; + } + + cond_node_t *cmp_node = (cond_node_t *)cm_galist_get(cmp_list, index); + cond_node_t *and_node = (cond_node_t *)cm_galist_get(and_list, index - 1); + + and_node->left = prev_cmp; + and_node->right = cmp_node; + + if (prev_and != NULL) { + prev_and->right = and_node; + } else { + cond_tree->root = and_node; + } + + return reorganise_recursive(index + 1, cond_tree, and_node, cmp_node, cmp_list, and_list); +} + +// Condition tree reorganization +static status_t ctsql_reorganise_cond_tree(sql_stmt_t *stmt, galist_t *cmp_list, galist_t *and_list, cond_tree_t *cond_tree) +{ + CT_RETURN_IFERR(sql_stack_safe(stmt)); + + if (cmp_list->count == 0) { + cond_tree->root = NULL; + return CT_SUCCESS; + } + + cond_node_t *first_cmp_node = (cond_node_t *)cm_galist_get(cmp_list, 0); + cond_tree->root = first_cmp_node; + + if (cmp_list->count == 1) { + 
return CT_SUCCESS; + } + + uint32 index = 1; + return reorganise_recursive(index, cond_tree, NULL, first_cmp_node, cmp_list, and_list); +} + +static void print_cond_list(galist_t *cond_list, const char *flag) +{ + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] %s rewrite, the order of conditions is:", flag); + for (uint32 i = 0; i < cond_list->count; i++) { + cond_node_t *node = (cond_node_t *)cm_galist_get(cond_list, i); + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] condition type: %d, condition ptr: %p", node->type, (void *)node); + } +} + +// Condition tree processing: flatten -> sort -> reorganize +static status_t ct_cond_rewrite(sql_stmt_t *stmt, sql_query_t *query, galist_t *cmp_list, galist_t *and_list) +{ + if (ctsql_flatten_cond_node(stmt, query->cond->root, cmp_list, and_list) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition tree flatten failed."); + return CT_ERROR; + } + print_cond_list(cmp_list, "Before"); + + if (ct_galist_sort(stmt, cmp_list, ctsql_cond_cmp_func) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition tree sort failed."); + return CT_ERROR; + } + if (ctsql_reorganise_cond_tree(stmt, cmp_list, and_list, query->cond) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition tree reorganise failed."); + return CT_ERROR; + } + print_cond_list(cmp_list, "After"); + + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Condition rewrite success."); + return CT_SUCCESS; +} + +status_t ct_cond_rewrite_4_chg_order(sql_stmt_t *stmt, sql_query_t *query) +{ + // TODO: HINT interface + // if (!hint_apply_opt_param(stmt->context, g_instance->sql.enable_pred_reorder, OPT_PARAM_PRED_REORDER)) { + // return CT_SUCCESS; + // } + + if (query->cond == NULL) { + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] NULL condition."); + return CT_SUCCESS; + } + + // Top-level condition must be AND type + if (query->cond->root->type != COND_NODE_AND) { + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Top condition is not AND."); + return CT_SUCCESS; + } + + galist_t *cmp_list = 
NULL; + galist_t *and_list = NULL; + + CTSQL_SAVE_STACK(stmt); + + if (ct_cond_list_init(stmt, &cmp_list, &and_list) != CT_SUCCESS) { + CTSQL_RESTORE_STACK(stmt); + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition list init failed."); + return CT_ERROR; + } + + if (ct_cond_rewrite(stmt, query, cmp_list, and_list) != CT_SUCCESS) { + CTSQL_RESTORE_STACK(stmt); + CT_LOG_RUN_ERR("[CONDITION_REWRITE] Condition rewrite failed."); + return CT_ERROR; + } + + CTSQL_RESTORE_STACK(stmt); + CT_LOG_DEBUG_INF("[CONDITION_REWRITE] Condition rewrite for change order success."); + return CT_SUCCESS; +} + #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.h b/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.h index 6ed6b08b15a06ee922fc67826ee3ad60effcff6c..9f377389e1c91061fe2723072d3e2266dbc21da6 100644 --- a/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.h +++ b/pkg/src/ctsql/optimizer/ctsql_cond_rewrite.h @@ -58,18 +58,11 @@ pulling up sub-link is equivalent to sub-query unnesting in Oracle. 
extern "C" { #endif -typedef enum en_collect_mode { - DLVR_COLLECT_FV, - DLVR_COLLECT_FF, - DLVR_COLLECT_ALL, -} collect_mode_t; - -typedef struct st_dlvr_pair { - uint32 *col_map[CT_MAX_JOIN_TABLES]; - galist_t cols; - galist_t values; -} dlvr_pair_t; - +typedef enum en_dlvr_mode { + DLVR_FILTER_COND, + DLVR_JOIN_COND, + DLVR_ALL, +} dlvr_mode_t; typedef struct st_push_assist { sql_query_t *p_query; uint32 ssa_count; @@ -79,17 +72,32 @@ typedef struct st_push_assist { } push_assist_t; #define DLVR_MAX_IN_COUNT 5 + +typedef struct st_predicates_node { + int16 **col_map; + galist_t cols; + galist_t values; +} pred_node_t; + +typedef struct st_dlvr_info { + dlvr_mode_t dlvr_mode; + galist_t pred_nodes; + galist_t graft_nodes; +} dlvr_info_t; + + status_t sql_predicate_push_down(sql_stmt_t *stmt, sql_query_t *query); -status_t sql_process_predicate_dlvr(sql_stmt_t *stmt, sql_query_t *query, cond_tree_t *cond); status_t sql_process_dlvr_join_tree_on(sql_stmt_t *stmt, sql_query_t *query, sql_join_node_t *join_tree); status_t sql_process_oper_or_sink(sql_stmt_t *stmt, cond_node_t **cond); status_t push_down_predicate(sql_stmt_t *stmt, cond_tree_t *cond, sql_table_t *table, select_node_t *slct, push_assist_t *push_assist); -status_t cond_rewrite_4_chg_order(sql_stmt_t *stmt, sql_query_t *query); +status_t ct_cond_rewrite_4_chg_order(sql_stmt_t *stmt, sql_query_t *query); status_t replace_group_expr_node(sql_stmt_t *stmt, expr_node_t **node); bool32 get_specified_level_query(sql_query_t *curr_query, uint32 level, sql_query_t **query, sql_select_t **subslct); status_t sql_update_query_ssa(sql_query_t *query); bool32 sql_can_expr_node_optm_by_hash(expr_node_t *node); +status_t ct_transf_predicate_delivery(sql_stmt_t *stmt, sql_query_t *query); +status_t ctsql_dlvr_predicates_on_join_iter(sql_stmt_t *stmt, sql_query_t *query, sql_join_node_t *join_node); #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/optimizer/ctsql_distinct_rewrite.c 
b/pkg/src/ctsql/optimizer/ctsql_distinct_rewrite.c new file mode 100644 index 0000000000000000000000000000000000000000..8af4e3e5e0374356589e0ea9bb465a27e927b9a7 --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_distinct_rewrite.c @@ -0,0 +1,172 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * ctsql_distinct_rewrite.c + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_distinct_rewrite.c + * + * ------------------------------------------------------------------------- + */ + +#include "ctsql_distinct_rewrite.h" +#include "srv_instance.h" +#include "ctsql_hint_verifier.h" +#include "ctsql_cond_rewrite.h" +#include "dml_parser.h" + +static bool32 check_query_distinct_eliminated(sql_stmt_t *stmt, sql_query_t *query) +{ + galist_t *rs_columns = NULL; + if (query->has_distinct) { + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Parent query has distinct."); + rs_columns = query->distinct_columns; + } else if (query->owner != NULL && query->owner->type == SELECT_AS_LIST) { + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Parent query is SELECT_AS_LIST."); + rs_columns = query->rs_columns; + } else { + return CT_FALSE; + } + + uint32 idx = 0; + while (idx < rs_columns->count) { + rs_column_t *rs_col = (rs_column_t *)cm_galist_get(rs_columns, idx++); + // 查询列必须是 RS_COL_COLUMN类型(普通表字段) + if 
(rs_col->type == RS_COL_COLUMN) { + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] The query column must be RS_COL_COLUMN."); + continue; + } + + // 如果查询列为 RS_COL_CALCL 类型, 必须为常量或者常量表达式、绑定参数 + if (!sql_is_const_expr_tree(rs_col->expr) && !TREE_IS_BINDING_PARAM(rs_col->expr)) { + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] RS_COL_CALC is not const or binding param."); + return CT_FALSE; + } + } + + return CT_TRUE; +} + +static inline bool32 check_subquery_distinct_eliminatable(sql_query_t *subquery) +{ + if (!subquery->has_distinct) { // 子查询需要有 distinct + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Subquery have no distinct."); + return CT_FALSE; + } + + if (subquery->group_sets->count != 0) { // 子查询不能有 group by + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Subquery can't have group sets."); + return CT_FALSE; + } + + if (subquery->winsort_list->count != 0) { // 子查询不能有窗口函数 + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Subquery can't have window sort."); + return CT_FALSE; + } + + if (LIMIT_CLAUSE_OCCUR(&subquery->limit)) { // 子查询不能有 limit + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Subquery can't have limit."); + return CT_FALSE; + } + + if (ROWNUM_COND_OCCUR(subquery->cond)) { // 子查询不能有 rownum 条件 + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Subquery can't have rownum condition."); + return CT_FALSE; + } + + return CT_TRUE; +} + +static status_t ct_eliminate_query_distinct(sql_stmt_t *stmt, sql_query_t *query, sql_query_t *subquery) +{ + if (!check_query_distinct_eliminated(stmt, query)) { + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Query is not eliminatable."); + return CT_SUCCESS; + } + + if (!check_subquery_distinct_eliminatable(subquery)) { + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Subquery is not eliminatable."); + return CT_SUCCESS; + } + + /* + * 如果子查询有 order by,则会将 order by中满足条件的 group 类型的字段用它的 origin_ref 替换. + * 不过一般不会走到这一步,在之前的改写子查询表排序消除中,排序已经被消除. 
+ */ + SET_NODE_STACK_CURR_QUERY(stmt, subquery); + uint32 idx = 0; + while (idx < subquery->sort_items->count) { + sort_item_t *item = (sort_item_t *)cm_galist_get(subquery->sort_items, idx++); + if (replace_group_expr_node(stmt, &item->expr->root) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[DISTINCT_ELIMATE] replace_group_expr_node failed."); + return CT_ERROR; + } + } + SQL_RESTORE_NODE_STACK(stmt); + + SWAP(galist_t *, subquery->rs_columns, subquery->distinct_columns); + cm_galist_reset(subquery->distinct_columns); // 清空 distinct_columns + subquery->has_distinct = CT_FALSE; + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Success on subquery %p", subquery); + + return CT_SUCCESS; +} + +status_t ct_transf_eliminate_distinct(sql_stmt_t *stmt, sql_query_t *query) +{ + if (sql_stack_safe(stmt) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[DISTINCT_ELIMATE] sql_stack_safe failed."); + return CT_ERROR; + } + + uint32 idx = 0; + while (idx < query->tables.count) { + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, idx++); + if (table == NULL) { + CT_LOG_RUN_ERR("[DISTINCT_ELIMATE] table is NULL."); + return CT_ERROR; + } + if (table->type != VIEW_AS_TABLE && table->type != SUBSELECT_AS_TABLE) { // 表类型为子查询表或视图 + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Skip table[%s] type:%d, not view/subquery.", table->name, table->type); + continue; + } + if (table->subslct_tab_usage != SUBSELECT_4_NORMAL_JOIN) { // 表用于 NORMAL JOIN + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Skip table[%s] sub_usage:%d, not normal join.", table->name, + table->subslct_tab_usage); + continue; + } + if (table->select_ctx->root->type != SELECT_NODE_QUERY) { // 子查询表中不能是集合类型的查询 + CT_LOG_DEBUG_INF("[DISTINCT_ELIMATE] Skip table[%s] root_type:%d, not plain query.", table->name, + table->select_ctx->root->type); + continue; + } + + sql_query_t *subquery = table->select_ctx->first_query; + + if (ct_transf_eliminate_distinct(stmt, subquery) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[DISTINCT_ELIMATE] ct_transf_eliminate_distinct 
failed."); + return CT_ERROR; + } + + if (ct_eliminate_query_distinct(stmt, query, subquery) != CT_SUCCESS) { + CT_LOG_RUN_ERR("[DISTINCT_ELIMATE] ct_eliminate_query_distinct failed."); + return CT_ERROR; + } + } + + return CT_SUCCESS; +} \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_distinct_rewrite.h b/pkg/src/ctsql/optimizer/ctsql_distinct_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..c7d147353ab0c0c809faa21799ce06e7f311fc19 --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_distinct_rewrite.h @@ -0,0 +1,41 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * ctsql_distinct_rewrite.h + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_distinct_rewrite.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef CTSQL_DISTINCT_REWRITE_H +#define CTSQL_DISTINCT_REWRITE_H + +#include "ctsql_stmt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +status_t ct_transf_eliminate_distinct(sql_stmt_t *stmt, sql_query_t *query); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.c b/pkg/src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.c new file mode 100644 index 0000000000000000000000000000000000000000..c327fcb8e57b64dde24c082d776579926108aada --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.c @@ -0,0 +1,422 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * ctsql_hash_mtrl_rewrite.c + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.c + * + * ------------------------------------------------------------------------- + */ +#include "ctsql_hash_mtrl_rewrite.h" +#include "ctsql_verifier.h" +#include "ctsql_aggr.h" +#include "ctsql_hint_verifier.h" +#include "ctsql_transform.h" +#include "dml_parser.h" +#include "ctsql_context.h" + +cond_check_strategy cond_check_strategies[] = { [COND_NODE_AND] = check_and_hash_condition, + [COND_NODE_OR] = check_or_hash_condition, + [COND_NODE_COMPARE] = check_compare_hash_condition }; + +static column_flags_t analyze_columns(cols_used_t *cols_used) +{ + return (column_flags_t){ .has_ancestor = HAS_PRNT_OR_ANCSTR_COLS(cols_used->flags), + .has_self = HAS_SELF_COLS(cols_used->flags), + .has_subquery = HAS_SUBSLCT(cols_used) }; +} + +static bool32 validate_column_mixing(column_flags_t left, column_flags_t right) +{ + // Both sides contain ancestor columns or neither side contains ancestor columns + if ((left.has_ancestor && right.has_ancestor) || (!left.has_ancestor && !right.has_ancestor)) { + return CT_FALSE; + } + // Subquery and ancestor columns are mixed + if ((left.has_ancestor && left.has_subquery) || (right.has_ancestor && right.has_subquery)) { + return CT_FALSE; + } + // One side contains both its own columns and ancestor columns + return !((left.has_ancestor && left.has_self) || (right.has_ancestor && right.has_self)); +} + +static inline status_t check_table_has_ancestor(visit_assist_t *visit_assist, expr_node_t **expr) +{ + if (((*expr)->type == EXPR_NODE_GROUP && NODE_VM_ANCESTOR(*expr) > 0) || + ((*expr)->type == EXPR_NODE_COLUMN && NODE_ANCESTOR(*expr) > 0)) { + visit_assist->result0 = CT_TRUE; + } + return CT_SUCCESS; +} + +static bool32 check_outer_join_node_cond(cond_tree_t *cond) +{ + if (cond == NULL) { + return CT_TRUE; + } + cols_used_t cols_used; + 
init_cols_used(&cols_used); + sql_collect_cols_in_expr_tree(cond->root, &cols_used); + return !HAS_SUBSLCT(&cols_used) && !HAS_PRNT_AND_ANCSTR_COLS(cols_used.flags); +} + +// Check if it is a regular aggregate function +static bool32 is_basic_aggr_func(const sql_func_t *func) +{ + switch (func->aggr_type) { + case AGGR_TYPE_SUM: + case AGGR_TYPE_MIN: + case AGGR_TYPE_MAX: + case AGGR_TYPE_COUNT: + case AGGR_TYPE_AVG: + return CT_TRUE; + default: + break; + } + return CT_FALSE; +} + +static bool32 ctsql_can_hash_mtrl_aggreation_support(sql_stmt_t *stmt, sql_query_t *query) +{ + for (uint32 i = 0; i < query->aggrs->count; i++) { + expr_node_t *aggr = (expr_node_t *)cm_galist_get(query->aggrs, i); + if (!is_basic_aggr_func(GET_AGGR_FUNC(aggr))) { + return CT_FALSE; + } + } + return CT_TRUE; +} + +static bool32 validate_outer_join_conditions(sql_join_node_t *join_node) +{ + if (join_node == NULL || join_node->type == JOIN_TYPE_NONE) { + return CT_TRUE; + } + if (!check_outer_join_node_cond(join_node->filter) || !check_outer_join_node_cond(join_node->join_cond)) { + return CT_FALSE; + } + return validate_outer_join_conditions(join_node->left) && validate_outer_join_conditions(join_node->right); +} + +static bool32 validate_subquery_for_hash_mtrl(sql_query_t *sub_query) +{ + // Ensure the subquery is a variant and contains parent columns + if (sub_query->owner->type != SELECT_AS_VARIANT || sub_query->owner->parent_refs->count == 0) { + return CT_FALSE; + } + + // Check for aggregate functions and grouping + if (sub_query->aggrs->count == 0 || HAS_GROUP_OR_WINDOW(sub_query)) { + return CT_FALSE; + } + + // Hierarchical query and conditional query + if (sub_query->cond == NULL || (sub_query->cond->incl_flags & SQL_INCL_ROWNUM) || + sub_query->connect_by_cond != NULL) { + return CT_FALSE; + } + + if ((sub_query->incl_flags & EXPR_INCL_ROWNUM) || (sub_query->incl_flags & RS_INCL_PRNT_OR_ANCSTR) || + (sub_query->incl_flags & RS_INCL_GROUPING)) { + return CT_FALSE; + } + if 
(sub_query->join_assist.outer_node_count > 0 && + !validate_outer_join_conditions(sub_query->join_assist.join_node)) { + return CT_FALSE; + } + + return CT_TRUE; +} + +static bool32 check_subquery_hash_rewrite_conditions(sql_query_t *sub_query) +{ + return !validate_subquery_for_hash_mtrl(sub_query) || ct_query_contains_table_ancestor(sub_query) || + detect_cross_level_dependency(sub_query); +} + +static bool32 validate_hash_filter_conditions(cond_node_t *node, bool32 *has_join_cond) +{ + if (node == NULL) { + return CT_TRUE; + } + if (node->type != COND_NODE_COMPARE && node->type != COND_NODE_AND && node->type != COND_NODE_OR) { + return CT_TRUE; + } + if (cond_check_strategies[node->type]) { + return cond_check_strategies[node->type](node, has_join_cond); + } + return CT_TRUE; +} + +static status_t can_perform_hash_mtrl_rewrite(sql_stmt_t *stmt, select_node_t *select_node, bool32 *is_rewrite) +{ + if (select_node == NULL || select_node->type != SELECT_NODE_QUERY) { + return CT_SUCCESS; + } + sql_query_t *sub_query = select_node->query; + if (sub_query == NULL || sub_query->cond == NULL) { + return CT_SUCCESS; + } + if (!hint_apply_opt_param(stmt->context, g_instance->sql.enable_hash_mtrl, OPT_PARAM_HASH_MTRL)) { + return CT_SUCCESS; + } + if (check_subquery_hash_rewrite_conditions(sub_query)) { + return CT_SUCCESS; + } + if (!ctsql_can_hash_mtrl_aggreation_support(stmt, sub_query)) { + return CT_SUCCESS; + } + bool32 has_join_cond = CT_FALSE; + if (!validate_hash_filter_conditions(sub_query->cond->root, &has_join_cond) || !has_join_cond) { + return CT_SUCCESS; + } + if (sql_get_group_plan_type(stmt, sub_query) != PLAN_NODE_HASH_GROUP) { + return CT_SUCCESS; + } + if (HAS_SPEC_TYPE_HINT(sub_query->hint_info, OPTIM_HINT, HINT_KEY_WORD_UNNEST | HINT_KEY_WORD_NO_UNNEST)) { + // TODO: sql_create_unnest_outline_by_hint + return CT_SUCCESS; + } + return can_rewrite_by_check_index(stmt, sub_query, CK_FOR_HASH_MTRL, is_rewrite); +} + +static status_t 
manage_expr_addition_for_hash(sql_stmt_t *stmt, sql_query_t *query, expr_tree_t *parent, + expr_tree_t *child) +{ + CT_RETURN_IFERR(cm_galist_insert(query->remote_keys, parent)); + group_set_t *group_set = NULL; + if (query->group_sets->count == 0) { + CT_RETURN_IFERR(cm_galist_new(query->group_sets, sizeof(group_set_t), (void **)&group_set)); + CT_RETURN_IFERR(sql_create_list(stmt, &group_set->items)); + } else { + group_set = (group_set_t *)cm_galist_get(query->group_sets, 0); + } + CT_RETURN_IFERR(cm_galist_insert(group_set->items, child)); + return CT_SUCCESS; +} + +static status_t extract_hash_mtrl_keys_from_cmp(cond_collect_helper_t *cond_collector, cond_node_t *node) +{ + cmp_node_t *cmp = node->cmp; + if (!cmp && cmp->type != CMP_TYPE_EQUAL) { + return CT_SUCCESS; + } + cols_used_t right_cols_used, left_cols_used; + init_cols_used(&left_cols_used); + init_cols_used(&right_cols_used); + sql_collect_cols_in_expr_tree(cmp->left, &left_cols_used); + sql_collect_cols_in_expr_tree(cmp->right, &right_cols_used); + if (!HAS_PRNT_OR_ANCSTR_COLS(left_cols_used.flags) && !HAS_PRNT_OR_ANCSTR_COLS(right_cols_used.flags)) { + return CT_SUCCESS; + } + node->type = COND_NODE_TRUE; + cond_collector->arg0 = CT_TRUE; + sql_query_t *query = (sql_query_t *)cond_collector->p_arg0; + if (HAS_PRNT_OR_ANCSTR_COLS(left_cols_used.flags)) { + manage_expr_addition_for_hash(cond_collector->stmt, query, cmp->left, cmp->right); + return CT_SUCCESS; + } + return manage_expr_addition_for_hash(cond_collector->stmt, query, cmp->right, cmp->left); +} + +static status_t extract_keys_for_hash_mtrl(sql_stmt_t *stmt, cond_node_t *cond, sql_query_t *query) +{ + cond_collect_helper_t cond_collector; + CTSQL_SAVE_STACK(stmt); + CT_RETURN_IFERR(cond_collector_init(&cond_collector, stmt, stmt, sql_stack_alloc)); + cond_collector.p_arg0 = (void *)query; + cond_collector.arg0 = CT_FALSE; + cond_collector.type = COLL_TYPE_IGNORE; + bool32 is_and = cond->type == COND_NODE_AND; + 
CT_RETURN_IFERR(traverse_and_collect_conds(&cond_collector, cond)); + if (!cond_collector.cond) { + return CT_SUCCESS; + } + for (uint32 i = 0; i < cond_collector.cond->count; i++) { + cond_node_t *cond_node = (cond_node_t *)cm_galist_get(cond_collector.cond, i); + CT_RETURN_IFERR(extract_hash_mtrl_keys_from_cmp(&cond_collector, cond_node)); + CT_BREAK_IF_TRUE(cond_collector.is_stoped); + } + if (is_and && cond_collector.arg0) { + CT_RETURN_IFERR(try_eval_logic_cond(stmt, cond)); + } + CTSQL_RESTORE_STACK(stmt); + return CT_SUCCESS; +} + +static status_t reset_query_state(sql_query_t *query) +{ + if (query->has_distinct) { + SWAP(galist_t *, query->rs_columns, query->distinct_columns); + cm_galist_reset(query->distinct_columns); + query->has_distinct = CT_FALSE; + } + query->cond_has_acstor_col = CT_FALSE; + cm_galist_reset(query->sort_items); + return CT_SUCCESS; +} + +static status_t rewrite_subquery_for_hash_processing(sql_stmt_t *stmt, select_node_t *select_node) +{ + bool32 is_rewrite = CT_FALSE; + CT_RETURN_IFERR(can_perform_hash_mtrl_rewrite(stmt, select_node, &is_rewrite)); + if (!is_rewrite) { + make_subquery_without_hash_join(stmt, select_node, CT_FALSE); + return CT_SUCCESS; + } + sql_query_t *query = select_node->query; + CT_RETURN_IFERR(sql_create_list(stmt, &query->remote_keys)); + CT_RETURN_IFERR(extract_keys_for_hash_mtrl(stmt, query->cond->root, query)); + return reset_query_state(query); +} + +static bool32 inspect_parent_reference(parent_ref_t *ref) +{ + galist_t *ref_columns = ref->ref_columns; + for (uint32 i = 0; i < ref_columns->count; i++) { + expr_node_t *col_ref = (expr_node_t *)cm_galist_get(ref->ref_columns, i); + col_ref = sql_get_origin_ref(col_ref); + if (ANCESTOR_OF_NODE(col_ref) > 1) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +static void disable_hash_join_for_table(sql_stmt_t *stmt, sql_query_t *query) +{ + for (uint32 i = 0; i < query->tables.count; i++) { + sql_table_t *table = (sql_table_t 
*)sql_array_get(&query->tables, i); + if (!table) { + continue; + } + TABLE_CBO_SET_FLAG(table, SELTION_NO_HASH_JOIN); + if (table->type == SUBSELECT_AS_TABLE || table->type == VIEW_AS_TABLE) { + make_subquery_without_hash_join(stmt, table->select_ctx->root, CT_TRUE); + } + } +} + +void make_subquery_without_hash_join(sql_stmt_t *stmt, select_node_t *select_node, bool32 is_var) +{ + if (!select_node) { + return; + } + if (select_node->type != SELECT_NODE_QUERY) { + make_subquery_without_hash_join(stmt, select_node->left, is_var); + make_subquery_without_hash_join(stmt, select_node->right, is_var); + } else { + sql_query_t *query = select_node->query; + if (is_var || ((query->owner->type == SELECT_AS_VARIANT || query->owner->type == SELECT_AS_LIST) && + IF_USE_PARENT_COLS(query->owner))) { + disable_hash_join_for_table(stmt, query); + } + } +} + +bool32 check_and_hash_condition(cond_node_t *node, bool32 *has_join_cond) +{ + return validate_hash_filter_conditions(node->left, has_join_cond) && + validate_hash_filter_conditions(node->right, has_join_cond); +} + +bool32 check_or_hash_condition(cond_node_t *node, bool32 *has_join_cond) +{ + cols_used_t col_used; + init_cols_used(&col_used); + sql_collect_cols_in_cond(node, &col_used); + return !HAS_PRNT_OR_ANCSTR_COLS(col_used.flags); +} + +bool32 check_compare_hash_condition(cond_node_t *node, bool32 *has_join_cond) +{ + cols_used_t left_col_used, right_col_used; + init_cols_used(&left_col_used); + init_cols_used(&right_col_used); + sql_collect_cols_in_expr_tree(node->cmp->left, &left_col_used); + sql_collect_cols_in_expr_tree(node->cmp->right, &right_col_used); + if (node->cmp->type != CMP_TYPE_EQUAL) { + return CT_FALSE; + } + bool32 vaild = validate_column_mixing(analyze_columns(&left_col_used), analyze_columns(&right_col_used)); + if (vaild) { + *has_join_cond = CT_TRUE; + } + return vaild; +} + +bool32 detect_cross_level_dependency(sql_query_t *query) +{ + galist_t *parent_refs = query->owner->parent_refs; + for 
(uint32 i = 0; i < parent_refs->count; i++) { + parent_ref_t *current_ref = (parent_ref_t *)cm_galist_get(parent_refs, i); + if (inspect_parent_reference(current_ref)) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +bool32 ct_query_contains_table_ancestor(sql_query_t *query) +{ + visit_assist_t visit_assist; + sql_init_visit_assist(&visit_assist, NULL, query); + uint32 count = 0; + sql_table_t *table = NULL; + while (count < query->tables.count) { + table = (sql_table_t *)sql_array_get(&query->tables, count++); + expr_tree_t *tree = NULL; + if (table->type == FUNC_AS_TABLE) { + tree = table->func.args; + } else if (table->type == JSON_TABLE) { + tree = table->json_table_info->data_expr; + } else { + continue; + } + (void)visit_expr_tree(&visit_assist, tree, check_table_has_ancestor); + if (visit_assist.result0 != CT_INVALID_ID32) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +status_t ct_rewrite_subquery_using_hash(sql_stmt_t *stmt, sql_array_t *ssa) +{ + if (ssa == NULL || ssa->count <= 0) { + return CT_SUCCESS; + } + uint32 count = 0; + while (count < ssa->count) { + sql_select_t *select = (sql_select_t *)sql_array_get(ssa, count++); + if (select == NULL) { + continue; + } + CT_RETURN_IFERR(rewrite_subquery_for_hash_processing(stmt, select->root)); + } + return CT_SUCCESS; +} + +status_t ct_var_subquery_rewrite(sql_stmt_t *stmt, sql_query_t *query) +{ + // todo HAS_SPEC_TYPE_HINT(HINT_KEY_WORD_NO_REWRITE) + if (query == NULL) { + return CT_SUCCESS; + } + return ct_rewrite_subquery_using_hash(stmt, &(query->ssa)); +} diff --git a/pkg/src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.h b/pkg/src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..b05439f37544a9c905698fd7addda5065fbd05c9 --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.h @@ -0,0 +1,64 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. 
+ * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * ctsql_hash_mtrl_rewrite.h + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_hash_mtrl_rewrite.h + * + * ------------------------------------------------------------------------- + */ +#ifndef __CTSQL_HASH_MTRL_REWRITE_H__ +#define __CTSQL_HASH_MTRL_REWRITE_H__ + +#include "ctsql_stmt.h" +#include "ctsql_expr.h" +#include "ctsql_cond.h" +#include "ctsql_func.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define HAS_PARENT_COLUMNS(select_context) ((select_context)->parent_refs->count > 0) +#define HAS_GROUP_OR_WINDOW(query) ((query)->group_sets->count > 0 || (query)->winsort_list->count > 0) + +typedef struct st_column_flags { + bool32 has_ancestor; + bool32 has_self; + bool32 has_subquery; +} column_flags_t; + +typedef bool32 (*cond_check_strategy)(cond_node_t *, bool32 *); + +bool32 check_and_hash_condition(cond_node_t *node, bool32 *join_cond); +bool32 check_or_hash_condition(cond_node_t *node, bool32 *join_cond); +bool32 check_compare_hash_condition(cond_node_t *node, bool32 *join_cond); + +status_t ct_rewrite_subquery_using_hash(sql_stmt_t *stmt, sql_array_t *ssa); +bool32 ct_query_contains_table_ancestor(sql_query_t *query); +bool32 detect_cross_level_dependency(sql_query_t *query); +bool32 check_and_hash_condition(cond_node_t *node, bool32 *join_cond); +bool32 check_or_hash_condition(cond_node_t *node, bool32 *join_cond); +void 
make_subquery_without_hash_join(sql_stmt_t *stmt, select_node_t *select_node, bool32 is_var); +status_t ct_var_subquery_rewrite(sql_stmt_t *stmt, sql_query_t *query); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_proj_rewrite.c b/pkg/src/ctsql/optimizer/ctsql_proj_rewrite.c new file mode 100644 index 0000000000000000000000000000000000000000..a28cc67ca45efd06f776eee2c0b0c0be95838a9c --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_proj_rewrite.c @@ -0,0 +1,445 @@ +#include "ctsql_proj_rewrite.h" + +bool32 check_select_type_4_elimination(select_node_t *node) +{ + if (node->type == SELECT_NODE_QUERY) { + return CT_TRUE; + } else if (node->type == SELECT_NODE_UNION_ALL) { + return (check_select_type_4_elimination(node->left) && + check_select_type_4_elimination(node->right)); + } + return CT_FALSE; +} + +bool32 is_can_proj_eliminate(sql_table_t *table) +{ + if (table->view_dml || + (table->type != SUBSELECT_AS_TABLE && table->type != VIEW_AS_TABLE) || + table->subslct_tab_usage != SUBSELECT_4_NORMAL_JOIN) { + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The table does not support proj elimination," + "type: %d, subslct_tab_usage: %d", table->type, table->subslct_tab_usage); + return CT_FALSE; + } + + select_node_t *node = table->select_ctx->root; + if (!check_select_type_4_elimination(node)) { + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The node does not support proj elimination," + "type: %d", node->type); + return CT_FALSE; + } + + return CT_TRUE; +} + +static inline bool32 has_window_func_in_order_by(sql_query_t *query) +{ + uint32 i = 0; + while (i < query->sort_items->count) { + sort_item_t *item = (sort_item_t *)cm_galist_get(query->sort_items, i); + if (item->expr->root->type == EXPR_NODE_OVER) { + return CT_TRUE; + } + i++; + } + + return CT_FALSE; +} + +bool32 check_if_support_eliminate4proj(sql_query_t *query) +{ + if (NO_NEED_ELIMINATE(query)) { + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The subquery does not 
support proj elimination," + "group sets: %d, has_distinct: %d, winsort_count: %d, ssa_count: %d", + query->group_sets->count, query->has_distinct, query->winsort_list->count, query->ssa.count); + return CT_FALSE; + } + + if (has_window_func_in_order_by(query)) { + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The window function is referenced by ORDER BY," + "not support proj elimination."); + return CT_FALSE; + } + + return CT_TRUE; +} + +bool32 query_contains_rs_column(sql_table_t *table, uint32 col_id) +{ + bilist_node_t *node = cm_bilist_head(&table->query_fields); + for (; node != NULL; node = BINODE_NEXT(node)) { + query_field_t *field = BILIST_NODE_OF(query_field_t, node, bilist_node); + if (field->col_id < col_id) { + continue; + } + return field->col_id == col_id; + } + + return CT_FALSE; +} + +bool32 is_reserved_field(rs_column_t *rs_col) +{ + if (rs_col->type == RS_COL_COLUMN && + (rs_col->v_col.is_rowid || rs_col->v_col.is_rownodeid)) { + return CT_TRUE; + } + + if (rs_col->type == RS_COL_CALC) { + if (NODE_IS_RES_ROWID(rs_col->expr->root) || NODE_IS_RES_ROWNODEID(rs_col->expr->root) || + NODE_IS_RES_ROWNUM(rs_col->expr->root)) { + return CT_TRUE; + } + } + + if (!CT_BIT_TEST(rs_col->rs_flag, RS_EXIST_ALIAS)) { + if (cm_text_str_equal(&rs_col->name, "ROWID") || cm_text_str_equal(&rs_col->name, "ROWNODEID") || + cm_text_str_equal(&rs_col->name, "ROWSCN")) { + return CT_TRUE; + } + } + + return CT_FALSE; +} + +status_t replace_rs_col_with_null(sql_stmt_t *stmt, rs_column_t *rs_col) +{ + if (sql_create_expr(stmt, &rs_col->expr) != CT_SUCCESS) { + return CT_ERROR; + } + + expr_node_t *root = NULL; + if (sql_alloc_mem(stmt->context, sizeof(expr_node_t), (void **)&root) != CT_SUCCESS) { + return CT_ERROR; + } + + root->owner = rs_col->expr; + root->value.v_res.res_id = RES_WORD_NULL; + root->datatype = CT_DATATYPE_OF_NULL; + root->type = EXPR_NODE_RESERVED; + rs_col->expr->root = root; + rs_col->datatype = CT_DATATYPE_OF_NULL; + rs_col->type = RS_COL_CALC; + return 
CT_SUCCESS; +} + +status_t eliminate_proj_in_expr(visit_assist_t *visit_assist, expr_node_t **node); +status_t handle_column_proj_elimination(visit_assist_t *visit_assist, expr_node_t **node) +{ + var_column_t *v_col = &(*node)->value.v_col; + if (v_col->ancestor != 0) { + visit_assist->result0 = CT_FALSE; + return; + } + + bool32 is_valid_table = (visit_assist->result1 == CT_INVALID_ID32 || visit_assist->result1 == v_col->tab); + if (!visit_assist->result0 && !is_valid_table) { + visit_assist->result0 = CT_FALSE; + } + visit_assist->result1 = v_col->tab; + sql_table_uncache_query_field(visit_assist->param0, v_col); + return CT_SUCCESS; +} + +status_t eliminate_winsort_rs_col(visit_assist_t *visit_assist, sql_query_t *query, expr_node_t **node) +{ + uint32 node_id = VALUE_PTR(var_vm_col_t, &(*node)->value)->id; + rs_column_t *win_rs_col = (rs_column_t *)cm_galist_get(query->winsort_rs_columns, node_id); + + if (win_rs_col->type == RS_COL_COLUMN && win_rs_col->v_col.ancestor > 0) { + return CT_SUCCESS; + } + + if (win_rs_col->type == RS_COL_COLUMN) { + sql_table_uncache_query_field(query, &win_rs_col->v_col); + win_rs_col->win_rs_refs--; + if (win_rs_col->win_rs_refs == 0) { + CT_RETURN_IFERR(replace_rs_col_with_null(visit_assist->stmt, win_rs_col)); + } + } else if (win_rs_col->expr && win_rs_col->expr->root) { + CT_RETURN_IFERR(visit_expr_node(visit_assist, &win_rs_col->expr->root, eliminate_proj_in_expr)); + } + + return CT_SUCCESS; +} + +status_t handle_group_proj_elimination(visit_assist_t *visit_assist, expr_node_t **node) +{ + if (NODE_VM_ANCESTOR(*node)) { + return CT_SUCCESS; + } + + sql_query_t *query = (sql_query_t *)visit_assist->param0; + if (QUERY_HAS_SINGLE_GROUP_BY(query)) { + expr_node_t *origin_ref = sql_get_origin_ref(*node); + return visit_expr_node(visit_assist, &origin_ref, eliminate_proj_in_expr); + } + + return eliminate_winsort_rs_col(visit_assist, query, node); +} + +void set_aggr_node_as_min(expr_node_t *node) +{ + 
node->argument->root->type = EXPR_NODE_CONST; + node->argument->root->datatype = CT_TYPE_INTEGER; + node->argument->root->value.type = CT_TYPE_INTEGER; + node->argument->root->value.is_null = CT_FALSE; + node->argument->root->value.v_int = 0; + node->argument->root->optmz_info.idx = 0; + node->argument->root->optmz_info.mode = OPTIMIZE_AS_CONST; + node->argument->next = NULL; + node->value.v_func.func_id = ID_FUNC_ITEM_MIN; + node->value.v_func.orig_func_id = ID_FUNC_ITEM_MIN; + node->value.v_func.pack_id = CT_INVALID_ID32; + node->dis_info.need_distinct = CT_FALSE; + node->datatype = CT_TYPE_INTEGER; + node->sort_items = NULL; +} + +status_t handle_aggr_proj_elimination(visit_assist_t *visit_assist, expr_node_t **node) +{ + sql_query_t *query = (sql_query_t *)visit_assist->param0; + uint32 node_id = NODE_VALUE(uint32, *node); + expr_node_t *aggr = (expr_node_t *)cm_galist_get(query->aggrs, node_id); + int32 ref_count = aggr->value.v_func.aggr_ref_count; + + ref_count--; + if (ref_count == 0) { + CT_RETURN_IFERR(visit_expr_node(visit_assist, &aggr, eliminate_proj_in_expr)); + set_aggr_node_as_min(aggr); + } + + return CT_SUCCESS; +} + +status_t process_winsort_deletion(visit_assist_t *visit_assist, sql_query_t *query, expr_node_t *node, uint32 i) +{ + uint32 ori_flag = visit_assist->excl_flags; + CM_CLEAN_FLAG(visit_assist->excl_flags, VA_EXCL_WIN_SORT); + CT_RETURN_IFERR(visit_expr_node(visit_assist, &node, eliminate_proj_in_expr)); + visit_assist->excl_flags = ori_flag; + cm_galist_delete(query->winsort_list, i); + if (query->winsort_list->count > 0 && node->win_args->is_rs_node) { + set_winsort_rs_node_flag(query); + } + return CT_SUCCESS; +} + +status_t handle_winsort_proj_elimination(visit_assist_t *visit_assist, expr_node_t **node) +{ + sql_query_t *query = (sql_query_t *)visit_assist->param0; + expr_node_t *win_node = NULL; + uint32 win_count = query->winsort_list->count; + uint32 i = 0; + + while(i < win_count) { + win_node = (expr_node_t 
*)cm_galist_get(query->winsort_list, i); + if (*node!= win_node) { + i++; + continue; + } + CT_RETURN_IFERR(process_winsort_deletion(visit_assist, query, win_node, i)); + break; + } + + uint32 node_id = VALUE_PTR(var_vm_col_t, &(*node)->value)->id; + rs_column_t *win_rs_col = (rs_column_t *)cm_galist_get(query->winsort_rs_columns, node_id); + return replace_rs_col_with_null(visit_assist->stmt, win_rs_col); +} + +void delete_func_expr_from_table(visit_assist_t *visit_assist, expr_node_t **func_node) +{ + sql_query_t *query = (sql_query_t *)visit_assist->param0; + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, visit_assist->result1); + bilist_node_t *node = cm_bilist_head(&table->func_expr); + + for (; node!= NULL; node = BINODE_NEXT(node)) { + func_expr_t *func = BILIST_NODE_OF(func_expr_t, node, bilist_node); + if (sql_expr_node_equal(visit_assist->stmt, func->expr, *func_node, NULL)) { + cm_bilist_del(node, &table->func_expr); + break; + } + } +} + +void update_result_value(visit_assist_t *visit_assist, bool32 ori_result0, uint32 table_id) +{ + visit_assist->result0 = ori_result0 && visit_assist->result0; + bool32 is_same_table = (visit_assist->result1 == CT_INVALID_ID32 || table_id == CT_INVALID_ID32 || + visit_assist->result1 == table_id); + if (!visit_assist->result0 || !is_same_table) { + visit_assist->result0 = CT_FALSE; + } +} + +status_t handle_func_proj_elimination(visit_assist_t *visit_assist, expr_node_t **node) +{ + bool32 ori_result0 = (bool32)visit_assist->result0; + uint32 ori_table_id = visit_assist->result1; + visit_assist->result0 = CT_TRUE; + visit_assist->result1 = CT_INVALID_ID32; + CT_RETURN_IFERR(visit_func_node(visit_assist, *node, eliminate_proj_in_expr)); + + sql_func_t *func = sql_get_func(&(*node)->value.v_func); + if (visit_assist->result0 && visit_assist->result1 != CT_INVALID_ID32 && func->indexable) { + delete_func_expr_from_table(visit_assist, node); + } + + update_result_value(visit_assist, ori_result0, 
ori_table_id); + return CT_SUCCESS; +} + +status_t handle_case_proj_elimination(visit_assist_t *visit_assist, expr_node_t **node) +{ + bool32 ori_result0 = (bool32)visit_assist->result0; + uint32 ori_table_id = visit_assist->result1; + visit_assist->result0 = CT_TRUE; + visit_assist->result1 = CT_INVALID_ID32; + CT_RETURN_IFERR(visit_case_node(visit_assist, *node, eliminate_proj_in_expr)); + + if (visit_assist->result0 && visit_assist->result1 != CT_INVALID_ID32) { + delete_func_expr_from_table(visit_assist, node); + } + + update_result_value(visit_assist, ori_result0, ori_table_id); + return CT_SUCCESS; +} + +status_t eliminate_proj_in_expr(visit_assist_t *visit_assist, expr_node_t **node) +{ + expr_node_type_t type = (*node)->type; + if (type == EXPR_NODE_COLUMN || type == EXPR_NODE_TRANS_COLUMN) { + return handle_column_proj_elimination(visit_assist, node); + } else if (type == EXPR_NODE_GROUP) { + return handle_group_proj_elimination(visit_assist, node); + } else if (type == EXPR_NODE_OVER) { + return handle_winsort_proj_elimination(visit_assist, node); + } else if (type == EXPR_NODE_FUNC) { + return handle_func_proj_elimination(visit_assist, node); + } else if (type == EXPR_NODE_AGGR) { + return handle_aggr_proj_elimination(visit_assist, node); + } else if (type == EXPR_NODE_CASE) { + return handle_case_proj_elimination(visit_assist, node); + } + + return CT_SUCCESS; +} + +status_t process_rs_col_elimination(sql_stmt_t *stmt, sql_query_t *query, rs_column_t *rs_col) +{ + if (rs_col->type == RS_COL_COLUMN) { + if (rs_col->v_col.ancestor == 0) { + sql_table_uncache_query_field(query, &rs_col->v_col); + } + return CT_SUCCESS; + } + + visit_assist_t visit_assist; + sql_init_visit_assist(&visit_assist, stmt, NULL); + visit_assist.param0 = (void *)query; + visit_assist.result0 = CT_TRUE; + visit_assist.excl_flags |= (VA_EXCL_WIN_SORT | VA_EXCL_FUNC | VA_EXCL_CASE); + return visit_expr_tree(&visit_assist, rs_col->expr, eliminate_proj_in_expr); +} + +status_t 
eliminate_rs_column(sql_stmt_t *stmt, sql_query_t *query, uint32 col_id) +{ + rs_column_t *rs_col = (rs_column_t *)cm_galist_get(query->rs_columns, col_id); + if (is_reserved_field(rs_col)) { + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The rs_col is reserved field," + "not support proj elimination."); + return CT_SUCCESS; + } + + if (process_rs_col_elimination(stmt, query, rs_col) != CT_SUCCESS) { + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] Failed to process expr tree."); + return CT_ERROR; + } + + CT_RETURN_IFERR(replace_rs_col_with_null(stmt, rs_col)); + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The result set column is eliminated, col_id: %d", col_id); + return CT_SUCCESS; +} + +status_t rewrite_group_related_columns(sql_stmt_t *stmt, sql_query_t *query) +{ + if (query->winsort_list->count > 0) { + return CT_SUCCESS; + } + + uint32 i = 0; + rs_column_t *rs_col = NULL; + while (i < query->rs_columns->count) { + rs_col = (rs_column_t *)cm_galist_get(query->rs_columns, i); + if (QUERY_HAS_SINGLE_GROUP_BY(query) || rs_col->type != RS_COL_CALC || + rs_col->expr->root->type == EXPR_NODE_RESERVED) { + continue; + } + CT_RETURN_IFERR(replace_group_expr_node(stmt, &rs_col->expr->root)); + i++; + } + + i = 0; + sort_item_t *sort_item = NULL; + while (i < query->sort_items->count) { + sort_item = (sort_item_t *)cm_galist_get(query->sort_items, i); + if (!QUERY_HAS_SINGLE_GROUP_BY(query)) { + CT_RETURN_IFERR(replace_group_expr_node(stmt, &sort_item->expr->root)); + } + i++; + } + + cm_galist_reset(query->winsort_rs_columns); + return CT_SUCCESS; +} + +status_t eliminate_proj_col_in_query(sql_stmt_t *stmt, sql_table_t *table, select_node_t *node) +{ + sql_query_t *subquery = node->query; + if (!check_if_support_eliminate4proj(subquery)) { + return CT_SUCCESS; + } + + uint32 old_winsort_cnt = subquery->winsort_list->count; + uint32 col_id = 0; + while (col_id < subquery->rs_columns->count) { + if (!query_contains_rs_column(table, col_id)) { + CT_RETURN_IFERR(eliminate_rs_column(stmt, subquery, 
col_id)); + } + col_id++; + } + + if (old_winsort_cnt > 0) { + CT_RETURN_IFERR(rewrite_group_related_columns(stmt, subquery)); + } + return CT_SUCCESS; +} + +status_t process_proj_columns_elimination(sql_stmt_t *stmt, sql_table_t *table, select_node_t *node) +{ + switch(node->type) { + case SELECT_NODE_QUERY: + return eliminate_proj_col_in_query(stmt, table, node); + case SELECT_NODE_UNION_ALL: + CT_RETURN_IFERR(process_proj_columns_elimination(stmt, table, node->left)); + CT_RETURN_IFERR(process_proj_columns_elimination(stmt, table, node->right)); + break; + default: + return CT_ERROR; + } + + return CT_SUCCESS; +} + +status_t ct_transf_eliminate_proj_col(sql_stmt_t *stmt, sql_query_t *query) +{ + sql_table_t *table = NULL; + for (uint32 i = 0; i < query->tables.count; i++) { + table = (sql_table_t *)sql_array_get(&query->tables, i); + if (is_can_proj_eliminate(table)) { + select_node_t *root = table->select_ctx->root; + CT_RETURN_IFERR(process_proj_columns_elimination(stmt, table, root)); + } + } + + return CT_SUCCESS; +} \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_proj_rewrite.h b/pkg/src/ctsql/optimizer/ctsql_proj_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..dbad751e6e25a31a55692f2def76038f338ef136 --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_proj_rewrite.h @@ -0,0 +1,47 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * ctsql_proj_rewrite.h + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_proj_rewrite.h + * + * ------------------------------------------------------------------------- + */ +#ifndef __SQL_PROJ_REWRITE_H__ +#define __SQL_PROJ_REWRITE_H__ + +#include "srv_instance.h" +#include "ctsql_func.h" +#include "ctsql_cond_rewrite.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define QUERY_HAS_SINGLE_GROUP_BY(query) ((query)->group_sets->count == 1) +#define NO_NEED_ELIMINATE(query) ((query)->group_sets->count > 1 || (query)->pivot_items != NULL || \ + (query)->connect_by_cond != NULL || (query)->has_distinct || \ + ((query)->winsort_list->count > 0 && (query)->ssa.count > 0)) + +status_t ct_transf_eliminate_proj_col(sql_stmt_t *stmt, sql_query_t *query); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_pushdown_orderby.c b/pkg/src/ctsql/optimizer/ctsql_pushdown_orderby.c new file mode 100644 index 0000000000000000000000000000000000000000..ed7a9cab61ff7a90ebf4f0492f79590cfca33742 --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_pushdown_orderby.c @@ -0,0 +1,246 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * ctsql_pushdown_orderby.c + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_pushdown_orderby.c + * + * ------------------------------------------------------------------------- + */ + +#include "ctsql_pushdown_orderby.h" +#include "srv_instance.h" +#include "ctsql_cond_rewrite.h" +#include "dml_parser.h" +#include "ctsql_hint_verifier.h" + +static bool32 check_query_cond_valid_4_pushdown_orderby(sql_query_t *query) +{ + if (query->owner == NULL || query->sort_items->count == 0 || query->tables.count > 1) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Parent query must be single-table query with orderby."); + return CT_FALSE; + } + + return CT_TRUE; +} + +static bool32 check_query_clause_valid_4_pushdown_orderby(sql_query_t *query) +{ + if (query->has_distinct || query->group_sets->count > 0 || query->winsort_list->count > 0 || + query->connect_by_cond != NULL || query->pivot_items != NULL || query->group_cubes != NULL) { + CT_LOG_DEBUG_INF( + "[ORDERBY_PUSHDOWN] Parent query can't have distinct,group by, winsort, connect by, pivot/unpivot, cube, or limit."); + return CT_FALSE; + } + + if ((query->cond != NULL && query->cond->root->type != COND_NODE_TRUE) || + (query->having_cond != NULL && query->having_cond->root->type != COND_NODE_TRUE)) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Parent query's where/having condition must be empty or TRUE."); + return CT_FALSE; + } + + return CT_TRUE; +} + +static bool32 check_table_valid_4_pushdown_orderby(sql_table_t *table) +{ + if (table->type != SUBSELECT_AS_TABLE && table->type != VIEW_AS_TABLE) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Invalid table type: %u.", table->type); + return CT_FALSE; + } + + return CT_TRUE; +} + +static bool32 check_subquery_valid_4_pushdown_orderby(sql_select_t *subselect, sql_query_t *subquery) +{ + if (subselect->root->type != SELECT_NODE_QUERY || subquery->pivot_items != NULL || + 
LIMIT_CLAUSE_OCCUR(&subquery->limit)) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Subquery check failed, sub-select type: %d.", subselect->root->type); + return CT_FALSE; + } + + return CT_TRUE; +} + +static status_t check_orderby_col_can_push_down(visit_assist_t *va, var_column_t *var_col) +{ + sql_query_t *subquery = (sql_query_t *)va->param0; + rs_column_t *rs_col = (rs_column_t *)cm_galist_get(subquery->rs_columns, var_col->col); + + if ((rs_col->type == RS_COL_COLUMN && rs_col->v_col.is_array) || CT_BIT_TEST(rs_col->rs_flag, RS_COND_UNABLE)) { + va->result0 = CT_TRUE; + } + return CT_SUCCESS; +} + +static status_t check_orderby_pushdown_conditions(visit_assist_t *va, expr_node_t **node) +{ + switch ((*node)->type) { + case EXPR_NODE_CONST: + break; + + case EXPR_NODE_COLUMN: { + var_column_t *var_col = VALUE_PTR(var_column_t, &(*node)->value); + if (var_col->ancestor > 0) { + va->result0 = CT_TRUE; + break; + } + return check_orderby_col_can_push_down(va, var_col); + } + + default: + va->result0 = CT_TRUE; + break; + } + return CT_SUCCESS; +} + +static inline bool32 check_sort_items_for_pushdown(sql_stmt_t *stmt, galist_t *sort_items, sql_query_t *subquery) +{ + visit_assist_t va; + sql_init_visit_assist(&va, stmt, NULL); + + va.param0 = (void *)subquery; + va.result0 = CT_FALSE; + + uint32 idx = 0; + while (idx < sort_items->count) { + sort_item_t *item = (sort_item_t *)cm_galist_get(sort_items, idx++); + (void)visit_expr_tree(&va, item->expr, check_orderby_pushdown_conditions); + if ((bool32)va.result0) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Sort item can't be pushed down."); + return CT_FALSE; + } + } + + return CT_TRUE; +} + +static bool32 sql_validate_orderby_pushdown(sql_stmt_t *stmt, sql_query_t *query) +{ + if (!check_query_cond_valid_4_pushdown_orderby(query) || !check_query_clause_valid_4_pushdown_orderby(query)) { + return CT_FALSE; + } + + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, 0); + if 
(!check_table_valid_4_pushdown_orderby(table)) { + return CT_FALSE; + } + + sql_select_t *subselect = table->select_ctx; + sql_query_t *subquery = subselect->first_query; + if (!check_subquery_valid_4_pushdown_orderby(subselect, subquery)) { + return CT_FALSE; + } + + return check_sort_items_for_pushdown(stmt, query->sort_items, subquery); +} + +static status_t sql_handle_select_node(sql_query_t *query, expr_node_t **node, sql_query_t *subquery) +{ + sql_select_t *select_ctx = (sql_select_t *)VALUE_PTR(var_object_t, &(*node)->value)->ptr; + + if (sql_array_delete(&query->ssa, (*node)->value.v_obj.id) != CT_SUCCESS) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Failed to delete SSA entry."); + return CT_ERROR; + } + + if (query->ssa.count > 0 && sql_update_query_ssa(query) != CT_SUCCESS) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Failed to update SSA."); + return CT_ERROR; + } + + select_ctx->parent = subquery; + (*node)->value.v_obj.id = subquery->ssa.count; + + if (sql_array_put(&subquery->ssa, select_ctx) != CT_SUCCESS) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Failed to add SSA entry."); + return CT_ERROR; + } + + return CT_SUCCESS; +} + +static status_t sql_handle_column_node(sql_stmt_t *stmt, expr_node_t **node, sql_query_t *subquery) +{ + var_column_t *var_col = VALUE_PTR(var_column_t, &(*node)->value); + rs_column_t *rs_col = (rs_column_t *)cm_galist_get(subquery->rs_columns, var_col->col); + + if (rs_col->type == RS_COL_COLUMN) { + var_col->tab = rs_col->v_col.tab; + var_col->col = rs_col->v_col.col; + var_col->ss_start = rs_col->v_col.ss_start; + var_col->ss_end = rs_col->v_col.ss_end; + var_col->ancestor = rs_col->v_col.ancestor; + var_col->datatype = rs_col->v_col.datatype; + return CT_SUCCESS; + } + + return sql_clone_expr_node(stmt->context, rs_col->expr->root, node, sql_alloc_mem); +} + +status_t sql_modify_col_4_push_down(visit_assist_t *va, expr_node_t **node) +{ + sql_query_t *subquery = (sql_query_t *)va->param0; + + switch ((*node)->type) { + case 
EXPR_NODE_SELECT: + return sql_handle_select_node(va->query, node, subquery); + + case EXPR_NODE_COLUMN: + return sql_handle_column_node(va->stmt, node, subquery); + + default: + return CT_SUCCESS; + } +} + +static status_t ct_perform_pushdown_orderby(sql_stmt_t *stmt, sql_query_t *query) +{ + visit_assist_t va; + sql_init_visit_assist(&va, stmt, query); + + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, 0); + sql_query_t *subquery = table->select_ctx->first_query; + va.param0 = (void *)subquery; + + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Pushing down order by to subquery, table[%s].", table->name); + uint32 idx = 0; + while (idx < subquery->sort_items->count) { + sort_item_t *item = (sort_item_t *)cm_galist_get(subquery->sort_items, idx++); + if (visit_expr_tree(&va, item->expr, sql_modify_col_4_push_down) != CT_SUCCESS) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] Column modification failed."); + return CT_ERROR; + } + } + + subquery->sort_items = query->sort_items; + subquery->order_siblings = query->order_siblings; + return sql_create_list(stmt, &query->sort_items); +} + +status_t ct_transf_pushdown_orderby(sql_stmt_t *stmt, sql_query_t *query) +{ + if (!sql_validate_orderby_pushdown(stmt, query)) { + CT_LOG_DEBUG_INF("[ORDERBY_PUSHDOWN] ORDER BY can't be pushed down."); + return CT_SUCCESS; + } + + return ct_perform_pushdown_orderby(stmt, query); +} \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_pushdown_orderby.h b/pkg/src/ctsql/optimizer/ctsql_pushdown_orderby.h new file mode 100644 index 0000000000000000000000000000000000000000..e8429fa12711598824d96d0740499fee4d863f65 --- /dev/null +++ b/pkg/src/ctsql/optimizer/ctsql_pushdown_orderby.h @@ -0,0 +1,42 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * ctsql_pushdown_orderby.h + * + * + * IDENTIFICATION + * src/ctsql/optimizer/ctsql_pushdown_orderby.h + * + * ------------------------------------------------------------------------- + */ +#ifndef __CTSQL_PUSHDOWN_ORDERBY_H__ +#define __CTSQL_PUSHDOWN_ORDERBY_H__ + +#include "ctsql_stmt.h" +#include "ctsql_expr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +status_t ct_transf_pushdown_orderby(sql_stmt_t *stmt, sql_query_t *query); +status_t sql_modify_col_4_push_down(visit_assist_t *va, expr_node_t **node); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/pkg/src/ctsql/optimizer/ctsql_transform.c b/pkg/src/ctsql/optimizer/ctsql_transform.c index 095f784c957f55a9dd59cd8c2cd30c5d259fcf0e..1f79d9a2df49e1a7f0a91404d9dc43fe07c99c77 100644 --- a/pkg/src/ctsql/optimizer/ctsql_transform.c +++ b/pkg/src/ctsql/optimizer/ctsql_transform.c @@ -32,11 +32,26 @@ #include "ctsql_cond_rewrite.h" #include "plan_rbo.h" #include "plan_join.h" +#include "ctsql_proj_rewrite.h" +#include "ctsql_distinct_rewrite.h" +#include "ctsql_pushdown_orderby.h" +#include "ctsql_hash_mtrl_rewrite.h" #ifdef __cplusplus extern "C" { #endif + +static transform_sql_t g_transformers[] = { + { CTSQL_TYPE_NONE, sql_transform_dummy }, + { CTSQL_TYPE_SELECT, ctsql_optimize_logic_select }, + { CTSQL_TYPE_UPDATE, ctsql_optimize_logic_update }, + { CTSQL_TYPE_INSERT, ctsql_optimize_logic_insert }, + { CTSQL_TYPE_DELETE, ctsql_optimize_logic_delete }, + { 
CTSQL_TYPE_MERGE, ctsql_optimize_logic_merge }, + { CTSQL_TYPE_REPLACE, ctsql_optimize_logic_replace }, + }; + typedef status_t (*sql_optimizer_func_t)(sql_stmt_t *stmt, sql_query_t *query); typedef struct st_sql_optimizer { @@ -44,6 +59,16 @@ typedef struct st_sql_optimizer { sql_optimizer_func_t optimizer; } sql_optimizer_t; +status_t sql_transform_dummy(sql_stmt_t *stmt, void *entry) +{ + text_t *ctsql = (text_t *)&stmt->session->lex->text; + if (ctsql) { + CT_LOG_DEBUG_INF("transfrom sql nothing to do, SQL is %s.", T2S(ctsql)); + } + + return CT_SUCCESS; +} + status_t create_new_table_4_rewrite(sql_stmt_t *stmt, sql_query_t *query, sql_select_t *subslct) { sql_table_t *table = NULL; @@ -69,18 +94,96 @@ static inline status_t sql_add_sort_group(sql_query_t *query, sort_item_t *item) return CT_SUCCESS; } -static inline bool32 chk_aggr_4_sort_group(sql_aggr_type_t aggr_type) +static inline bool32 check_aggr_4_sort_group(sql_aggr_type_t aggr_type) { - const sql_aggr_type_t sg_aggrs[] = { - AGGR_TYPE_AVG, AGGR_TYPE_SUM, AGGR_TYPE_MIN, AGGR_TYPE_MAX, - AGGR_TYPE_COUNT, AGGR_TYPE_MEDIAN, AGGR_TYPE_DENSE_RANK - }; - for (uint32 i = 0; i < ARRAY_NUM(sg_aggrs); ++i) { - if (aggr_type == sg_aggrs[i]) { + switch (aggr_type) { + case AGGR_TYPE_AVG: + case AGGR_TYPE_SUM: + case AGGR_TYPE_MIN: + case AGGR_TYPE_MAX: + case AGGR_TYPE_COUNT: + case AGGR_TYPE_MEDIAN: + case AGGR_TYPE_DENSE_RANK: return CT_TRUE; + default: + return CT_FALSE; + } +} + +/* Check if query qualifies for sort-groupby optimization */ +static bool32 check_query_4_sort_groupby(sql_stmt_t *stmt, sql_query_t *query) +{ + // Validate optimization preconditions + if (query->has_distinct || query->winsort_list->count > 0 || query->group_sets->count != 1 || + query->sort_items->count == 0) { + CT_LOG_DEBUG_ERR("[SORT_GROUPBY] The query has distinct or winsort or group sets or sort items."); + return CT_FALSE; + } + + // Verify aggregate functions compatibility + uint32 aggrs_idx = 0; + while (aggrs_idx < 
query->aggrs->count) { + expr_node_t *aggr_node = (expr_node_t *)cm_galist_get(query->aggrs, aggrs_idx++); + const sql_func_t *func = sql_get_func(&aggr_node->value.v_func); + if (!check_aggr_4_sort_group(func->aggr_type)) { + CT_LOG_DEBUG_INF("[SORT_GROUPBY] Aggregation function type %u is not supported for sort-group optimization", + func->aggr_type); + return CT_FALSE; } } - return CT_FALSE; + + // Validate sort items structure + uint32 item_idx = 0; + while (item_idx < query->sort_items->count) { + sort_item_t *item = (sort_item_t *)cm_galist_get(query->sort_items, item_idx++); + if (item->expr->root->type != EXPR_NODE_GROUP || NODE_VM_ANCESTOR(item->expr->root) > 0) { + CT_LOG_DEBUG_ERR("[SORT_GROUPBY] The sort item is not in group list or contains ancestor column."); + query->sort_groups = NULL; + return CT_FALSE; + } + } + + return CT_TRUE; +} + +/* Perform sort-groupby optimization transformation */ +static status_t ct_perform_sort_groupby_optimize(sql_stmt_t *stmt, sql_query_t *query) +{ + // Initialize sort groups list + if (sql_create_list(stmt, &query->sort_groups) != CT_SUCCESS) { + cm_reset_error(); + // Avoid dangling pointers + query->sort_groups = NULL; + CT_LOG_DEBUG_ERR("[SORT_GROUPBY] Failed to create sort groups list, continue without optimization."); + return CT_SUCCESS; + } + + // Convert sort items to sort groups + uint32 idx = 0; + while (idx < query->sort_items->count) { + sort_item_t *item = (sort_item_t *)cm_galist_get(query->sort_items, idx++); + if (sql_add_sort_group(query, item) != CT_SUCCESS) { + cm_reset_error(); + // Clear the created list when a single sort item fails to process + query->sort_groups = NULL; + CT_LOG_DEBUG_ERR("[SORT_GROUPBY] Failed to add sort group, continue without optimization."); + return CT_SUCCESS; + } + } + + // Clear original sort items after transformation + cm_galist_reset(query->sort_items); + return CT_SUCCESS; +} + +// group by f1 order by f1 => sort group f1 +static status_t 
ct_transf_sort_groupby(sql_stmt_t *stmt, sql_query_t *query) +{ + if (!check_query_4_sort_groupby(stmt, query)) { + return CT_SUCCESS; + } + + return ct_perform_sort_groupby_optimize(stmt, query); } static inline bool32 sql_is_grouping_func(expr_node_t *node) @@ -140,6 +243,21 @@ status_t sql_get_table_join_cond(sql_stmt_t *stmt, sql_array_t *l_tables, sql_ar join_cond->table1 = left_table->id; join_cond->table2 = right_table->id; cm_bilist_add_tail(&join_cond->bilist_node, join_conds); + + if (left_table->join_info.count == 0) { + cm_bilist_init(&left_table->join_info); + } + if (right_table->join_info.count == 0) { + cm_bilist_init(&right_table->join_info); + } + tbl_join_info_t *left_join_info = NULL, *right_join_info = NULL; + CT_RETURN_IFERR(sql_stack_alloc(stmt, sizeof(tbl_join_info_t), (void **)&left_join_info)); + CT_RETURN_IFERR(sql_stack_alloc(stmt, sizeof(tbl_join_info_t), (void **)&right_join_info)); + sql_bitmap_union_singleton(join_cond->table1, join_cond->table2, &left_join_info->table_ids); + sql_bitmap_union_singleton(join_cond->table1, join_cond->table2, &right_join_info->table_ids); + cm_bilist_add_tail(&left_join_info->bilist_node, &left_table->join_info); + cm_bilist_add_tail(&right_join_info->bilist_node, &right_table->join_info); + CT_RETURN_IFERR(sql_stack_alloc(stmt, sizeof(join_cond_t), (void **)&join_cond)); cm_galist_init(&join_cond->cmp_nodes, stmt, sql_stack_alloc); } @@ -343,6 +461,211 @@ status_t sql_set_old_query_block_name(sql_stmt_t *stmt, sql_query_t *query, new_ return CT_SUCCESS; } +status_t ctsql_optimize_logically(sql_stmt_t *stmt) +{ + uint32 count = sizeof(g_transformers) / sizeof(transform_sql_t) - 1; + uint32 index = (uint32)stmt->context->type; + SAVE_AND_RESET_NODE_STACK(stmt); + + // first optmize logically with as sql. 
+ if (ctsql_optimize_logic_withas(stmt, stmt->context->withas_entry)) { + return CT_ERROR; + } + + if (index <= count) { + CT_RETURN_IFERR(g_transformers[index].tranform(stmt, stmt->context->entry)); + } + + SQL_RESTORE_NODE_STACK(stmt); + return CT_SUCCESS; +} + +status_t ctsql_transform_one_rule(sql_stmt_t *stmt, sql_query_t *query, const char* rule_name, + sql_tranform_rule_func_t proc) +{ + if (proc(stmt, query) == CT_SUCCESS) { + CT_LOG_DEBUG_INF("Succeed to transform rule:%s", rule_name); + return CT_SUCCESS; + } + return CT_ERROR; +} + +status_t ctsql_apply_rule_set(sql_stmt_t *stmt, sql_query_t *query) +{ + // 1. transform to delete unusable orderby. + // 2. transform or condition. + // 3. transform predicate delivery. + CTSQL_APPLY_RULE(stmt, query, ct_transf_predicate_delivery); + // 4. transform to eliminate no usable distinct. + CTSQL_APPLY_RULE(stmt, query, ct_transf_eliminate_distinct); + // 5. transform to eliminate projection column. + CTSQL_APPLY_RULE(stmt, query, ct_transf_eliminate_proj_col); + // 6. transform to erase sub-select table + //CTSQL_APPLY_RULE(stmt, query, ct_transf_select_erase); + // 7. transform to push down connectby. + // 8. transform in 2 exist. + // 9. transform sub-select to table. + // 10. transform sub-select by winMagic. + // 11. transform sub_select by hash mtrl. + CTSQL_APPLY_RULE(stmt, query, ct_var_subquery_rewrite); + // 12. transform cartesian join(push down grouped aggregation). + // 13. transform to eliminate outer join. + // 14. transform to optimize connectby. + // 15. transfrom predicate delivery. + // 16. transform to push down orderby. + CTSQL_APPLY_RULE(stmt, query, ct_transf_pushdown_orderby); + // 17. transform to optimize sort groupby. + CTSQL_APPLY_RULE(stmt, query, ct_transf_sort_groupby); + // 18. transform cube group. + // 19. transform condition order. + CTSQL_APPLY_RULE(stmt, query, ct_cond_rewrite_4_chg_order); + // 20. binding parameters peek. 
+ + return CT_SUCCESS; +} + +status_t ctsql_tranform_subselect_in_expr(sql_stmt_t *stmt, sql_query_t *query) +{ + sql_array_t *ssa = &query->ssa; + sql_select_t *select = NULL; + uint32 index = 0; + while (index < ssa->count) { + select = (sql_select_t*)sql_array_get(ssa, index++); + CT_RETURN_IFERR(ctsql_optimize_logic_select_node(stmt, select->root)); + } + + return CT_SUCCESS; +} + +static inline bool32 ctsql_check_subselect_if_as_table(sql_table_t *table) +{ + if ((table->type == SUBSELECT_AS_TABLE || table->type == VIEW_AS_TABLE) + && table->subslct_tab_usage == SUBSELECT_4_NORMAL_JOIN) { + return CT_TRUE; + } + + return CT_FALSE; +} + +status_t ctsql_tranform_subselect_as_table(sql_stmt_t *stmt, sql_query_t *query) +{ + sql_array_t *tables = &query->tables; + sql_table_t *tab = NULL; + uint32 index = 0; + while (index < tables->count) { + tab = (sql_table_t*)sql_array_get(tables, index++); + if (ctsql_check_subselect_if_as_table(tab)) { + CT_RETURN_IFERR(ctsql_optimize_logic_select_node(stmt, tab->select_ctx->root)); + } + } + + return CT_SUCCESS; +} + +status_t ctsql_transform_query(sql_stmt_t *stmt, sql_query_t *query) +{ + + if (ctsql_apply_rule_set(stmt, query)) { + CT_LOG_DEBUG_ERR("Failed to apply rule set."); + return CT_ERROR; + } + + // transform subselect in expressions. + CT_RETURN_IFERR(ctsql_tranform_subselect_in_expr(stmt, query)); + // transform subselect in 'from table'. 
+ CT_RETURN_IFERR(ctsql_tranform_subselect_as_table(stmt, query)); + + return CT_SUCCESS; +} + +status_t ctsql_optimize_logic_select_node(sql_stmt_t *stmt, select_node_t *node) +{ + if (node->type == SELECT_NODE_QUERY) { + return ctsql_transform_query(stmt, node->query); + } + CT_RETURN_IFERR(ctsql_optimize_logic_select_node(stmt, node->left)); + return ctsql_optimize_logic_select_node(stmt, node->right); +} + +status_t ctsql_optimize_logic_select(sql_stmt_t *stmt, void *entry) +{ + select_node_t *node = ((sql_select_t*)entry)->root; + return ctsql_optimize_logic_select_node(stmt, node); +} + +status_t ctsql_optimize_logic_insert(sql_stmt_t *stmt, void *entry) +{ + sql_insert_t *insert = (sql_insert_t*)entry; + if (insert->select_ctx) { + return ctsql_optimize_logic_select(stmt, insert->select_ctx); + } + return CT_SUCCESS; +} + +status_t ctsql_optimize_logic_replace(sql_stmt_t *stmt, void *entry) +{ + sql_replace_t *replace = (sql_replace_t*)entry; + return ctsql_optimize_logic_insert(stmt, &replace->insert_ctx); +} + +status_t ctsql_optimize_logic_delete(sql_stmt_t *stmt, void *entry) +{ + sql_delete_t *delete = (sql_delete_t*)entry; + return ctsql_transform_query(stmt, delete->query); +} + +status_t ctsql_transform_update(sql_stmt_t *stmt, sql_update_t *update) +{ + // TODO: + return CT_SUCCESS; +} + +status_t ctsql_optimize_logic_update(sql_stmt_t *stmt, void *entry) +{ + sql_update_t *update = (sql_update_t*)entry; + if (ctsql_transform_query(stmt, update->query)) { + CT_LOG_DEBUG_ERR("Failed to transform update sql."); + return CT_ERROR; + } + return ctsql_transform_update(stmt, update); +} + +status_t ctsql_optimize_logic_merge(sql_stmt_t *stmt, void *entry) +{ + sql_merge_t *merge = (sql_merge_t*)entry; + // transform subselect in expressions. + if (ctsql_tranform_subselect_in_expr(stmt, merge->query)) { + CT_LOG_DEBUG_ERR("Failed to transform merge sql in expr."); + return CT_ERROR; + } + + // transform subselect in 'from table'. 
+ if (ctsql_tranform_subselect_as_table(stmt, merge->query)) { + CT_LOG_DEBUG_ERR("Failed to transform merge sql in subtable."); + return CT_ERROR; + } + // do predicate devlivery one time. + CTSQL_APPLY_RULE(stmt, merge->query, ct_transf_predicate_delivery); + return CT_SUCCESS; +} + +status_t ctsql_optimize_logic_withas(sql_stmt_t *stmt, void *entry) +{ + sql_withas_t *withas = (sql_withas_t *)entry; + uint32 i = 0; + if (!withas) { + return CT_SUCCESS; + } + + while (i < withas->withas_factors->count) { + sql_withas_factor_t *item = (sql_withas_factor_t *)cm_galist_get(withas->withas_factors, i++); + if (ctsql_optimize_logic_select(stmt, item->subquery_ctx)) { + return CT_ERROR; + } + } + return CT_SUCCESS; +} + #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/optimizer/ctsql_transform.h b/pkg/src/ctsql/optimizer/ctsql_transform.h index d2bed9ac700746ecf685fd9887c50a2551125fde..6a2dc381a81611acf940f225551721e63a91f622 100644 --- a/pkg/src/ctsql/optimizer/ctsql_transform.h +++ b/pkg/src/ctsql/optimizer/ctsql_transform.h @@ -65,6 +65,18 @@ extern "C" { #define SQL_IS_DUAL_TABLE(table) \ ((table)->type == NORMAL_TABLE && (table)->entry->dc.oid == 10 && (table)->entry->dc.uid == 0) +typedef status_t (*sql_tranform_rule_func_t)(sql_stmt_t *stmt, sql_query_t *query); +status_t ctsql_transform_one_rule(sql_stmt_t *stmt, sql_query_t *query, const char* rule_name, + sql_tranform_rule_func_t proc); + +#define CTSQL_APPLY_RULE(s, q, p) \ + do { \ + if (ctsql_transform_one_rule(s, q, #p, p)) { \ + CT_LOG_DEBUG_ERR("Failed to transform one rule=%s", #p); \ + } \ + } while (0); + + typedef enum en_new_query_type { QUERY_TYPE_OR_EXPAND = 0, QUERY_TYPE_SUBQRY_TO_TAB, @@ -78,8 +90,23 @@ typedef struct st_new_qb_info { text_t suffix; } new_qb_info_t; -status_t sql_transform(sql_stmt_t *stmt); -status_t sql_transform_select(sql_stmt_t *stmt, select_node_t *node); +typedef status_t (*sql_tranform_func_t)(sql_stmt_t *stmt, void *entry); +status_t sql_transform_dummy(sql_stmt_t 
*stmt, void *entry); +typedef struct st_transform_sql { + sql_type_t type; + sql_tranform_func_t tranform; +} transform_sql_t; + +status_t ctsql_optimize_logically(sql_stmt_t *stmt); +status_t ctsql_optimize_logic_select(sql_stmt_t *stmt, void *entry); +status_t ctsql_optimize_logic_select_node(sql_stmt_t *stmt, select_node_t *node); +status_t ctsql_optimize_logic_insert(sql_stmt_t *stmt, void *entry); +status_t ctsql_optimize_logic_replace(sql_stmt_t *stmt, void *entry); +status_t ctsql_optimize_logic_delete(sql_stmt_t *stmt, void *entry); +status_t ctsql_optimize_logic_update(sql_stmt_t *stmt, void *entry); +status_t ctsql_optimize_logic_withas(sql_stmt_t *stmt, void *entry); +status_t ctsql_optimize_logic_merge(sql_stmt_t *stmt, void *entry); + status_t phase_1_transform_query(sql_stmt_t *stmt, sql_query_t *query); status_t phase_2_transform_query(sql_stmt_t *stmt, sql_query_t *query); status_t try_chged_2_nest_loop(sql_stmt_t *stmt, sql_join_node_t *join_node); diff --git a/pkg/src/ctsql/parser/ctsql_parser.c b/pkg/src/ctsql/parser/ctsql_parser.c index c1eaf7ae98856f880c0932a41f97284f3628348a..f7a8565a34fe8fbe24360d885791576951cd4aec 100644 --- a/pkg/src/ctsql/parser/ctsql_parser.c +++ b/pkg/src/ctsql/parser/ctsql_parser.c @@ -280,26 +280,23 @@ lang_type_t sql_diag_lang_type(sql_stmt_t *stmt, sql_text_t *sql, word_t *leader } } +sql_parser_t g_sql_parser[] = { + {LANG_DML, sql_parse_dml}, + {LANG_DCL, sql_parse_dcl}, + {LANG_DDL, sql_parse_ddl}, + {LANG_PL, sql_parse_pl}, + {LANG_EXPLAIN, sql_parse_explain} +}; + static status_t sql_parse_by_lang_type(sql_stmt_t *stmt, sql_text_t *sql_text, word_t *leader_word) { status_t status; - switch (stmt->lang_type) { - case LANG_DML: - status = sql_parse_dml(stmt, leader_word->id); - break; - case LANG_DDL: - status = sql_parse_ddl(stmt, leader_word->id); - break; - case LANG_DCL: - status = sql_parse_dcl(stmt, leader_word->id); - break; - case LANG_PL: - status = sql_parse_pl(stmt, leader_word); - break; - default: - 
CT_SRC_THROW_ERROR(sql_text->loc, ERR_SQL_SYNTAX_ERROR, "key word expected"); - status = CT_ERROR; + if (stmt->lang_type < LANG_MAX && stmt->lang_type != LANG_INVALID) { + status = g_sql_parser[stmt->lang_type - LANG_DML].sql_parse(stmt, leader_word); // NOTE(review): indexing assumes lang_type values LANG_DML..LANG_EXPLAIN are contiguous and ordered exactly as the g_sql_parser entries (DML, DCL, DDL, PL, EXPLAIN) — the old switch ordered DML, DDL, DCL, and the per-entry .type field is never checked against stmt->lang_type; confirm enum order + } else { + CT_SRC_THROW_ERROR(sql_text->loc, ERR_SQL_SYNTAX_ERROR, "key word expected"); + status = CT_ERROR; } text_t sql_log_text = stmt->session->sql_audit.sql; diff --git a/pkg/src/ctsql/parser/ctsql_parser.h b/pkg/src/ctsql/parser/ctsql_parser.h index 457a2c977a4daadeddb67fe6cbd94c9a0562e596..a8e5b7fdb76b4c876ceb2f2baa1307b7a6b74830 100644 --- a/pkg/src/ctsql/parser/ctsql_parser.h +++ b/pkg/src/ctsql/parser/ctsql_parser.h @@ -33,10 +33,11 @@ extern "C" { #endif +typedef status_t (*sql_parse_stmt)(sql_stmt_t *stmt, word_t *leader); + typedef struct st_sql_parser { - memory_context_t *context; - text_t user; - lex_t lex; + lang_type_t type; + sql_parse_stmt sql_parse; } sql_parser_t; status_t sql_parse(sql_stmt_t *stmt, text_t *sql, source_location_t *loc); diff --git a/pkg/src/ctsql/parser/ctsql_unparser.c b/pkg/src/ctsql/parser/ctsql_unparser.c new file mode 100644 index 0000000000000000000000000000000000000000..0aba128c2707f5eae122cd7583a47d7795838586 --- /dev/null +++ b/pkg/src/ctsql/parser/ctsql_unparser.c @@ -0,0 +1,901 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ * ------------------------------------------------------------------------- + * + * ctsql_unparser.c + * + * + * IDENTIFICATION + * src/ctsql/parser/ctsql_unparser.c + * + * ------------------------------------------------------------------------- + */ + +#include "ctsql_unparser.h" +#include "var_cast.h" +#include "cm_text.h" +#include "cm_word.h" + +// keep the order consistent with expr_node_type_t +static const char* g_expr_oper[] = { " * ", " / ", " % ", " + ", " - ", " << ", " >> ", " & ", " ^ ", " | ", " || " }; + +// keep the order consistent with reserved_wid_t +static const char* g_reserved_word[] = { + "CONNECT_BY_ISCYCLE", + "CONNECT_BY_ISLEAF", + "", + "DEFAULT", + "DELETING", + "FALSE", + "INSERTING", + "LEVEL", + "NULL", + "ROWID", + "ROWNUM", + "ROWSCN", + "SESSIONTIMEZONE", + "SYSDATE", + "SYSTIMESTAMP", + "TRUE", + "UPDATING", + "USER", + "DBTIMEZONE", + "CURRENT_DATE", + "CURRENT_TIMESTAMP", + "LOCALTIMESTAMP", + "DUMMY", + "UTC_TIMESTAMP", + "", + "ROWNODEID", +}; + +typedef status_t (*ctsql_unparse_query)(sql_query_t *query, var_text_t *result); +status_t ctsql_unparse_select_info(select_node_t *sel_root, var_text_t *result, bool32 add_brkt); +status_t ctsql_unparse_expr_node(sql_query_t *query, expr_node_t *expr, var_text_t *result, bool32 table_unparsed); +status_t ctsql_unparse_expr_tree(sql_query_t *query, expr_tree_t *expr, var_text_t *result); +status_t ctsql_unparse_expr_tree_list(sql_query_t *query, galist_t *list, var_text_t *result); +status_t ctsql_unparse_node_func_args(sql_query_t *query, expr_tree_t *args, uint32 func_id, var_text_t *result); +status_t ctsql_unparse_node_func(sql_query_t *query, expr_node_t *node, var_text_t *result, bool32 table_unparsed); +status_t ctsql_unparse_cond_node(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result); +status_t ctsql_unparse_join_tree(sql_query_t *query, sql_join_node_t *join_root, var_text_t *result); +bool32 ctsql_unparse_cond_need(cond_tree_t *cond); + 
+status_t ctsql_unparse_expr_operation(sql_query_t *query, expr_node_t *expr, var_text_t *result, bool32 table_unparsed) +{ + CT_RETURN_IFERR(ctsql_unparse_expr_node(query, expr->left, result, table_unparsed)); + CT_RETURN_IFERR(cm_concat_var_string(result, g_expr_oper[expr->type - EXPR_NODE_MUL])); + return ctsql_unparse_expr_node(query, expr->right, result, table_unparsed); +} + +status_t ctsql_unparse_prior_node(sql_query_t *query, expr_node_t *expr, var_text_t *result, bool32 table_unparsed) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, "PRIOR ")); + return ctsql_unparse_expr_node(query, expr->right, result, table_unparsed); +} + +status_t ctsql_unparse_reserved_node(expr_node_t *expr, var_text_t *result) +{ + reserved_wid_t res_id = VAR_RES_ID(&expr->value); + if (res_id >= RES_WORD_CONNECT_BY_ISCYCLE && res_id <= RES_WORD_ROWNODEID) { + return cm_concat_var_string(result, g_reserved_word[res_id - RES_WORD_CONNECT_BY_ISCYCLE]); + } + CT_THROW_ERROR(ERR_UNSUPPORT_OPER_TYPE, "reserved", (((expr_node_t *)expr)->value).v_int); + return CT_ERROR; +} + +status_t ctsql_unparse_query_cols(sql_query_t *query, var_text_t *result) +{ + if (query->is_exists_query) { + return cm_concat_var_string(result, " 1"); + } + galist_t *cols = NULL; + if (query->has_distinct) { + CT_RETURN_IFERR(cm_concat_var_string(result, " DISTINCT")); + cols = query->distinct_columns; + } else { + cols = query->rs_columns; + } + + if (cols->count == 0) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " ")); + for (uint32 i = 0; i < cols->count; i++) { + rs_column_t *col = (rs_column_t *)cm_galist_get(cols, i); + if (col->type == RS_COL_CALC) { + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, col->expr, result)); + if (CT_BIT_TEST(col->rs_flag, RS_EXIST_ALIAS)) { + CT_RETURN_IFERR(cm_concat_var_string(result, " AS ")); + CT_RETURN_IFERR(cm_concat_n_var_string(result, col->name.str, col->name.len)); + } + } else { + CT_RETURN_IFERR(cm_concat_n_var_string(result, 
col->name.str, col->name.len)); + } + if (i < cols->count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ", ")); + } + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_pivot_table(sql_query_t *query, pivot_items_t *items, var_text_t *result) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, " PIVOT(")); + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, items->aggr_expr, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, " FOR (")); + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, items->for_expr, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ") IN(")); + CT_RETURN_IFERR(ctsql_unparse_in_expr(query, items->in_expr, sql_expr_list_len(items->for_expr), result)); + return cm_concat_var_string(result, "))"); +} + +status_t ctsql_unparse_unpivot_name(galist_t *list, var_text_t *result) +{ + for (uint32 i = 0 ; i < list->count; i++) { + text_t *name = (text_t *)cm_galist_get(list, i); + CT_RETURN_IFERR(cm_concat_n_var_string(result, name->str, name->len)); + if (i < list->count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ",")); + } + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_unpivot_column(pivot_items_t *items, var_text_t *result) +{ + uint32 data_count = items->unpivot_data_rs; + for(uint32 i = 0; i < items->column_name->count; i++) { + if (i % data_count == 0) { + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + } + text_t *name = (text_t *)cm_galist_get(items->column_name, i); + CT_RETURN_IFERR(cm_concat_n_var_string(result, name->str, name->len)); + if (i % data_count == data_count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + } + if (i < items->column_name->count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ",")); + } + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_unpivot_table(sql_query_t *query, pivot_items_t *items, var_text_t *result) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, " UNPIVOT")); + if (items->include_nulls) { + CT_RETURN_IFERR(cm_concat_var_string(result, " 
INCLUDE NULLS((")); + } else { + CT_RETURN_IFERR(cm_concat_var_string(result, " EXCLUDE NULLS((")); + } + CT_RETURN_IFERR(ctsql_unparse_unpivot_name(items->unpivot_data_rs, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ") FOR (")); + CT_RETURN_IFERR(ctsql_unparse_unpivot_name(items->unpivot_alias_rs, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ") IN(")); + CT_RETURN_IFERR(ctsql_unparse_unpivot_column(items, result)); + return cm_concat_var_string(result, "))"); +} + +status_t ctsql_unparse_pivot_or_unpivot_table(sql_table_t *tbl, sql_query_t *query, var_text_t *result) +{ + if (query->tables.count > 1) { + CT_RETURN_IFERR(ctsql_unparse_join_tree(query, query->join_assist.join_node, result)); + } else { + sql_table_t *sub_tbl = (sql_table_t *)sql_array_get(&query->tables, 0); + CT_RETURN_IFERR(ctsql_unparse_table_info(query, sub_tbl, result)); + } + + pivot_items_t *items = query->pivot_items; + if (query->pivot_items->type == PIVOT_TYPE) { + CT_RETURN_IFERR(ctsql_unparse_pivot_table(query, items, result)); + } else { + CT_RETURN_IFERR(ctsql_unparse_unpivot_table(query, items, result)); + } + + if (tbl->alias.implicit || tbl->alias.len == 0) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " ")); + return cm_concat_n_var_string(result, tbl->alias.str, tbl->alias.len); +} + +status_t ctsql_unparse_subselect_table(select_node_t *root, var_text_t *result) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, " (")); + CT_RETURN_IFERR(ctsql_unparse_select_info(root, result, CT_FALSE)); + return cm_concat_var_string(result, ")"); +} + +status_t ctsql_unparse_func_table(sql_query_t *query, table_func_t *func, var_text_t *result) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, "TABLE(")); + if (cm_text_str_equal(&func->name, "CAST")) { + CT_RETURN_IFERR(cm_concat_var_string(result, "CAST(")); + CT_RETURN_IFERR(ctsql_unparse_expr_node(query, func->args->root, result, CT_FALSE)); + CT_RETURN_IFERR(cm_concat_var_string(result, " 
AS ")); + text_t target_name = func->args->next->root->word.func.name.value; + CT_RETURN_IFERR(cm_concat_n_var_string(result, target_name.str, target_name.len)); + } else { + CT_RETURN_IFERR(cm_concat_n_var_string(result, func->name.str, func->name.len)); + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, func->args, result)); + } + return cm_concat_var_string(result, "))"); +} + +status_t ctsql_unparse_user_table_name(sql_query_t *query, sql_table_t *tbl, var_text_t *result) +{ + if (!tbl->user.implicit && tbl->user.len > 0) { + CT_RETURN_IFERR(cm_concat_n_var_string(result, tbl->user.str, tbl->user.len)); + } + if (tbl->type == FUNC_AS_TABLE) { + return ctsql_unparse_func_table(query, &tbl->func, result); + } + return cm_concat_n_var_string(result, tbl->name.str, tbl->name.len); +} + +status_t ctsql_unparse_partition_info(specify_part_info_t *part, var_text_t *result) +{ + if (part->type == SPECIFY_PART_NONE || part->type == SPECIFY_PART_VALUE) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " PARTITION(")); + CT_RETURN_IFERR(cm_concat_n_var_string(result, part->part_name.str, part->part_name.len)); + return cm_concat_var_string(result, ")"); +} + +status_t ctsql_unparse_table_info(sql_query_t *query, sql_table_t *tbl, var_text_t *result) +{ + if (tbl->type == SUBSELECT_AS_TABLE) { + select_node_t *root = tbl->select_ctx->root; + if (root->type == SELECT_NODE_QUERY && root->query->pivot_items != NULL) { + return ctsql_unparse_pivot_or_unpivot_table(tbl, root->query, result); + } + CT_RETURN_IFERR(ctsql_unparse_subselect_table(root, result)); + } else { + CT_RETURN_IFERR(cm_concat_var_string(result, " ")); + CT_RETURN_IFERR(ctsql_unparse_user_table_name(query, tbl, result)); + } + CT_RETURN_IFERR(ctsql_unparse_partition_info(&tbl->part_info, result)); + + if (tbl->alias.implicit || tbl->alias.len == 0) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " ")); + 
return cm_concat_n_var_string(result, tbl->alias.str, tbl->alias.len); +} + +status_t ctsql_unparse_join_type(sql_join_type_t type, var_text_t *result) +{ + char *str = NULL; + if (type == JOIN_TYPE_INNER) { + str = " INNER JOIN "; + } else if (type == JOIN_TYPE_LEFT) { + str = " LEFT JOIN "; + } else if (type == JOIN_TYPE_RIGHT) { + str = " RIGHT JOIN "; + } else if (type == JOIN_TYPE_FULL) { + str = " FULL JOIN "; + } else if (type == JOIN_TYPE_CROSS) { + str = " CROSS JOIN "; + } else if (type == JOIN_TYPE_COMMA) { + str = ","; + } else { + CT_THROW_ERROR(ERR_UNSUPPORT_OPER_TYPE, "join", type); + return CT_ERROR; + } + return cm_concat_var_string(result, str); +} + +status_t ctsql_unparse_join_cond(sql_query_t *query, sql_join_node_t *join_node, var_text_t *result) +{ + if (IS_INNER_JOIN(join_node) || !ctsql_unparse_cond_need(join_node->join_cond)) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " ON ")); + return ctsql_unparse_cond_node(query, join_node->join_cond->root, CT_FALSE, result); +} + +status_t ctsql_unparse_join_tree(sql_query_t *query, sql_join_node_t *join_root, var_text_t *result) +{ + if (join_root->left->type != JOIN_TYPE_NONE && join_root->right->type != JOIN_TYPE_NONE) { + CT_RETURN_IFERR(ctsql_unparse_join_tree(query, join_root->left, result)); + return ctsql_unparse_join_tree(query, join_root->right, result); + } + + if (join_root->left->type == JOIN_TYPE_NONE) { + sql_table_t *left_tbl = TABLE_OF_JOIN_LEAF(join_root->left); + CT_RETURN_IFERR(ctsql_unparse_table_info(query, left_tbl, result)); + } else { + CT_RETURN_IFERR(ctsql_unparse_join_tree(query, join_root->left, result)); + } + + CT_RETURN_IFERR(ctsql_unparse_join_type(join_root->type, result)); + + if (join_root->right->type == JOIN_TYPE_NONE) { + sql_table_t *right_tbl = TABLE_OF_JOIN_LEAF(join_root->right); + CT_RETURN_IFERR(ctsql_unparse_table_info(query, right_tbl, result)); + return ctsql_unparse_join_cond(query, join_root, result); + } else { + return 
ctsql_unparse_join_tree(query, join_root->right, result); + } +} + +status_t ctsql_unparse_query_from(sql_query_t *query, var_text_t *result) +{ + sql_table_t *tbl = NULL; + CT_RETURN_IFERR(cm_concat_var_string(result, " FROM")); + if (query->join_root == NULL) { + tbl = (sql_table_t *)sql_array_get(&query->tables, 0); + return ctsql_unparse_table_info(query, tbl, result); + } else { + return ctsql_unparse_join_tree(query, query->join_assist.join_node, result); + } +} + +bool32 ctsql_unparse_cond_need(cond_tree_t *cond) +{ + if (cond == NULL || cond->root == NULL) { + return CT_FALSE; + } + if (cond->root->type == COND_NODE_TRUE || cond->root->type == COND_NODE_FALSE) { + return CT_FALSE; + } + return CT_TRUE; +} + +status_t ctsql_unparse_query_where(sql_query_t *query, var_text_t *result) +{ + if (ctsql_unparse_cond_need(query->cond)) { + CT_RETURN_IFERR(cm_concat_var_string(result, " WHERE ")); + return ctsql_unparse_cond_node(query, query->cond->root, CT_FALSE, result); + } + if (query->join_root != NULL && ctsql_unparse_cond_need(query->join_root->filter)) { + CT_RETURN_IFERR(cm_concat_var_string(result, " WHERE ")); + return ctsql_unparse_cond_node(query, query->join_root->filter->root, CT_FALSE, result); + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_query_group(sql_query_t *query, var_text_t *result) +{ + if (query->group_sets == NULL || query->group_sets->count == 0) { + return CT_SUCCESS; + } + group_set_t *group_set = NULL; + CT_RETURN_IFERR(cm_concat_var_string(result, " GROUP BY ")); + if (query->group_sets->count == 1) { + group_set = (group_set_t *)cm_galist_get(query->group_sets, 0); + return ctsql_unparse_expr_tree_list(query, group_set->items, result); + } + CT_RETURN_IFERR(cm_concat_var_string(result, "GROUPING SETS(")); + for (uint32 i = 0; i < query->group_sets->count; i++) { + group_set = (group_set_t *)cm_galist_get(query->group_sets, i); + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + 
CT_RETURN_IFERR(ctsql_unparse_expr_tree_list(query, group_set->items, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + if (i < query->group_sets->count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ",")); + } + } + return cm_concat_var_string(result, ")"); +} + +status_t ctsql_unparse_query_having(sql_query_t *query, var_text_t *result) +{ + if (query->having_cond == NULL) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " HAVING ")); + return ctsql_unparse_cond_node(query, query->having_cond->root, CT_FALSE, result); +} + +status_t ctsql_unparse_query_sort(sql_query_t *query, var_text_t *result) +{ + if (query->sort_items == NULL || query->sort_items->count == 0) { + return CT_SUCCESS; + } + sort_item_t *item = NULL; + CT_RETURN_IFERR(cm_concat_var_string(result, " ORDER BY ")); + for (uint32 i = 0; i < query->sort_items->count; i++) { + item = (sort_item_t *)cm_galist_get(query->sort_items, i); + CT_RETURN_IFERR(ctsql_unparse_expr_node(query, item->expr->root, result, CT_FALSE)); + if (item->direction == SORT_MODE_DESC) { + CT_RETURN_IFERR(cm_concat_var_string(result, " DESC")); + } + if (i < query->sort_items->count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ",")); + } + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_query_limit(sql_query_t *query, var_text_t *result) +{ + if (query->limit.count == NULL) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " LIMIT ")); + expr_tree_t *count = (expr_tree_t *)query->limit.count; + expr_tree_t *offset = (expr_tree_t *)query->limit.offset; + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, count, result)); + if (offset == NULL) { + return CT_SUCCESS; + } + CT_RETURN_IFERR(cm_concat_var_string(result, " OFFSET ")); + return ctsql_unparse_expr_tree(query, offset, result); +} + +static ctsql_unparse_query g_unparse_query[] = { + ctsql_unparse_query_cols, + ctsql_unparse_query_from, + ctsql_unparse_query_where, + 
ctsql_unparse_query_group, + ctsql_unparse_query_having, + ctsql_unparse_query_sort, + ctsql_unparse_query_limit +}; + +status_t ctsql_unparse_query_info(sql_query_t *query, var_text_t *result) +{ + if (query == NULL) { + CT_LOG_RUN_ERR("[UNPARSE] the query is null"); + return CT_ERROR; + } + CT_RETURN_IFERR(cm_concat_var_string(result, "SELECT")); + uint32 count = sizeof(g_unparse_query) / sizeof(ctsql_unparse_query); + for (uint32 i = 0; i < count; i++) { + CT_RETURN_IFERR(g_unparse_query[i](query, result)); + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_select_type(select_node_type_t type, var_text_t *result) +{ + char *str = NULL; + if (type == SELECT_NODE_UNION) { + str = " UNION "; + } else if (type == SELECT_NODE_UNION_ALL) { + str = " UNION ALL "; + } else if (type == SELECT_NODE_MINUS) { + str = " MINUS "; + } else if (type == SELECT_NODE_INTERSECT) { + str = " INTERSECT "; + } else if (type == SELECT_NODE_INTERSECT_ALL) { + str = " INTERSECT ALL "; + } else if (type == SELECT_NODE_EXCEPT) { + str = " EXCEPT "; + } else if (type == SELECT_NODE_EXCEPT_ALL) { + str = " EXCEPT ALL "; + } else { + CT_THROW_ERROR(ERR_UNSUPPORT_OPER_TYPE, "set", type); + return CT_ERROR; + } + return cm_concat_var_string(result, str); +} + +status_t ctsql_unparse_opt_info(select_node_t *sel_root, var_text_t *result) +{ + CT_RETURN_IFERR(ctsql_unparse_select_info(sel_root->left, result, CT_TRUE)); + CT_RETURN_IFERR(ctsql_unparse_select_type(sel_root->type, result)); + return ctsql_unparse_select_info(sel_root->right, result, CT_TRUE); +} + +status_t ctsql_unparse_select_info(select_node_t *sel_root, var_text_t *result, bool32 add_brkt) +{ + if (sel_root->type == SELECT_NODE_QUERY) { + if (!add_brkt) { + return ctsql_unparse_query_info(sel_root->query, result); + } + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_query_info(sel_root->query, result)); + return cm_concat_var_string(result, ")"); + } + return 
ctsql_unparse_opt_info(sel_root, result); +} + +status_t ctsql_unparse_select_node(expr_node_t *expr, var_text_t *result) +{ + sql_select_t *select_context = (sql_select_t *)expr->value.v_obj.ptr; + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_select_info(select_context->root, result, CT_FALSE)); + return cm_concat_var_string(result, ")"); +} + +status_t ctsql_unparse_seq_mode(seq_mode_t mode, var_text_t *result) +{ + if (mode == SEQ_CURR_VALUE) { + return cm_concat_var_string(result, "CURRVAL"); + } else if (mode == SEQ_NEXT_VALUE) { + return cm_concat_var_string(result, "NEXTVAL"); + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_seq_node(expr_node_t *expr, var_text_t *result) +{ + var_seq_t *seq_var = &expr->value.v_seq; + if (seq_var->user.len != 0) { + CT_RETURN_IFERR(cm_concat_n_var_string(result, seq_var->user.str, seq_var->user.len)); + CT_RETURN_IFERR(cm_concat_var_string(result, ".")); + } + CT_RETURN_IFERR(cm_concat_n_var_string(result, seq_var->name.str, seq_var->name.len)); + CT_RETURN_IFERR(cm_concat_var_string(result, ".")); + return ctsql_unparse_seq_mode(seq_var->mode, result); +} + +status_t ctsql_unparse_case_node(sql_query_t *query, expr_node_t *expr, var_text_t *result) +{ + case_expr_t *case_expr = (case_expr_t *)(VALUE(pointer_t, &expr->value)); + if (case_expr == NULL) { + return CT_SUCCESS; + } + case_pair_t *case_pair = NULL; + CT_RETURN_IFERR(cm_concat_var_string(result, "CASE ")); + if (!case_expr->is_cond) { + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, case_expr->expr, result)); + } + for (uint32 i = 0; i < case_expr->pairs.count; i++) { + case_pair = (case_pair_t *)cm_galist_get(&case_expr->pairs, i); + CT_RETURN_IFERR(cm_concat_var_string(result, " WHEN ")); + + if (case_expr->is_cond) { + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, case_pair->when_cond->root, CT_FALSE, result)); + } else { + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, case_pair->when_expr, result)); + } + + 
CT_RETURN_IFERR(cm_concat_var_string(result, " THEN ")); + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, case_pair->value, result)); + } + if (case_expr->default_expr == NULL) { + return cm_concat_var_string(result, " END"); + } + CT_RETURN_IFERR(cm_concat_var_string(result, " ELSE ")); + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, case_expr->default_expr, result)); + return cm_concat_var_string(result, " END"); +} + +status_t ctsql_unparse_negative_node(sql_query_t *query, expr_node_t *expr, var_text_t *result, bool32 table_unparsed) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, "-")); + return ctsql_unparse_expr_node(query, expr->right, result, table_unparsed); +} + +status_t ctsql_unparse_array_node(sql_query_t *query, expr_node_t *expr, var_text_t *result) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, "ARRAY[")); + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, expr->argument, result)); + return cm_concat_var_string(result, "]"); +} + +status_t ctsql_unparse_user_func_args(sql_query_t *query, expr_node_t *expr, var_text_t *result) +{ + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_node_func_args(query, expr->argument, CT_INVALID_ID32, result)); + return cm_concat_var_string(result, ")"); +} + +status_t ctsql_unparse_user_func_node(sql_query_t *query, expr_node_t *expr, var_text_t *result) +{ + func_word_t *func = &expr->word.func; + if (func->user.len != 0) { + CT_RETURN_IFERR(cm_concat_n_var_string(result, func->user.str, func->user.len)); + CT_RETURN_IFERR(cm_concat_var_string(result, ".")); + } + if (func->pack.len != 0) { + CT_RETURN_IFERR(cm_concat_n_var_string(result, func->pack.str, func->pack.len)); + CT_RETURN_IFERR(cm_concat_var_string(result, ".")); + } + CT_RETURN_IFERR(cm_concat_n_var_string(result, func->name.str, func->name.len)); + return ctsql_unparse_user_func_args(query, expr, result); +} + +status_t ctsql_unparse_expr_not_operation(sql_query_t *query, expr_node_t *expr, var_text_t *result, + 
bool32 table_unparsed) +{ + switch (expr->type) { + case EXPR_NODE_PRIOR: + return ctsql_unparse_prior_node(query, expr, result, table_unparsed); + case EXPR_NODE_CONST: + return CT_SUCCESS; // TODO + case EXPR_NODE_FUNC: + case EXPR_NODE_PROC: + return ctsql_unparse_node_func(query, expr, result, table_unparsed); + case EXPR_NODE_PARAM: + case EXPR_NODE_CSR_PARAM: + return cm_concat_var_string(result, "?"); + case EXPR_NODE_COLUMN: + return CT_SUCCESS; // TODO + case EXPR_NODE_STAR: + return cm_concat_var_string(result, "*"); + case EXPR_NODE_RESERVED: + return ctsql_unparse_reserved_node(expr, result); + case EXPR_NODE_SELECT: + return ctsql_unparse_select_node(expr, result); + case EXPR_NODE_SEQUENCE: + return ctsql_unparse_seq_node(expr, result); + case EXPR_NODE_CASE: + return ctsql_unparse_case_node(query, expr, result); + case EXPR_NODE_GROUP: + return CT_SUCCESS; // TODO + case EXPR_NODE_AGGR: + return CT_SUCCESS; // TODO + case EXPR_NODE_USER_FUNC: + return ctsql_unparse_user_func_node(query, expr, result); + case EXPR_NODE_OVER: + return CT_SUCCESS; // TODO + case EXPR_NODE_TRANS_COLUMN: + return CT_SUCCESS; + case EXPR_NODE_NEGATIVE: + return ctsql_unparse_negative_node(query, expr, result, table_unparsed); + case EXPR_NODE_ARRAY: + return ctsql_unparse_array_node(query, expr, result); + default: + CT_THROW_ERROR(ERR_CAPABILITY_NOT_SUPPORT, "expression not in type list"); + return CT_ERROR; + } +} + +status_t ctsql_unparse_expr_node(sql_query_t *query, expr_node_t *expr, var_text_t *result, bool32 table_unparsed) +{ + if (expr->type >= EXPR_NODE_MUL && expr->type <= EXPR_NODE_CAT) { + return ctsql_unparse_expr_operation(query, expr, result, table_unparsed); + } + return ctsql_unparse_expr_not_operation(query, expr, result, table_unparsed); +} + +status_t ctsql_unparse_expr_tree(sql_query_t *query, expr_tree_t *expr, var_text_t *result) +{ + // the first expr node + CT_RETURN_IFERR(ctsql_unparse_expr_node(query, expr->root, result, CT_FALSE)); + expr = 
expr->next; + // the next expr nodes + while (expr != NULL) { + CT_RETURN_IFERR(cm_concat_var_string(result, ",")); + CT_RETURN_IFERR(ctsql_unparse_expr_node(query, expr->root, result, CT_FALSE)); + expr = expr->next; + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_expr_tree_list(sql_query_t *query, galist_t *list, var_text_t *result) +{ + for (uint32 i = 0; i < list->count; i++) { + expr_tree_t *expr_tree = (expr_tree_t *)cm_galist_get(list, i); + if (NODE_IS_RES_NULL(expr_tree->root)) { + continue; + } + CT_RETURN_IFERR(ctsql_unparse_expr_tree(query, expr_tree, result)); + if (i < list->count - 1) { + CT_RETURN_IFERR(cm_concat_var_string(result, ",")); + } + } + return CT_SUCCESS; +} + +const char *ctsql_get_args_devider(uint32 func_id) +{ + if (func_id == ID_FUNC_ITEM_EXTRACT) { + return " FROM "; + } else if (func_id == ID_FUNC_ITEM_CAST) { + return " AS "; + } else { + return ", "; + } +} + +status_t ctsql_unparse_node_func_args(sql_query_t *query, expr_tree_t *args, uint32 func_id, var_text_t *result) +{ + while (args != NULL) { + CT_RETURN_IFERR(ctsql_unparse_expr_node(query, args->root, result, CT_FALSE)); + if (args->next != NULL) { + CT_RETURN_IFERR(cm_concat_var_string(result, ctsql_get_args_devider(func_id))); + } + args = args->next; + } + return CT_SUCCESS; +} + +static status_t ctsql_unparse_func_if(sql_query_t *query, expr_node_t *node, sql_func_t *func, var_text_t *result) +{ + char func_name[CT_NAME_BUFFER_SIZE] = {0}; + cm_str_to_upper(func->name.str, func_name); + CT_RETURN_IFERR(cm_concat_var_string(result, func_name)); + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, node->cond_arg->root, CT_FALSE, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ", ")); + CT_RETURN_IFERR(ctsql_unparse_node_func_args(query, node->argument, func->builtin_func_id, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + + return CT_SUCCESS; +} + +static status_t 
ctsql_unparse_func_lnnvl(sql_query_t *query, expr_node_t *node, sql_func_t *func, var_text_t *result) +{ + char func_name[CT_NAME_BUFFER_SIZE] = {0}; + cm_str_to_upper(func->name.str, func_name); + CT_RETURN_IFERR(cm_concat_var_string(result, func_name)); + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, node->cond_arg->root, CT_FALSE, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + + return CT_SUCCESS; +} + +static status_t ctsql_unparse_func_group_concat(sql_query_t *query, expr_node_t *node, sql_func_t *func, + var_text_t *result) +{ + char func_name[CT_NAME_BUFFER_SIZE] = {0}; + cm_str_to_upper(func->name.str, func_name); + CT_RETURN_IFERR(cm_concat_var_string(result, func_name)); + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_node_func_args(query, node->argument->next, func->builtin_func_id, result)); + if (node->sort_items != NULL) { + CT_RETURN_IFERR(cm_concat_var_string(result, " ORDER BY ")); + // TODO, sort items + } + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + + return CT_SUCCESS; +} + +static status_t ctsql_unparse_func_default(sql_query_t *query, expr_node_t *node, sql_func_t *func, var_text_t *result) +{ + char func_name[CT_NAME_BUFFER_SIZE] = {0}; + cm_str_to_upper(func->name.str, func_name); + CT_RETURN_IFERR(cm_concat_var_string(result, func_name)); + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + CT_RETURN_IFERR(ctsql_unparse_node_func_args(query, node->argument, func->builtin_func_id, result)); + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + + return CT_SUCCESS; +} + +status_t ctsql_unparse_node_func(sql_query_t *query, expr_node_t *node, var_text_t *result, bool32 table_unparsed) +{ + sql_func_t *func = sql_get_func(&node->value.v_func); + switch (func->builtin_func_id) { + case ID_FUNC_ITEM_IF: + return ctsql_unparse_func_if(query, node, func, result); + case ID_FUNC_ITEM_LNNVL: + return 
ctsql_unparse_func_lnnvl(query, node, func, result); + case ID_FUNC_ITEM_GROUP_CONCAT: + return ctsql_unparse_func_group_concat(query, node, func, result); + default: + return ctsql_unparse_func_default(query, node, func, result); + } + + return CT_SUCCESS; +} + +status_t ctsql_unparse_in_expr(sql_query_t *query, expr_tree_t *expr, uint32 len, var_text_t *result) +{ + // TODO + return CT_SUCCESS; +} + +status_t ctsql_unparse_hash_exprs(sql_query_t *query, galist_t *left_exprs, galist_t *right_exprs, var_text_t *result) +{ + return CT_SUCCESS; +} + +status_t ctsql_unparse_connect_mtrl_join_node(sql_query_t *query, plan_node_t *plan, var_text_t *result) +{ + galist_t *prior_exprs = plan->cb_mtrl.prior_exprs; + galist_t *key_exprs = plan->cb_mtrl.key_exprs; + + CT_RETURN_IFERR(ctsql_unparse_hash_exprs(query, prior_exprs, key_exprs, result)); + + cond_tree_t *cond = plan->cb_mtrl.connect_by_cond; + if (cond == NULL || cond->root->type == COND_NODE_TRUE) { + return CT_SUCCESS; + } + + CT_RETURN_IFERR(cm_concat_var_string(result, " AND ")); + return ctsql_unparse_cond_node(query, cond, CT_FALSE, result); +} + +status_t ctsql_unparse_hash_mtrl_node(sql_query_t *query, plan_node_t *plan, var_text_t *result) +{ + galist_t *left_exprs = plan->hash_mtrl.group.exprs; + galist_t *right_exprs = plan->hash_mtrl.remote_keys; + + return ctsql_unparse_hash_exprs(query, left_exprs, right_exprs, result); +} + +status_t ctsql_unparse_cond_unknown(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + return CT_SUCCESS; +} + +status_t ctsql_unparse_cond_compare(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + return CT_SUCCESS; +} + +status_t ctsql_unparse_cond_or(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + // left round brackets + if (add_rnd_brkt) { + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + } + // left node + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, 
cond->left, (cond->left->type == COND_NODE_AND), result)); + // OR symbol + CT_RETURN_IFERR(cm_concat_var_string(result, " OR ")); + // right node + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, cond->right, (cond->right->type == COND_NODE_AND), result)); + // right round brackets + if (add_rnd_brkt) { + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_cond_and(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + // left round brackets + if (add_rnd_brkt) { + CT_RETURN_IFERR(cm_concat_var_string(result, "(")); + } + // left node + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, cond->left, (cond->left->type == COND_NODE_OR), result)); + // AND symbol + CT_RETURN_IFERR(cm_concat_var_string(result, " AND ")); + // right node + CT_RETURN_IFERR(ctsql_unparse_cond_node(query, cond->right, (cond->right->type == COND_NODE_OR), result)); + // right round brackets + if (add_rnd_brkt) { + CT_RETURN_IFERR(cm_concat_var_string(result, ")")); + } + return CT_SUCCESS; +} + +status_t ctsql_unparse_cond_not(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + return CT_SUCCESS; +} + +status_t ctsql_unparse_cond_true(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + return cm_concat_var_string(result, "NULL IS NOT NULL"); // NOTE(review): "NULL IS NOT NULL" evaluates to FALSE, yet this handles COND_NODE_TRUE, while ctsql_unparse_cond_false emits "TRUE" — the two strings look swapped; confirm intent +} + +status_t ctsql_unparse_cond_false(sql_query_t *query, cond_node_t *cond, bool32 add_rnd_brkt, var_text_t *result) +{ + return cm_concat_var_string(result, "TRUE"); +} + +static cond_unparser_t g_unparse_conds[] = {{COND_NODE_UNKNOWN, ctsql_unparse_cond_unknown}, + {COND_NODE_COMPARE, ctsql_unparse_cond_compare}, + {COND_NODE_OR, ctsql_unparse_cond_or}, + {COND_NODE_AND, ctsql_unparse_cond_and}, + {COND_NODE_NOT, ctsql_unparse_cond_not}, + {COND_NODE_TRUE, ctsql_unparse_cond_true}, + {COND_NODE_FALSE, ctsql_unparse_cond_false}}; + +status_t ctsql_unparse_cond_node(sql_query_t *query, cond_node_t *cond,
bool32 add_rnd_brkt, var_text_t *result) +{ + if (cond == NULL || result == NULL) { + return CT_ERROR; + } + if (cond->type >= sizeof(g_unparse_conds) / sizeof(cond_unparser_t)) { + // TODO: add log + return CT_ERROR; + } + CM_ASSERT(cond->type == g_unparse_conds[cond->type].type); + return g_unparse_conds[cond->type].cond_unparse_func(query, cond, add_rnd_brkt, result); +} diff --git a/pkg/src/ctsql/parser/ctsql_unparser.h b/pkg/src/ctsql/parser/ctsql_unparser.h new file mode 100644 index 0000000000000000000000000000000000000000..1167ada05f8be5feddb89c540dc784055fa0d21a --- /dev/null +++ b/pkg/src/ctsql/parser/ctsql_unparser.h @@ -0,0 +1,56 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * ctsql_unparser.h + * + * + * IDENTIFICATION + * src/ctsql/parser/ctsql_unparser.h + * + * ------------------------------------------------------------------------- + */ +#ifndef __CTSQL_UNPARSER_H__ +#define __CTSQL_UNPARSER_H__ + +#include "cm_defs.h" +#include "cm_lex.h" +#include "ctsql_cond.h" +#include "ctsql_stmt.h" +#include "ctsql_expr_def.h" +#include "ctsql_winsort.h" +#include "ctsql_plan.h" + +#define DEFAULT_UNPARSE_STR_LEN 1024 + +typedef status_t (*cond_unparse_func_t)(sql_query_t *query, cond_node_t *cond_node, bool32 add_rnd_brkt, var_text_t *result); + +typedef struct st_cond_unparser { + cond_node_type_t type; + cond_unparse_func_t cond_unparse_func; +} cond_unparser_t; + +typedef status_t (*ctsql_unparse_stmt)(sql_stmt_t *stmt, var_text_t *result); + +typedef struct st_ctsql_unparser { + lang_type_t type; + ctsql_unparse_stmt unparse_stmt_func; +} ctsql_unparser_t; + +status_t ctsql_unparse_hash_mtrl_node(sql_query_t *query, plan_node_t *plan, var_text_t *result); +status_t ctsql_unparse_connect_mtrl_join_node(sql_query_t *query, plan_node_t *plan, var_text_t *result); +status_t ctsql_unparse_cond_node(sql_query_t *query, cond_node_t *cond_node, bool32 add_rnd_brkt, var_text_t *result); + +#endif diff --git a/pkg/src/ctsql/parser/dcl_parser.c b/pkg/src/ctsql/parser/dcl_parser.c index d38f4ec875ef0449b036a789263ec7d95a0a4073..a9b5241b14ae7ff9fadd32888a2c1a4556628758 100644 --- a/pkg/src/ctsql/parser/dcl_parser.c +++ b/pkg/src/ctsql/parser/dcl_parser.c @@ -915,9 +915,10 @@ status_t sql_parse_route(sql_stmt_t *stmt) } #endif -status_t sql_parse_dcl(sql_stmt_t *stmt, key_wid_t key_wid) +status_t sql_parse_dcl(sql_stmt_t *stmt, word_t *leader_word) { status_t status; + key_wid_t key_wid = leader_word->id; stmt->session->sql_audit.audit_type = SQL_AUDIT_DCL; status = sql_alloc_context(stmt); diff --git a/pkg/src/ctsql/parser/dcl_parser.h 
b/pkg/src/ctsql/parser/dcl_parser.h index 76d5732b133e54fdb310d91fa50f31cf43471338..da74c057662b14c33479533b7a4fe7f1ca5e2339 100644 --- a/pkg/src/ctsql/parser/dcl_parser.h +++ b/pkg/src/ctsql/parser/dcl_parser.h @@ -29,7 +29,7 @@ extern "C" { #endif -status_t sql_parse_dcl(sql_stmt_t *stmt, key_wid_t key_wid); +status_t sql_parse_dcl(sql_stmt_t *stmt, word_t *leader_word); #ifdef __cplusplus } diff --git a/pkg/src/ctsql/parser/dml_parser.c b/pkg/src/ctsql/parser/dml_parser.c index 9c927bd89132231570695f1f5a2430ba6658bb88..12246b2479f909ef4e85261cfe94a011a502f179 100644 --- a/pkg/src/ctsql/parser/dml_parser.c +++ b/pkg/src/ctsql/parser/dml_parser.c @@ -1,6 +1,6 @@ /* ------------------------------------------------------------------------- * This file is part of the Cantian project. - * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * Copyright (c) 2025 Huawei Technologies Co.,Ltd. * * Cantian is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -48,7 +48,6 @@ #include "ctsql_replace_parser.h" #include "ctsql_merge_parser.h" - #ifdef __cplusplus extern "C" { #endif @@ -113,9 +112,13 @@ static status_t sql_create_dml_context(sql_stmt_t *stmt, sql_text_t *sql, key_wi status_t sql_create_dml(sql_stmt_t *stmt, sql_text_t *sql, key_wid_t key_wid) { CT_RETURN_IFERR(sql_create_dml_context(stmt, sql, key_wid)); + CT_RETURN_IFERR(sql_verify(stmt)); + check_table_stats(stmt); + CT_RETURN_IFERR(ctsql_optimize_logically(stmt)); + return sql_create_dml_plan(stmt); } @@ -220,7 +223,7 @@ void sql_parse_set_context_procinfo(sql_stmt_t *stmt) } } -void sql_enrich_context_for_uncached(sql_stmt_t *stmt, timeval_t *timeval_begin) +void sql_update_context_stat_uncached(sql_stmt_t *stmt, timeval_t *timeval_begin) { timeval_t timeval_end; sql_init_context_stat(&stmt->context->stat); @@ -236,7 +239,7 @@ void sql_enrich_context_for_uncached(sql_stmt_t *stmt, timeval_t *timeval_begin) sql_parse_set_context_procinfo(stmt); if (stmt->context->ctrl.memory != NULL) { cm_atomic_add(&g_instance->library_cache_info[stmt->lang_type].pins, - (int64)stmt->context->ctrl.memory->pages.count); + (int64)stmt->context->ctrl.memory->pages.count); cm_atomic_inc(&g_instance->library_cache_info[stmt->lang_type].reloads); } } @@ -254,7 +257,7 @@ status_t sql_parse_dml_directly(sql_stmt_t *stmt, key_wid_t key_wid, sql_text_t CT_RETURN_IFERR(sql_create_dml_currently(stmt, sql_text, key_wid)); - sql_enrich_context_for_uncached(stmt, &timeval_begin); + sql_update_context_stat_uncached(stmt, &timeval_begin); return CT_SUCCESS; } @@ -331,14 +334,33 @@ status_t sql_parse_anonymous_directly(sql_stmt_t *stmt, word_t *leader, sql_text stmt->context->ctrl.ref_count = 0; if (stmt->context->ctrl.memory != NULL) { cm_atomic_add(&g_instance->library_cache_info[stmt->lang_type].pins, - (int64)stmt->context->ctrl.memory->pages.count); + (int64)stmt->context->ctrl.memory->pages.count); cm_atomic_inc(&g_instance->library_cache_info[stmt->lang_type].reloads); } return 
CT_SUCCESS; } -status_t sql_parse_dml(sql_stmt_t *stmt, key_wid_t key_wid) +void sql_update_context_stat_cached(sql_stmt_t *stmt, timeval_t *tv_beg, ctx_stat_t *old_stat) +{ + timeval_t timeval_end; + sql_context_t *ctx = stmt->context; + sql_init_context_stat(&ctx->stat); + sql_parse_set_context_procinfo(stmt); + // check if can inherit + if (old_stat->parse_calls) { + ctx->stat = *old_stat; + } + ctx->stat.parse_calls = 1; + + ctx->stat.last_load_time = g_timer()->now; + (void)cm_gettimeofday(&timeval_end); + ctx->stat.parse_time = (uint64)TIMEVAL_DIFF_US(tv_beg, &timeval_end); + ctx->module_kind = SESSION_CLIENT_KIND(stmt->session); +} + +status_t sql_parse_dml(sql_stmt_t *stmt, word_t *leader_word) { + key_wid_t key_wid = leader_word->id; CT_LOG_DEBUG_INF("Begin parse DML, SQL = %s", T2S(&stmt->session->lex->text.value)); cm_atomic_inc(&g_instance->library_cache_info[stmt->lang_type].hits); // maybe need load entity from proc$ @@ -346,12 +368,67 @@ status_t sql_parse_dml(sql_stmt_t *stmt, key_wid_t key_wid) stmt->session->sql_audit.audit_type = SQL_AUDIT_DML; uint32 special_word = sql_has_special_word(stmt, &stmt->session->lex->text.value); - CT_RETURN_IFERR(sql_parse_dml_directly(stmt, key_wid, &stmt->session->lex->text)); + if (SQL_HAS_NONE != special_word || stmt->session->disable_soft_parse) { + CT_RETURN_IFERR(sql_parse_dml_directly(stmt, key_wid, &stmt->session->lex->text)); + } else { + // find sql from cache first. + uint32 hashval; + context_bucket_t *ctx_bucket = NULL; + ctx_stat_t stat; + stat.parse_calls = 0; // check if resue the stat. + text_t *ctsql = (text_t *)&stmt->session->lex->text; + if (ctsql_get_context_from_cache(stmt, ctsql, &hashval, &ctx_bucket, &stat) == CT_SUCCESS) { + stmt->context->has_ltt = (special_word & SQL_HAS_LTT); + return CT_SUCCESS; + } + // can not find sql from sql cache, so parse sql. 
+ if (sql_alloc_context(stmt)) { + CT_LOG_DEBUG_ERR("Failed to alloc sql context, SQL = %s.", T2S(ctsql)); + return CT_ERROR; + } + + sql_context_t *ctx = stmt->context; + ctx->ctrl.uid = stmt->session->curr_schema_id; + ctx->ctrl.hash_value = hashval; + ctx->ctrl.bucket = ctx_bucket; + sql_init_plan_count(stmt); + + timeval_t tv_beg; + (void)cm_gettimeofday(&tv_beg); + if (sql_create_dml_currently(stmt, (sql_text_t *)ctsql, key_wid)) { + return CT_ERROR; + } + + CM_ASSERT(ctx->cacheable); + sql_update_context_stat_cached(stmt, &tv_beg, &stat); + status_t ret = ctsql_cache_sql_context(stmt, ctx_bucket, (sql_text_t *)ctsql, hashval); + if (ret != CT_SUCCESS) { + CT_LOG_DEBUG_ERR("Failed to cache sql context, SQL = %s.", T2S(ctsql)); + return CT_ERROR; + } + } stmt->context->has_ltt = (special_word & SQL_HAS_LTT); return CT_SUCCESS; } +status_t sql_parse_explain(sql_stmt_t *stmt, word_t *leader_word) +{ + lex_t *lex = stmt->session->lex; + sql_text_t *sql = lex->curr_text; + + CT_RETURN_IFERR(lex_skip_comments(lex, NULL)); + + stmt->lang_type = sql_diag_lang_type(stmt, sql, leader_word); + if (stmt->lang_type != LANG_DML) { + CT_LOG_DEBUG_ERR("the type: %d can not explain", stmt->lang_type); + return CT_ERROR; + } + status_t ret = sql_parse_dml(stmt, leader_word); + stmt->is_explain = CT_TRUE; + return ret; +} + status_t sql_create_rowid_rs_column(sql_stmt_t *stmt, uint32 id, sql_table_type_t type, galist_t *list) { rs_column_t *rs_column = NULL; @@ -414,7 +491,7 @@ status_t sql_parse_view_subselect(sql_stmt_t *stmt, text_t *sql, sql_select_t ** * set the stmt schema info */ status_t sql_set_schema(sql_stmt_t *stmt, text_t *set_schema, uint32 set_schema_id, char *save_schema, - uint32 save_schema_maxlen, uint32 *save_schema_id) + uint32 save_schema_maxlen, uint32 *save_schema_id) { uint32 len; @@ -437,7 +514,6 @@ status_t sql_set_schema(sql_stmt_t *stmt, text_t *set_schema, uint32 set_schema_ return CT_SUCCESS; } - static bool32 
sql_get_view_object_addr(object_address_t *depended, knl_dictionary_t *view_dc, text_t *name) { depended->uid = view_dc->uid; @@ -455,7 +531,7 @@ static bool32 sql_get_view_object_addr(object_address_t *depended, knl_dictionar /* update the dependency info of this view in sys_dependency table */ static void sql_update_view_dependencies(sql_stmt_t *stmt, knl_dictionary_t *view_dc, galist_t *ref_list, - object_address_t depender, bool32 *is_valid) + object_address_t depender, bool32 *is_valid) { bool32 is_successed = CT_FALSE; knl_session_t *session = KNL_SESSION(stmt); @@ -526,7 +602,7 @@ bool32 sql_compile_view_sql(sql_stmt_t *stmt, knl_dictionary_t *view_dc, text_t * This function is used to recompile a view. */ static bool32 sql_compile_view(sql_stmt_t *stmt, text_t *owner, text_t *name, knl_dictionary_t *view_dc, - bool32 update_dep) + bool32 update_dep) { bool32 is_valid; object_address_t depender; @@ -562,7 +638,7 @@ static bool32 sql_compile_view(sql_stmt_t *stmt, text_t *owner, text_t *name, kn } static object_status_t sql_check_synonym_object_valid(sql_stmt_t *stmt, text_t *owner_name, text_t *table_name, - object_address_t *p_obj) + object_address_t *p_obj) { object_status_t obj_status = OBJ_STATUS_VALID; knl_dictionary_t dc; @@ -573,8 +649,8 @@ static object_status_t sql_check_synonym_object_valid(sql_stmt_t *stmt, text_t * } if (dc.type == DICT_TYPE_VIEW) { - obj_status = - sql_compile_view(stmt, owner_name, table_name, &dc, CT_FALSE) ? OBJ_STATUS_VALID : OBJ_STATUS_INVALID; + obj_status = sql_compile_view(stmt, owner_name, table_name, &dc, CT_FALSE) ? 
OBJ_STATUS_VALID + : OBJ_STATUS_INVALID; cm_reset_error(); } else { obj_status = OBJ_STATUS_VALID; @@ -598,7 +674,7 @@ static object_status_t sql_check_synonym_object_valid(sql_stmt_t *stmt, text_t * } static object_status_t sql_check_pl_synonym_object_valid(sql_stmt_t *stmt, text_t *owner_name, text_t *table_name, - object_address_t *obj_addr, object_type_t syn_type) + object_address_t *obj_addr, object_type_t syn_type) { object_status_t obj_status = OBJ_STATUS_VALID; pl_dc_t dc = { 0 }; @@ -657,7 +733,7 @@ static status_t sql_make_object_address(knl_cursor_t *cursor, object_address_t * } static status_t sql_check_current_synonym(sql_stmt_t *stmt, knl_session_t *session, knl_cursor_t *cursor, - bool32 compile_all, uint32 uid) + bool32 compile_all, uint32 uid) { object_address_t d_obj, p_obj; object_status_t old_status, new_status; @@ -736,10 +812,10 @@ status_t sql_compile_synonym_by_user(sql_stmt_t *stmt, text_t *schema_name, bool knl_open_sys_cursor(session, cursor, CURSOR_ACTION_SELECT, SYS_SYN_ID, 0); knl_init_index_scan(cursor, CT_FALSE); knl_set_scan_key(INDEX_DESC(cursor->index), &cursor->scan_range.l_key, CT_TYPE_INTEGER, (void *)&uid, - sizeof(uint32), 0); + sizeof(uint32), 0); knl_set_key_flag(&cursor->scan_range.l_key, SCAN_KEY_LEFT_INFINITE, 1); knl_set_scan_key(INDEX_DESC(cursor->index), &cursor->scan_range.r_key, CT_TYPE_INTEGER, (void *)&uid, - sizeof(uint32), 0); + sizeof(uint32), 0); knl_set_key_flag(&cursor->scan_range.r_key, SCAN_KEY_RIGHT_INFINITE, 1); while (1) { @@ -834,10 +910,10 @@ status_t sql_compile_view_by_user(sql_stmt_t *stmt, text_t *schema_name, bool32 knl_open_sys_cursor(session, cursor, CURSOR_ACTION_SELECT, SYS_VIEW_ID, 0); knl_init_index_scan(cursor, CT_FALSE); knl_set_scan_key(INDEX_DESC(cursor->index), &cursor->scan_range.l_key, CT_TYPE_INTEGER, (void *)&uid, - sizeof(uint32), 0); + sizeof(uint32), 0); knl_set_key_flag(&cursor->scan_range.l_key, SCAN_KEY_LEFT_INFINITE, 1); knl_set_scan_key(INDEX_DESC(cursor->index), 
&cursor->scan_range.r_key, CT_TYPE_INTEGER, (void *)&uid, - sizeof(uint32), 0); + sizeof(uint32), 0); knl_set_key_flag(&cursor->scan_range.r_key, SCAN_KEY_RIGHT_INFINITE, 1); if (knl_fetch(session, cursor) != CT_SUCCESS) { @@ -868,6 +944,162 @@ status_t sql_compile_view_by_user(sql_stmt_t *stmt, text_t *schema_name, bool32 return status; } +static inline void ctsql_update_stat_hit(sql_context_t *ctx, uint8 type) +{ + memory_context_t *memory = ctx->ctrl.memory; + if (memory) { + st_library_cache_t *libcache = &g_instance->library_cache_info[type]; + cm_atomic_inc(&libcache->gethits); + cm_atomic_add(&libcache->pinhits, (int64)memory->pages.count); + } +} + +static inline void ctsql_set_ctx_ctrl_after_check(sql_context_t *ctx, uint8 type) +{ + if (ctx->in_sql_pool) { + ctx_pool_lru_move_to_head(sql_pool, &ctx->ctrl); + } + + ctsql_update_stat_hit(ctx, type); +} + +bool32 ctsql_check_sequences(sql_stmt_t *stmt) +{ + sql_context_t *ctx = stmt->context; + galist_t *sequences = ctx->sequences; + galist_t *objs = ctx->ref_objects; + if (!sequences || sequences->count == 0 || !objs) { + return CT_TRUE; + } + + uint32 i = 0; + object_address_t *obj; + while (i < objs->count) { + obj = (object_address_t *)cm_galist_get(objs, i++); + if (obj->tid != OBJ_TYPE_SEQUENCE) { + continue; + } + + if (!knl_chk_seq_entry(KNL_SESSION(stmt), obj->uid, obj->oid, obj->scn)) { + return CT_FALSE; + } + } + + return CT_TRUE; +} + +bool32 ctsql_check_sql_ctx_changed(sql_stmt_t *stmt) +{ + sql_context_t *ctx = stmt->context; + if (ctx->policy_used) { + CT_LOG_DEBUG_INF("Disable soft parse because policy is used."); + return CT_FALSE; + } + if (!ctsql_check_sequences(stmt)) { + return CT_FALSE; + } + + // return CT_SUCCESS if check successfully. 
+ if (sql_check_tables(stmt, ctx)) { + CT_LOG_DEBUG_INF("Cannot soft parse because failed to check table, stmtid=%u.", stmt->id); + return CT_FALSE; + } + + if (!sql_check_procedures(stmt, ctx->dc_lst)) { + CT_LOG_DEBUG_INF("Cannot soft parse because failed to check proc, stmtid=%u.", stmt->id); + return CT_FALSE; + } + + return CT_TRUE; +} + +status_t ctsql_get_context_from_cache(sql_stmt_t *stmt, text_t *ctsql, uint32 *ctsql_hash, context_bucket_t **bucketid, + ctx_stat_t *stat) +{ + uint32 hash_val = cm_hash_text(ctsql, INFINITE_HASH_RANGE); + context_bucket_t *ctx_bucket = &sql_pool->buckets[hash_val % CT_SQL_BUCKETS]; + uint16 sid = (uint16)KNL_SESSION(stmt)->id; + + cm_recursive_lock(sid, &ctx_bucket->parsing_lock, NULL); + uint32 schemaid = stmt->session->curr_schema_id; + sql_context_t *ctx = (sql_context_t *)ctx_pool_find(sql_pool, ctsql, hash_val, schemaid); + SET_STMT_CONTEXT(stmt, ctx); + + if (ctx) { + if (ctsql_check_sql_ctx_changed(stmt)) { + ctx->module_kind = SESSION_CLIENT_KIND(stmt->session); + ctx->stat.parse_calls++; + cm_recursive_unlock(&ctx_bucket->parsing_lock); + ctsql_set_ctx_ctrl_after_check(ctx, stmt->lang_type); + return CT_SUCCESS; + } else { + if (ctx->ctrl.ref_count == 1) { + // continue to use old stat because it is the same sql. 
+ *stat = ctx->stat; + } + ctx->ctrl.valid = CT_FALSE; + sql_release_context(stmt); + } + } + cm_recursive_unlock(&ctx_bucket->parsing_lock); + *bucketid = ctx_bucket; + *ctsql_hash = hash_val; + return CT_ERROR; +} + +static inline void ctsql_update_stat_reload(sql_stmt_t *stmt) +{ + memory_context_t *memory = stmt->context->ctrl.memory; + uint8 type = stmt->lang_type; + if (memory) { + st_library_cache_t *libcache = &g_instance->library_cache_info[type]; + cm_atomic_inc(&libcache->reloads); + cm_atomic_add(&libcache->pins, (int64)memory->pages.count); + } + + stmt->session->stat.hard_parses++; +} + +static inline void ctsql_cache_sql_ctx_final_proc(sql_stmt_t *stmt) +{ + sql_context_t *ctx = stmt->context; + ctx_insert(sql_pool, (context_ctrl_t *)ctx); + ctx->in_sql_pool = CT_TRUE; + ctsql_update_stat_reload(stmt); +} + +status_t ctsql_cache_sql_context(sql_stmt_t *stmt, context_bucket_t *ctx_bucket, sql_text_t *ctsql, uint32 hash_val) +{ + uint16 sid = (uint16)KNL_SESSION(stmt)->id; + sql_context_t *new_ctx = stmt->context; + cm_recursive_lock(sid, &ctx_bucket->parsing_lock, NULL); + uint32 schemaid = stmt->session->curr_schema_id; + sql_context_t *ctx = (sql_context_t *)ctx_pool_find(sql_pool, (text_t *)ctsql, hash_val, schemaid); + if (ctx) { + SET_STMT_CONTEXT(stmt, ctx); + if (ctsql_check_sql_ctx_changed(stmt)) { + ctx->module_kind = SESSION_CLIENT_KIND(stmt->session); + ctx->stat.parse_calls++; + sql_free_context(new_ctx); + cm_recursive_unlock(&ctx_bucket->parsing_lock); + ctsql_update_stat_hit(ctx, stmt->lang_type); + return CT_SUCCESS; + } else { + // the old context is invalid, should be released. 
+ ctx->ctrl.valid = CT_FALSE; + sql_release_context(stmt); + SET_STMT_CONTEXT(stmt, new_ctx); + } + } + ctx = stmt->context; + ctx->ctrl.ref_count = 1; + ctx_bucket_insert(ctx_bucket, (context_ctrl_t *)ctx); + cm_recursive_unlock(&ctx_bucket->parsing_lock); + + ctsql_cache_sql_ctx_final_proc(stmt); + return CT_SUCCESS; +} + #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/parser/dml_parser.h b/pkg/src/ctsql/parser/dml_parser.h index 9241b4cdd5ebfc4d2500e1d8593d69d43291f6bb..87d08595ee72613b708af6b8cd3679241c7585c1 100644 --- a/pkg/src/ctsql/parser/dml_parser.h +++ b/pkg/src/ctsql/parser/dml_parser.h @@ -50,10 +50,11 @@ typedef enum en_sql_special_word { } sql_special_word_t; status_t sql_create_list(sql_stmt_t *stmt, galist_t **list); -status_t sql_parse_dml(sql_stmt_t *stmt, key_wid_t key_wid); +status_t sql_parse_dml(sql_stmt_t *stmt, word_t *leader_word); +status_t sql_parse_explain(sql_stmt_t *stmt, word_t *leader); status_t sql_parse_view_subselect(sql_stmt_t *stmt, text_t *sql, sql_select_t **select_ctx, source_location_t *loc); bool32 sql_has_ltt(sql_stmt_t *stmt, text_t *sql_text); -bool32 sql_check_ctx(sql_stmt_t *stmt, sql_context_t *ctx); +bool32 ctsql_check_sql_ctx_changed(sql_stmt_t *stmt); bool32 sql_check_procedures(sql_stmt_t *stmt, galist_t *dc_lst); status_t sql_compile_synonym_by_user(sql_stmt_t *stmt, text_t *schema_name, bool32 compile_all); status_t sql_compile_view_by_user(sql_stmt_t *stmt, text_t *schema_name, bool32 compile_all); @@ -61,8 +62,8 @@ status_t sql_parse_dml_directly(sql_stmt_t *stmt, key_wid_t key_wid, sql_text_t bool32 sql_compile_view_sql(sql_stmt_t *stmt, knl_dictionary_t *view_dc, text_t *owner); bool32 sql_check_equal_join_cond(join_cond_t *join_cond); -#define CTSQL_SAVE_PARSER(stmt) \ - CTSQL_SAVE_STACK(stmt); \ +#define CTSQL_SAVE_PARSER(stmt) \ + CTSQL_SAVE_STACK(stmt); \ sql_context_t *__context__ = (stmt)->context; \ void *__pl_context__ = (stmt)->pl_context; \ lang_type_t __lang_type__ = (stmt)->lang_type; @@ 
-72,7 +73,7 @@ bool32 sql_check_equal_join_cond(join_cond_t *join_cond); SET_STMT_CONTEXT(stmt, __context__); \ SET_STMT_PL_CONTEXT(stmt, __pl_context__); \ (stmt)->lang_type = __lang_type__; \ - CTSQL_RESTORE_STACK(stmt); \ + CTSQL_RESTORE_STACK(stmt); \ } while (0) #ifdef Z_SHARDING @@ -81,11 +82,14 @@ status_t shd_duplicate_origin_sql(sql_stmt_t *stmt, const text_t *origin_sql); status_t sql_create_rowid_rs_column(sql_stmt_t *stmt, uint32 id, sql_table_type_t type, galist_t *list); -status_t sql_cache_context(sql_stmt_t *stmt, context_bucket_t *bucket, sql_text_t *sql, uint32 hash_value); +status_t ctsql_cache_sql_context(sql_stmt_t *stmt, context_bucket_t *ctx_bucket, sql_text_t *ctsql, uint32 hash_val); status_t sql_create_dml_currently(sql_stmt_t *stmt, sql_text_t *sql_text, key_wid_t key_wid); void sql_prepare_context_ctrl(sql_stmt_t *stmt, uint32 hash_value, context_bucket_t *bucket); void sql_parse_set_context_procinfo(sql_stmt_t *stmt); uint32 sql_has_special_word(sql_stmt_t *stmt, text_t *sql_text); +status_t ctsql_get_context_from_cache(sql_stmt_t *stmt, text_t *ctsql, uint32 *ctsql_id, context_bucket_t **bucketid, + ctx_stat_t *stat); + #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/parser_ddl/ddl_parser.c b/pkg/src/ctsql/parser_ddl/ddl_parser.c index d175efdff3b56608e57cd64f3486e90951bff2f7..5bc8ea73bc6eb1f58d1f03cf59571c650fd3aa77 100644 --- a/pkg/src/ctsql/parser_ddl/ddl_parser.c +++ b/pkg/src/ctsql/parser_ddl/ddl_parser.c @@ -809,15 +809,16 @@ status_t sql_parse_comment(sql_stmt_t *stmt) return status; } -status_t sql_parse_ddl(sql_stmt_t *stmt, key_wid_t wid) +status_t sql_parse_ddl(sql_stmt_t *stmt, word_t *leader_word) { status_t status; + key_wid_t key_wid = leader_word->id; text_t origin_sql = stmt->session->lex->text.value; stmt->session->sql_audit.audit_type = SQL_AUDIT_DDL; CT_RETURN_IFERR(sql_alloc_context(stmt)); CT_RETURN_IFERR(sql_create_list(stmt, &stmt->context->ref_objects)); - switch (wid) { + switch (key_wid) { case 
KEY_WORD_CREATE: status = sql_parse_create(stmt); break; diff --git a/pkg/src/ctsql/parser_ddl/ddl_parser.h b/pkg/src/ctsql/parser_ddl/ddl_parser.h index 9b08e0e5b6a7f95b58d304507e3f0438e5393343..88ed191ea473a60e7839ee877d6fda6f764dea9c 100644 --- a/pkg/src/ctsql/parser_ddl/ddl_parser.h +++ b/pkg/src/ctsql/parser_ddl/ddl_parser.h @@ -55,7 +55,7 @@ typedef struct st_seqence_info { bool32 nocyc_flag; } sql_seqence_info_t; -status_t sql_parse_ddl(sql_stmt_t *stmt, key_wid_t wid); +status_t sql_parse_ddl(sql_stmt_t *stmt, word_t *leader_word); status_t sql_parse_drop(sql_stmt_t *sql_stmt); status_t sql_parse_truncate(sql_stmt_t *stmt); status_t sql_parse_flashback(sql_stmt_t *stmt); diff --git a/pkg/src/ctsql/plan/ctsql_plan.c b/pkg/src/ctsql/plan/ctsql_plan.c index 9f50147af0aab388710c19d34f25d2afd9ebdcbd..d203e4efe95aab2392ab2943b7ddc3cfb5f3b14f 100644 --- a/pkg/src/ctsql/plan/ctsql_plan.c +++ b/pkg/src/ctsql/plan/ctsql_plan.c @@ -33,6 +33,8 @@ #include "expr_parser.h" #include "srv_instance.h" #include "dml_executor.h" +#include "plan_join.h" +#include "cbo_base.h" #ifdef __cplusplus extern "C" { @@ -365,7 +367,6 @@ status_t perfect_tree_and_gen_oper_map(plan_assist_t *pa, uint32 step, sql_join_ return CT_SUCCESS; } - status_t sql_make_index_col_map(plan_assist_t *pa, sql_stmt_t *stmt, sql_table_t *table) { if (pa != NULL && pa->vpeek_flag) { @@ -417,6 +418,221 @@ static inline bool32 select_node_has_hash_join(select_node_t *slct_node) return select_node_has_hash_join(slct_node->right); } +status_t handle_special_query_case(sql_query_t *query, sql_join_node_t **join_node) +{ + if (!IS_WITHAS_QUERY(query) || query->join_root == NULL) { + return CT_ERROR; + } + if (join_node != NULL) { + *join_node = query->join_root; + } + return CT_SUCCESS; +} + +status_t process_single_table_case(sql_stmt_t *stmt, plan_assist_t *plan_assist, sql_query_t *query) +{ + plan_assist->has_parent_join = (bool8)plan_assist->query->cond_has_acstor_col; + const uint32 check_flags = 
CBO_CHECK_FILTER_IDX | CBO_CHECK_JOIN_IDX; + CBO_SET_FLAGS(plan_assist, check_flags); + return sql_check_table_indexable(stmt, plan_assist, plan_assist->tables[0], query->cond); +} + +status_t build_query_join_tree(sql_stmt_t *stmt, sql_query_t *query, sql_join_node_t **result_root) +{ + status_t status = handle_special_query_case(query, result_root); + if (status == CT_SUCCESS) { + return CT_SUCCESS; + } + plan_assist_t plan_assist; + sql_join_node_t *join_root = NULL; + sql_init_plan_assist(stmt, &plan_assist, query, SQL_QUERY_NODE, NULL); + SQL_LOG_OPTINFO(stmt, ">>> Initializing join tree construction for query, table count=%u", query->tables.count); + status = (plan_assist.table_count == 1) ? process_single_table_case(stmt, &plan_assist, query) + : sql_build_join_tree(stmt, &plan_assist, &join_root); + if (status == CT_SUCCESS) { + if (result_root != NULL) { + *result_root = join_root; + } + SQL_LOG_OPTINFO(stmt, ">>> Complete build query join tree"); + } + return status; +} + +void cbo_get_parent_query_cost(sql_query_t *query, cbo_cost_t *cost) +{ + cost->card = CBO_DEFAULT_NDV; + cost->card = CBO_MIN_COST; + if (query->tables.count == 1) { + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, 0); + cost->cost = table->cost; + cost->card = TABLE_CBO_OUT_CARD(table); + } else if (query->join_root != NULL) { + *cost = query->join_root->cost; + } else { + *cost = query->join_assist.join_node->cost; + } +} + +static sql_query_t *find_top_query(sql_query_t *query) +{ + sql_query_t *current_query = query; + sql_query_t *top_query = NULL; + + while (current_query != NULL) { + top_query = current_query; + current_query = (current_query->owner != NULL) ? 
current_query->owner->parent : NULL; + } + + return top_query; +} + +static status_t construct_query_join_tree(sql_stmt_t *stmt, sql_query_t *query) +{ + sql_query_t *top_query = find_top_query(query); + + CT_RETURN_IFERR(build_query_join_tree(stmt, top_query, &top_query->join_root)); + + if (query->owner != NULL) { + sql_query_t *parent_query = query->owner->parent; + cbo_get_parent_query_cost(parent_query, &query->cost); + } + + CT_RETURN_IFERR(build_query_join_tree(stmt, query, &query->join_root)); + + return CT_SUCCESS; +} + +static status_t check_table_rewrite_condition(sql_table_t *table, bool32 *need_rewrite) +{ + if (table->type != NORMAL_TABLE) { + *need_rewrite = CT_TRUE; + return CT_SUCCESS; + } + if (table->plan_id == 0) { + if (!(table->col_use_flag & USE_ANCESTOR_COL)) { + *need_rewrite = CT_TRUE; + } + return CT_SUCCESS; + } + if (table->index == NULL || table->index_full_scan) { + *need_rewrite = CT_TRUE; + return CT_SUCCESS; + } + if (!(table->col_use_flag & (USE_ANCESTOR_COL | USE_SELF_JOIN_COL))) { + *need_rewrite = CT_TRUE; + return CT_SUCCESS; + } + return CT_SUCCESS; +} + +static status_t clear_top_query_join_tree(sql_stmt_t *stmt, sql_query_t *query) +{ + sql_query_t *current_query = query; + sql_query_t *top_query = NULL; + while (current_query) { + top_query = current_query; + current_query = (current_query->owner != NULL) ? 
current_query->owner->parent : NULL; + } + clear_query_cbo_status(top_query); + clear_query_cbo_status(query); + return CT_SUCCESS; +} + +void clear_query_cbo_status(sql_query_t *query) +{ + bool8 has_withas = CT_FALSE; + CT_RETVOID_IFERR(IS_WITHAS_QUERY(query)); + + uint32 table_idx = 0; + while (table_idx < query->tables.count) { + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, table_idx); + table_idx++; + if (query->join_card != CT_INVALID_INT64) { + TABLE_CBO_OUT_CARD(table) = query->join_card; + } + if (table->type == WITH_AS_TABLE) { + has_withas = CT_TRUE; + continue; + } + // 清理表的 CBO 状态 + TABLE_CBO_IDX_REF_COLS(table) = NULL; + TABLE_CBO_DRV_INFOS(table) = NULL; + TABLE_CBO_IS_DEAL(table) = CT_FALSE; + TABLE_CBO_FILTER(table) = NULL; + if (table->type == SUBSELECT_AS_TABLE || table->type == VIEW_AS_TABLE) { + clear_select_node_cbo_status(table->select_ctx->root); + } + } + query->join_root = NULL; + if (!has_withas) { + vmc_free(query->vmc); + } +} + +void clear_select_node_cbo_status(select_node_t *node) +{ + if (node == NULL) { + return; + } + switch (node->type) { + case SELECT_NODE_QUERY: + clear_query_cbo_status(node->query); + break; + default: + clear_select_node_cbo_status(node->left); + clear_select_node_cbo_status(node->right); + break; + } +} + +// TODO: 查询方式成本比较 +status_t cbo_if_hash_semi_effective(bool32 *result) +{ + *result = CT_TRUE; + return CT_SUCCESS; +} + +// TODO: 动态采样统计 +status_t sql_dynamic_sampling_table_stats() +{ + return CT_SUCCESS; +} + +// TODO 根据查询成本(cost)创建轮廓 +status_t sql_create_unnest_outline_by_cost() +{ + return CT_SUCCESS; +} + +static status_t cbo_can_rewrite_by_check_index(sql_stmt_t *stmt, sql_query_t *query, ck_type_t check_type, + bool32 *need_rewrite) +{ + plan_assist_t plan_assist; + sql_init_plan_assist(stmt, &plan_assist, query, SQL_QUERY_NODE, NULL); + *need_rewrite = CT_FALSE; + if (!sql_dynamic_sampling_table_stats()) { + cm_reset_error(); + } + 
CT_RETURN_IFERR(construct_query_join_tree(stmt, query)); + + if (sql_query_has_hash_join(query)) { + *need_rewrite = CT_TRUE; + return clear_top_query_join_tree(stmt, query); + } + + for (uint32 i = 0; i < query->tables.count; i++) { + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, i); + CT_RETURN_IFERR(check_table_rewrite_condition(table, need_rewrite)); + if (*need_rewrite) { + break; + } + } + if (!*need_rewrite) { + CT_RETURN_IFERR(cbo_if_hash_semi_effective(need_rewrite)); + } + return clear_top_query_join_tree(stmt, query); +} + bool32 sql_query_has_hash_join(sql_query_t *query) { if (query->join_assist.has_hash_oper) { @@ -449,6 +665,111 @@ sql_table_t *sql_get_driver_table(plan_assist_t *plan_ass) return plan_ass->plan_tables[0]; } +status_t rbo_check_index_4_rewrite(sql_stmt_t *stmt, sql_query_t *query, bool32 *result) +{ + sql_table_t *table = NULL; + plan_assist_t plan_assist; + *result = CT_FALSE; + if (query->tables.count > 1) { + *result = CT_TRUE; + return CT_SUCCESS; + } + sql_init_plan_assist(stmt, &plan_assist, query, SQL_QUERY_NODE, NULL); + CT_RETURN_IFERR(sql_check_table_indexable(stmt, &plan_assist, table, query->cond)); + if (table->index == NULL || table->index_full_scan) { + *result = CT_TRUE; + } + return CT_SUCCESS; +} + +static bool32 check_index_match_for_condition(cond_node_t *cond, uint16 idx_equal_to, knl_index_desc_t *index) +{ + uint32 col_id; + cols_used_t cols_used; + expr_tree_t *ref_expr = NULL; + if (cond->type != COND_NODE_COMPARE || cond->cmp->type != CMP_TYPE_EQUAL) { + return CT_TRUE; + } + + col_id = CT_INVALID_ID32; + if (IS_LOCAL_COLUMN(cond->cmp->left)) { + col_id = EXPR_COL(cond->cmp->left); + ref_expr = cond->cmp->right; + } else if (IS_LOCAL_COLUMN(cond->cmp->right)) { + col_id = EXPR_COL(cond->cmp->right); + ref_expr = cond->cmp->left; + } else { + return CT_FALSE; + } + init_cols_used(&cols_used); + sql_collect_cols_in_expr_node(ref_expr->root, &cols_used); + if (HAS_DYNAMIC_SUBSLCT(&cols_used) 
|| HAS_SELF_COLS(cols_used.flags)) { + return CT_FALSE; + } + + for (uint32 i = 0; i < idx_equal_to; i++) { + if (col_id == index->columns[i]) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +static inline bool32 check_basic_conditions(sql_query_t *query) +{ + return query->is_exists_query && query->cond != NULL && query->cond_has_acstor_col && query->tables.count == 1 && + query->aggrs->count == 0 && query->group_sets->count == 0 && !query->has_distinct && + query->winsort_list->count == 0 && query->connect_by_cond == NULL; +} + +bool32 check_index_match_for_all_conditions(sql_stmt_t *stmt, sql_query_t *query, knl_index_desc_t *index, + uint16 idx_equal_to, uint16 idx_col_cnt) +{ + if (!check_basic_conditions(query) || idx_equal_to != idx_col_cnt) { + return CT_FALSE; + } + reorganize_cond_tree(query->cond->root); + cond_node_t *cond = FIRST_NOT_AND_NODE(query->cond->root); + while (cond != NULL) { + if (!check_index_match_for_condition(cond, idx_equal_to, index)) { + return CT_FALSE; + } + cond = cond->next; + } + return CT_TRUE; +} + +static status_t rbo_can_rewrite_by_check_index(sql_stmt_t *stmt, sql_query_t *query, bool32 *need_rewrite) +{ + CT_RETURN_IFERR(rbo_check_index_4_rewrite(stmt, query, need_rewrite)); + CT_RETSUC_IFTRUE(*need_rewrite); + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, 0); + if (!check_index_match_for_all_conditions(stmt, query, table->index, table->idx_equal_to, + (uint16)table->index_match_count)) { + if (table->cost > RBO_COST_INDEX_LIST_SCAN) { + *need_rewrite = CT_TRUE; + return CT_SUCCESS; + } + } + if (!(table->col_use_flag & USE_ANCESTOR_COL)) { + *need_rewrite = CT_TRUE; + } + return CT_SUCCESS; +} + +status_t can_rewrite_by_check_index(sql_stmt_t *stmt, sql_query_t *query, ck_type_t check_type, bool32 *result) +{ + if (CBO_ON) { + status_t status = CT_ERROR; + SYNC_POINT_GLOBAL_START(CTC_CBO_CANNOT_REWRITE_BY_INDEX, result, CT_FALSE); + status = cbo_can_rewrite_by_check_index(stmt, query, check_type, 
result); + SYNC_POINT_GLOBAL_END; + CT_RETURN_IFERR(status); + return sql_create_unnest_outline_by_cost(); + } + return rbo_can_rewrite_by_check_index(stmt, query, result); +} + #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/plan/ctsql_plan.h b/pkg/src/ctsql/plan/ctsql_plan.h index a0e1710cbd8b61745c0d190c89f998a2116b6977..a4b9f313632b5e9564457d6486a8c57c733db1c8 100644 --- a/pkg/src/ctsql/plan/ctsql_plan.h +++ b/pkg/src/ctsql/plan/ctsql_plan.h @@ -169,7 +169,7 @@ status_t rbo_choose_full_scan_index(sql_stmt_t *stmt, plan_assist_t *pa, sql_tab void sql_prepare_query_plan(sql_stmt_t *stmt, plan_assist_t *plan_ass, sql_query_t *query, sql_node_type_t type, plan_assist_t *parent); -void reset_select_node_cbo_status(select_node_t *node); +void clear_select_node_cbo_status(select_node_t *node); void sql_collect_select_nodes(biqueue_t *queue, select_node_t *node); typedef status_t (*query_visit_func_t)(sql_stmt_t *stmt, sql_query_t *query); status_t visit_select_node(sql_stmt_t *stmt, select_node_t *node, query_visit_func_t visit_func); @@ -191,11 +191,10 @@ status_t sql_clone_join_root(sql_stmt_t *stmt, void *ctx, sql_join_node_t *src_j void swap_join_tree_child_node(plan_assist_t *plan_ass, sql_join_node_t *join_root); bool32 if_is_drive_table(sql_join_node_t *join_node, uint16 table); void clear_query_cbo_status(sql_query_t *query); -status_t build_query_join_tree(sql_stmt_t *stmt, sql_query_t *query, plan_assist_t *parent, sql_join_node_t **ret_root, - uint32 driver_table_count); +status_t build_query_join_tree(sql_stmt_t *stmt, sql_query_t *query, sql_join_node_t **ret_root); uint32 sql_calc_rownum(sql_stmt_t *stmt, sql_query_t *query); status_t perfect_tree_and_gen_oper_map(plan_assist_t *pa, uint32 step, sql_join_node_t *join_node); -status_t sql_dynamic_sampling_table_stats(sql_stmt_t *stmt, plan_assist_t *pa); +status_t sql_dynamic_sampling_table_stats(); bool32 sql_query_has_hash_join(sql_query_t *query); status_t clone_tables_4_subqry(sql_stmt_t *stmt, 
sql_query_t *query, sql_query_t *sub_query); void sql_init_plan_assist_impl(sql_stmt_t *stmt, plan_assist_t *plan_ass, sql_query_t *query, sql_node_type_t type, diff --git a/pkg/src/ctsql/plan/ctsql_plan_defs.h b/pkg/src/ctsql/plan/ctsql_plan_defs.h index 046f5d22a1c8bbbff2540ff4389dec2c468fe73a..9f3c84182dbca56ac212d14136ad7f241b62f741 100644 --- a/pkg/src/ctsql/plan/ctsql_plan_defs.h +++ b/pkg/src/ctsql/plan/ctsql_plan_defs.h @@ -497,6 +497,9 @@ typedef struct st_plan_assist { uint32 scan_part_cnt; // only use part table plan_node_t **filter_node_pptr; pointer_t join_card_map; + + bilist_t *join_tbl_level; + uint32 join_cur_level; } plan_assist_t; typedef enum en_column_match_mode { diff --git a/pkg/src/ctsql/plan/plan_join.c b/pkg/src/ctsql/plan/plan_join.c index 1e23fc61d14f510fd8cdb9ab19a29f7b8b26b8cd..a1a0527d1a1f4f0c5066f1c53ec6d412fb0d1716 100644 --- a/pkg/src/ctsql/plan/plan_join.c +++ b/pkg/src/ctsql/plan/plan_join.c @@ -788,6 +788,202 @@ static status_t sql_create_join_tree(sql_stmt_t *stmt, plan_assist_t *pa, join_a return CT_SUCCESS; } +static bool find_join_rel(sql_stmt_t *stmt, join_tbl_bitmap_t *join_tables_ids, sql_join_table_t *join_tbl) +{ + //todo: find join rel in hash table --rensiyuan + return CT_FALSE; +} + +static status_t build_join_tbl(sql_stmt_t *stmt, plan_assist_t *pa, join_tbl_bitmap_t *join_tables_ids, sql_join_table_t *table1, sql_join_table_t *table2, special_join_info_t *sjoininfo, sql_join_table_t *join_table) +{ + if (find_join_rel(stmt, join_tables_ids, join_table) == CT_TRUE) { + return CT_SUCCESS; + } + join_table->table_type = JOIN_TABLE; + sql_bitmap_copy(join_tables_ids, &join_table->table_ids); + join_table->rows = 0; + cm_bilist_init(&join_table->join_path); + join_table->cheapest_startup_path = NULL; + join_table->cheapest_total_path = NULL; + cm_bilist_init(&join_table->join_info); + + //todo: build join target list --huzhengchao + //build_joinrel_target_list + + //build_joinrel_joinlist(RelOptInfo * joinrel, 
RelOptInfo * outer_rel, RelOptInfo * inner_rel); //get join_tbl->join_info + + //todo: set join size estimate --huzhengchao + + //todo: add to hash table --rensiyuan + + if (pa->join_tbl_level) { + cm_bilist_add_tail(&join_table->bilist_node, &pa->join_tbl_level[pa->join_cur_level]); + } + return CT_SUCCESS; + +} + + +// todo: add_paths_to_joinrel(stmt, pa, table1, table2, sjoininfo, join_tbl); --zhengchao, 关注join_node +// match_unsorted_outer +// hash_inner_and_outer +// sort_inner_and_outer( +// root, joinrel, outerrel, innerrel, restrictlist, mergeclause_list, jointype, sjinfo, param_source_rels); + + +static status_t sql_make_join_relation(sql_stmt_t *stmt, plan_assist_t *pa, join_assist_t *join_ass, + sql_join_node_t **join_root, sql_join_table_t *table1, sql_join_table_t *table2) +{ + special_join_info_t* sjoininfo = NULL; + + join_tbl_bitmap_t join_tables_ids; + sql_bitmap_init(&join_tables_ids); + sql_bitmap_union(&table1->table_ids, &table2->table_ids, &join_tables_ids); + + //todo: check validity of table1、table2, and determine join type、set sjoininfo. --wangfeihuo + //if (sql_join_is_legal() == CT_FALSE) { + // return CT_FALSE; + //} + + //generate join info, maybe it is useless. 
+ /* if (sjoininfo == NULL) { + sjoininfo->min_lefthand = table1->table_ids; + sjoininfo->min_righthand = table2->table_ids; + sjoininfo->syn_lefthand = table1->table_ids; + sjoininfo->syn_righthand = table2->table_ids; + sjoininfo->jointype = JOIN_TYPE_INNER; //set join type by pa->join_assist->join_node->type + } */ + sql_join_table_t *join_table = NULL; + CT_RETURN_IFERR(sql_stack_alloc(stmt, sizeof(sql_join_table_t), (void **)&join_table)); + CT_RETURN_IFERR(build_join_tbl(stmt, pa, &join_tables_ids, table1, table2, sjoininfo, join_table)); + + //todo: generate join path by JOIN TYPE, and add join path to join_tbl --zhengchao + /* + switch (sjoininfo->jointype) { + case JOIN_TYPE_INNER: + // todo: add_paths_to_joinrel(stmt, pa, table1, table2, sjoininfo, join_tbl); + // todo: add_paths_to_joinrel(stmt, pa, table2, table1, sjoininfo, join_tbl); + break; + case JOIN_TYPE_COMMA: + break; + case JOIN_TYPE_CROSS: + break; + + case JOIN_TYPE_LEFT: + break; + case JOIN_TYPE_RIGHT: + break; + case JOIN_TYPE_FULL: + break; + } + */ + return CT_SUCCESS; +} + +static bool sql_have_relevant_join_relation(sql_join_table_t *table1, sql_join_table_t *table2) +{ + bilist_t *join_list = NULL; + join_tbl_bitmap_t *other_tables; + + if (table1->join_info.count <= table2->join_info.count) { + join_list = &table1->join_info; + other_tables = &table2->table_ids; + } else { + join_list = &table2->join_info; + other_tables = &table1->table_ids; + } + bilist_node_t *node = cm_bilist_head(join_list); + for (; node != NULL; node = BINODE_NEXT(node)) { + tbl_join_info_t *tmp_join_info = BILIST_NODE_OF(tbl_join_info_t, node, bilist_node); + if (sql_bitmap_overlap(&tmp_join_info->table_ids, other_tables) == CT_TRUE) { + return CT_TRUE; // there is join relation between table1 and table2 + } + } + return CT_FALSE; +} + +static status_t sql_make_rels_by_clause_joins(sql_stmt_t *stmt, plan_assist_t *pa, join_assist_t *join_ass, + sql_join_node_t **join_root, sql_join_table_t *old_table, 
bilist_node_t *other_tables) +{ + bilist_node_t *other_node = other_tables; + for (; other_node != NULL; other_node = BINODE_NEXT(other_node)) { + sql_join_table_t *other_table = BILIST_NODE_OF(sql_join_table_t, other_node, bilist_node); + + // when a table_ids overlap another table_ids, then skip it + if (sql_bitmap_overlap(&old_table->table_ids, &other_table->table_ids) == CT_TRUE) { + continue; + } + + // when the join_table has no join relation with another one, then skip it + if (sql_have_relevant_join_relation(old_table, other_table) == CT_FALSE) { + continue; + } + + CT_RETURN_IFERR(sql_make_join_relation(stmt, pa, join_ass, join_root, old_table, other_table)); + } + return CT_SUCCESS; +} + +static status_t sql_search_one_level(sql_stmt_t *stmt, plan_assist_t *pa, join_assist_t *join_ass, + sql_join_node_t **join_root) +{ + uint32 level = pa->join_cur_level; + bilist_node_t *pre_level_node = cm_bilist_head(&pa->join_tbl_level[level-1]); + + for (; pre_level_node != NULL; pre_level_node = BINODE_NEXT(pre_level_node)) { + sql_join_table_t *old_table = BILIST_NODE_OF(sql_join_table_t, pre_level_node, bilist_node); + + bilist_node_t * other_tables = NULL; + if (level == 1) { /* when level == 1, consider remaining initial rels */ + other_tables = pre_level_node->next; + } else { /* when level >= 2, consider all initial rels */ + other_tables = cm_bilist_head(&pa->join_tbl_level[0]); + } + CT_RETURN_IFERR(sql_make_rels_by_clause_joins(stmt, pa, join_ass, join_root, old_table, other_tables)); + } + + return CT_SUCCESS; +} + +static status_t make_base_join_tbl(sql_stmt_t *stmt, sql_table_t *table, sql_join_table_t *join_table) +{ + sql_bitmap_init(&join_table->table_ids); + sql_bitmap_make_singleton(table->id, &join_table->table_ids); + join_table->table_type = BASE_TABLE; + join_table->join_info = table->join_info; + // TODO: check indexable, get index、sequence scan cost,get path list --chenxiaobin + // TODO: sql_get_cheapest_path(join_table_node->join_table); + 
return CT_SUCCESS; +} + +static status_t sql_create_join_tree_new(sql_stmt_t *stmt, plan_assist_t *pa, join_assist_t *join_ass, + sql_join_node_t **join_root) +{ + CT_RETURN_IFERR(sql_stack_alloc(stmt, (pa->table_count) * sizeof(bilist_t), (void **)&pa->join_tbl_level)); + for (uint32 i = 0; i < pa->table_count; i++) { + cm_bilist_init(&pa->join_tbl_level[i]); + } + + for (uint32 i = 0; i < pa->table_count; i++) { + sql_join_table_t *join_table = NULL; //sql_join_table_t is brief of sql_table_t + CT_RETURN_IFERR(sql_stack_alloc(stmt, sizeof(sql_join_table_t), (void **)&join_table)); + CT_RETURN_IFERR(make_base_join_tbl(stmt, pa->tables[i], join_table)); + cm_bilist_add_tail(&join_table->bilist_node, &pa->join_tbl_level[0]); + } + + for (uint32 level = 1; level < pa->table_count; level++) { + pa->join_cur_level = level; + CT_RETURN_IFERR(sql_search_one_level(stmt, pa, join_ass, join_root)); + + //bilist_node_t *table_node = cm_bilist_head(&pa->join_tbl_level[level]); --huzhengchao + //for (; table_node != NULL; table_node = BINODE_NEXT(table_node)) { + //TODO: sql_get_cheapest_path(join_table_node->join_table); + //sql_join_table_node_t *join_table_node = BILIST_NODE_OF(sql_join_table_node_t, table_node, bilist_node); + //} + } + return CT_SUCCESS; +} + static inline cond_tree_t *sql_get_right_table_cond(sql_join_node_t *join_node) { if (join_node->type <= JOIN_TYPE_INNER) { @@ -1289,9 +1485,8 @@ status_t sql_build_join_tree(sql_stmt_t *stmt, plan_assist_t *plan_ass, sql_join CT_RETURN_IFERR( sql_get_table_join_cond(plan_ass->stmt, &plan_ass->query->tables, &plan_ass->query->tables, plan_ass->cond, &plan_ass->join_conds)); - { - CT_RETURN_IFERR(sql_create_join_tree(stmt, plan_ass, &join_ass, join_root)); - } + CT_RETURN_IFERR(sql_create_join_tree(stmt, plan_ass, &join_ass, join_root)); + CT_RETURN_IFERR(sql_create_join_tree_new(stmt, plan_ass, &join_ass, join_root)); } SQL_RESTORE_NODE_STACK(stmt); CTSQL_RESTORE_STACK(stmt); diff --git 
a/pkg/src/ctsql/plan/plan_join.h b/pkg/src/ctsql/plan/plan_join.h index 75049838846e68b085d7c9c264e7237ee84302cb..81326a93093e39874154dd4195dfa2881ce2f873 100644 --- a/pkg/src/ctsql/plan/plan_join.h +++ b/pkg/src/ctsql/plan/plan_join.h @@ -26,6 +26,7 @@ #define __PLAN_JOIN_H__ #include "ctsql_plan.h" +#include "ctsql_context.h" typedef struct st_join_assist { uint32 count; @@ -35,6 +36,55 @@ typedef struct st_join_assist { sql_join_node_t *selected_nodes[CT_MAX_JOIN_TABLES]; } join_assist_t; +typedef struct st_join_path{ + sql_join_node_t *join_node; + double cost; + double rows; + uint32 relids; + sql_join_type_t join_type; + join_oper_t join_method; + cond_tree_t *join_cond; + cond_tree_t *filter; + struct st_join_path *outer; + struct st_join_path *inner; + bool32 is_required; +} join_path_t; + +typedef struct st_tbl_join_info { + bilist_node_t bilist_node; + sql_join_type_t join_type; + join_tbl_bitmap_t table_ids; // bitmap of table ids +} tbl_join_info_t; + +typedef struct st_special_join_info { + join_tbl_bitmap_t min_lefthand; + join_tbl_bitmap_t min_righthand; + join_tbl_bitmap_t syn_lefthand; + join_tbl_bitmap_t syn_righthand; + sql_join_type_t jointype; /* always INNER, LEFT, FULL, SEMI, or ANTI */ + bool lhs_strict; + bool delay_upper_joins; + bool varratio_cached; + bool is_straight_join; +} special_join_info_t; + +typedef enum join_table_type_t { + BASE_TABLE, + JOIN_TABLE +} join_table_type_t; + +typedef struct st_sql_join_table { + bilist_node_t bilist_node; + join_table_type_t table_type; + double rows; + int encode_width; + bilist_t join_path; + join_path_t *cheapest_startup_path; + join_path_t *cheapest_total_path; + join_tbl_bitmap_t table_ids; // bitmap of table_ids + bilist_t join_info; +}sql_join_table_t; + bool32 need_adjust_hash_order(sql_join_node_t *join_root); status_t sql_build_join_tree(sql_stmt_t *stmt, plan_assist_t *plan_ass, sql_join_node_t **join_root); status_t sql_create_join_plan(sql_stmt_t *stmt, plan_assist_t *pa, 
sql_join_node_t *join_node, cond_tree_t *cond, diff --git a/pkg/src/ctsql/plan/plan_join_bitmap.c b/pkg/src/ctsql/plan/plan_join_bitmap.c new file mode 100644 index 0000000000000000000000000000000000000000..6aaa22feb418f1c6258aa73328f228cb2ddc8f07 --- /dev/null +++ b/pkg/src/ctsql/plan/plan_join_bitmap.c @@ -0,0 +1,121 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * plan_join.c + * + * + * IDENTIFICATION + * src/ctsql/plan/plan_join_bitmap.c + * + * ------------------------------------------------------------------------- + */ + +#include "plan_join_bitmap.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void sql_bitmap_init(join_tbl_bitmap_t *result) +{ + result->num = 0; + for (uint32 i = 0; i < MAX_JOIN_TABLE_GROUP; i++) { //init + result->ids[i] = 0; + } +} + +void sql_bitmap_make_singleton(uint32 table_id, join_tbl_bitmap_t* tables_bms) +{ + uint32 wordnum, bitnum; + + wordnum = table_id / 64; + bitnum = table_id % 64; + tables_bms->num = wordnum + 1; + tables_bms->ids[wordnum] = (1 << bitnum); + return; +} + +void sql_bitmap_copy(join_tbl_bitmap_t *a, join_tbl_bitmap_t *result) +{ + if (a->num == 0) { + sql_bitmap_init(result); + return; + } + result->num = a->num; + for (uint32 i = 0; i < a->num; i++) { + result->ids[i] = a->ids[i]; + } +} + +void 
sql_bitmap_union_singleton(uint32 a, uint32 b, join_tbl_bitmap_t* result) +{ + join_tbl_bitmap_t bitmap_a; + join_tbl_bitmap_t bitmap_b; + sql_bitmap_init(&bitmap_a); + sql_bitmap_init(&bitmap_b); + + sql_bitmap_make_singleton(a, &bitmap_a); + sql_bitmap_make_singleton(b, &bitmap_b); + + sql_bitmap_union(&bitmap_a, &bitmap_b, result); +} + +void sql_bitmap_union(join_tbl_bitmap_t *a, join_tbl_bitmap_t *b, join_tbl_bitmap_t* result) +{ + join_tbl_bitmap_t *other = NULL; + + if (b->num == 0) { + sql_bitmap_copy(a, result); + return; + } + if (a->num == 0) { + sql_bitmap_copy(b, result); + return; + } + /* Identify shorter and longer input; copy the longer one */ + if (a->num <= b->num) { + sql_bitmap_copy(b, result); + other = a; + } else { + sql_bitmap_copy(a, result); + other = b; + } + /* And union the shorter input into the result */ + for (uint32 i = 0; i < other->num; i++) { + result->ids[i] |= other->ids[i]; + + } + return; +} + +bool8 sql_bitmap_overlap(join_tbl_bitmap_t* a, join_tbl_bitmap_t* b) +{ + if (a->num == 0 || b->num == 0) { + return CT_FALSE; + } + /* Check ids in common */ + uint32 shortlen = MIN(a->num, b->num); + for (uint32 i = 0; i < shortlen; i++) { + if ((a->ids[i] & b->ids[i]) != 0) { + return CT_TRUE; + } + } + return CT_FALSE; +} + +#ifdef __cplusplus +} +#endif diff --git a/pkg/src/ctsql/plan/plan_join_bitmap.h b/pkg/src/ctsql/plan/plan_join_bitmap.h new file mode 100644 index 0000000000000000000000000000000000000000..778aad5e6770eedd1d83f7f145a35cf445d170b9 --- /dev/null +++ b/pkg/src/ctsql/plan/plan_join_bitmap.h @@ -0,0 +1,52 @@ +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. + * + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * plan_join.c + * + * + * IDENTIFICATION + * src/ctsql/plan/plan_join_bitmap.h + * + * ------------------------------------------------------------------------- + */ + +#include "cm_defs.h" + +#ifdef __cplusplus +extern "C" { +#endif +#define MAX_JOIN_TABLE_GROUP (uint32)CT_MAX_JOIN_TABLES/64 + +typedef struct st_join_tbl_bitmap { + uint32 num; //table_id / 64 + uint32 ids[MAX_JOIN_TABLE_GROUP]; // ids[i] = (1 << table_id % 64) +} join_tbl_bitmap_t; + +void sql_bitmap_init(join_tbl_bitmap_t *result); + +void sql_bitmap_make_singleton(uint32 table_id, join_tbl_bitmap_t* tables_bms); + +void sql_bitmap_copy(join_tbl_bitmap_t *a, join_tbl_bitmap_t *result); + +void sql_bitmap_union_singleton(uint32 a, uint32 b, join_tbl_bitmap_t* result); + +void sql_bitmap_union(join_tbl_bitmap_t *a, join_tbl_bitmap_t *b, join_tbl_bitmap_t* result); + +bool8 sql_bitmap_overlap(join_tbl_bitmap_t* a, join_tbl_bitmap_t* b); + +#ifdef __cplusplus +} +#endif diff --git a/pkg/src/ctsql/plan/plan_query.c b/pkg/src/ctsql/plan/plan_query.c index 741ba2b5c15abb6466a7c5c2aff92d2b37619dc0..55346e4f95ed0521a25e09155e425d3c49bba51d 100644 --- a/pkg/src/ctsql/plan/plan_query.c +++ b/pkg/src/ctsql/plan/plan_query.c @@ -199,7 +199,7 @@ plan_node_type_t sql_get_group_plan_type(sql_stmt_t *stmt, sql_query_t *query) } } } - return PLAN_NODE_HASH_GROUP; + return PLAN_NODE_HASH_GROUP; //TODO:ǰʱŻSORT_GROUPҪݴ۹ʱHASH_GROUPSORT_GROUP } static status_t sql_generate_group_sets(sql_stmt_t *stmt, sql_query_t *query, group_plan_t *group_p) diff --git a/pkg/src/ctsql/plan/plan_scan.c 
b/pkg/src/ctsql/plan/plan_scan.c index 5c896c01c43604b87531e433a53f291538780873..0a1842c9c75bf4ea775ca0048b97ff5ca9eec953 100644 --- a/pkg/src/ctsql/plan/plan_scan.c +++ b/pkg/src/ctsql/plan/plan_scan.c @@ -583,7 +583,7 @@ static inline status_t remove_join_cond_4_slct_node(sql_stmt_t *stmt, select_nod return CT_SUCCESS; } -void reset_select_node_cbo_status(select_node_t *node); +void clear_select_node_cbo_status(select_node_t *node); void cbo_unset_select_node_table_flag(select_node_t *select_node, uint32 cbo_flag, bool32 recurs); static inline status_t replace_table_in_array(sql_array_t *tables, sql_table_t *old_table, sql_table_t *new_table) diff --git a/pkg/src/ctsql/verifier/ctsql_hint_verifier.c b/pkg/src/ctsql/verifier/ctsql_hint_verifier.c index fd6d36a846e4c49cacf5ddd38ae58f8711e12186..259260397dfdefb4794bd8483aaa737577e11f79 100644 --- a/pkg/src/ctsql/verifier/ctsql_hint_verifier.c +++ b/pkg/src/ctsql/verifier/ctsql_hint_verifier.c @@ -42,6 +42,30 @@ uint32 get_dynamic_sampling_level(sql_stmt_t *stmt) return stmt->context->hint_info->opt_params->dynamic_sampling; } +static bool32 is_opt_param_enabled(const sql_context_t *context, uint32 param_id) +{ + if (context->hint_info == NULL || context->hint_info->opt_params == NULL) { + return CT_FALSE; + } + uint64 param_mask = CT_GET_MASK(param_id); + return CT_BIT_TEST(context->hint_info->opt_params->status, param_mask) > 0; +} + +static bool32 get_opt_param_value(const sql_context_t *context, uint32 param_id) +{ + uint64 param_mask = CT_GET_MASK(param_id); + return CT_BIT_TEST(context->hint_info->opt_params->value, param_mask) > 0; +} + +bool32 hint_apply_opt_param(sql_context_t *context, bool32 default_value, uint32 param_id) +{ + if (!is_opt_param_enabled(context, param_id)) { + return default_value; + } + return get_opt_param_value(context, param_id); +} + + #ifdef __cplusplus } #endif diff --git a/pkg/src/ctsql/verifier/ctsql_hint_verifier.h b/pkg/src/ctsql/verifier/ctsql_hint_verifier.h index 
24141f45b329e97cb41b1fefd984831639e447ac..fb8c3307ede88e76833990e2fce89d58823f8b42 100644 --- a/pkg/src/ctsql/verifier/ctsql_hint_verifier.h +++ b/pkg/src/ctsql/verifier/ctsql_hint_verifier.h @@ -96,7 +96,7 @@ void sql_verify_merge_hint(sql_verifier_t *verif, sql_merge_t *merge_ctx); void sql_verify_hint(sql_hint_verifier_t *hint_verifier, hint_info_t **query_hint_info); bool32 check_hint_index_ffs_valid(sql_table_t *table); -bool32 hint_apply_opt_param(sql_stmt_t *stmt, bool32 sys_value, uint64 id); +bool32 hint_apply_opt_param(sql_context_t *context, bool32 default_value, uint32 param_id); uint32 get_dynamic_sampling_level(sql_stmt_t *stmt); static inline void sql_init_hint_verf(sql_hint_verifier_t *verif, sql_stmt_t *stmt, sql_array_t *tables, diff --git a/pkg/src/ctsql/verifier/ctsql_table_verifier.c b/pkg/src/ctsql/verifier/ctsql_table_verifier.c index f31872e8a50c3c31c4ff2cf40c4b12725d681c2c..460ec8593c119f650227e2fd16c51b9876cc4eb5 100644 --- a/pkg/src/ctsql/verifier/ctsql_table_verifier.c +++ b/pkg/src/ctsql/verifier/ctsql_table_verifier.c @@ -313,6 +313,25 @@ status_t sql_table_cache_query_field(sql_stmt_t *stmt, sql_table_t *table, query return sql_table_cache_query_field_impl(stmt, table, src_query_fld, CT_FALSE); } +void sql_table_uncache_query_field(sql_query_t *query, var_column_t *v_col) +{ + sql_table_t *table = (sql_table_t *)sql_array_get(&query->tables, v_col->tab); + bilist_node_t *node = cm_bilist_head(&table->query_fields); + + for (; node!= NULL; node = BINODE_NEXT(node)) { + query_field_t *query_field = BILIST_NODE_OF(query_field_t, node, bilist_node); + if (query_field->col_id == v_col->col) { + query_field->ref_count--; + if (query_field->ref_count == 0) { + cm_bilist_del(node, &table->query_fields); + CT_LOG_DEBUG_INF("[PROJ_ELIMINATE] The column is eliminated from the query fields, " + "column_id: %d", v_col->col); + } + break; + } + } +} + status_t sql_table_cache_cond_query_field(sql_stmt_t *stmt, sql_table_t *table, query_field_t 
*src_query_fld) { return sql_table_cache_query_field_impl(stmt, table, src_query_fld, CT_TRUE); diff --git a/pkg/src/ctsql/verifier/ctsql_verifier.h b/pkg/src/ctsql/verifier/ctsql_verifier.h index 309793eb971223b7d398adaccff33a20d4a6696b..8791ab17b95f779f14011d0e5202b0fe18a56ffb 100644 --- a/pkg/src/ctsql/verifier/ctsql_verifier.h +++ b/pkg/src/ctsql/verifier/ctsql_verifier.h @@ -294,6 +294,7 @@ status_t sql_verify_listagg_order(sql_verifier_t *verif, galist_t *sort_items); void sql_init_aggr_node(expr_node_t *node, uint32 fun_id, uint32 ofun_id); status_t sql_adjust_oper_node(sql_verifier_t *verif, expr_node_t *node); status_t sql_table_cache_query_field(sql_stmt_t *stmt, sql_table_t *table, query_field_t *src_query_fld); +void sql_table_uncache_query_field(sql_query_t *subqry, var_column_t *v_col); status_t sql_table_cache_cond_query_field(sql_stmt_t *stmt, sql_table_t *table, query_field_t *src_query_fld); status_t sql_add_parent_refs(sql_stmt_t *stmt, galist_t *parent_refs, uint32 tab, expr_node_t *node); void sql_del_parent_refs(galist_t *parent_refs, uint32 tab, expr_node_t *node); diff --git a/pkg/src/dbstool/dbs_adp.c b/pkg/src/dbstool/dbs_adp.c index 0e9e196a957346b93e1daed81a833f3b6fdf069e..c2ec55925368628dfcadf7c857965083ad830bfd 100644 --- a/pkg/src/dbstool/dbs_adp.c +++ b/pkg/src/dbstool/dbs_adp.c @@ -37,7 +37,7 @@ #include "cm_date.h" #include "cm_error.h" #include "cm_file.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #include "cm_dbs_defs.h" #include "cm_log.h" #include "cm_dbs_intf.h" @@ -1654,6 +1654,9 @@ int32 ulog_export_handle(char *cluster_name, uint32 total_log_export_len, uint64 char *target_dir) { int32 ret = CT_SUCCESS; + // 先设置dbstor工具ID,跳过后续dbstor校验 + NsTermHandle nsTerm = {.nsIdx = NS_TERM_CLIENT_TEST_NS_IDX, .termIdx = 0}; + dbs_global_handle()->dbs_set_ns_term_handle(&nsTerm); // 根据输入填充lsn区间 ReadBatchLogOption option = { 0 }; ulog_export_option_init(&option, cluster_name, total_log_export_len, start_lsn); @@ -1892,6 +1895,9 @@ 
int32 dbs_page_export(int32 argc, char *argv[]) } printf("Fs name %s, cluster name %s\n", fs_name, cluster_name); + // 先设置dbstor工具ID,跳过后续dbstor校验 + NsTermHandle nsTerm = {.nsIdx = NS_TERM_CLIENT_TEST_NS_IDX, .termIdx = 0}; + dbs_global_handle()->dbs_set_ns_term_handle(&nsTerm); NameSpaceAttr ns_attr; if (dbs_global_handle()->open_namespace((char *)cluster_name, &ns_attr) != CT_SUCCESS) { printf("Failed to open namespace %s \n", cluster_name); @@ -2233,13 +2239,13 @@ int32 dbs_query_fs_info(int32 argc, char *argv[]) void dbs_perf_display(dbs_stat_item_query* items, uint32 item_num) { printf("-------------------------------------------------------------\n"); - printf("%-32s %-24s %-24s %-24s %-24s %-24s %-24s %-24s\n", - "ItemName", "SuccCnt", "ErrCnt", "MaxDelay", "MinDelay", "AvgDelay", "Iops", "BandWidth"); + printf("%-32s %-24s %-24s %-24s %-24s %-24s %-24s %-24s %-24s\n", + "ItemName", "SuccCnt", "ErrCnt", "MaxDelay", "MinDelay", "AvgDelay", "Iops", "BandWidth", "IoSize"); for (uint32 i = 0; i < item_num; i++) { - printf("%-32s %-24u %-24u %-24u %-24u %-24u %-24u %-24u\n", + printf("%-32s %-24u %-24u %-24u %-24u %-24u %-24u %-24u %-24llu\n", items[i].name, items[i].item.success_cnt, items[i].item.fail_cnt, items[i].item.max_delay, - items[i].item.min_delay, items[i].avg_delay, items[i].iops, items[i].bandWidth); + items[i].item.min_delay, items[i].avg_delay, items[i].iops, items[i].bandWidth, items[i].item.io_size); } printf("-------------------------------------------------------------\n"); } @@ -2386,10 +2392,19 @@ void parse_uint_params_list(int32 argc, char *argv[], const char *param_key, uin for (uint32 i = 0; i < argc; i++) { if (strncmp(argv[i], param_key, strlen(param_key)) == 0) { char *equal_sign = strchr(argv[i], '='); - if (equal_sign != NULL) { - *param_value = strtoul(equal_sign + 1, NULL, 10); - return; + + if (equal_sign != NULL && strchr(equal_sign + 1, '-') == NULL) { + char *end_ptr = '\0'; + *param_value = strtoul(equal_sign + 1, &end_ptr, 
10); + if (*end_ptr != '\0') { + *param_value = 0; + } + } else { + *param_value = 0; } + + printf("%s%u\n", param_key, *param_value); + return; } } printf("param %s is not found. \n", param_key); @@ -2406,6 +2421,14 @@ int32 dbs_perf_show(int32 argc, char *argv[]) parse_uint_params_list(argc, argv, DBS_PERF_SHOW_INTERVAL, &interval); parse_uint_params_list(argc, argv, DBS_PERF_SHOW_TIMES, ×); + if (interval <= 0 || interval > MAX_DBS_STATISTICAL_SIZE) { + printf("Converted interval:%u is not within the range of(0,7200]\n", interval); + return CT_ERROR; + } + if (times <= 0) { + printf("Converted times:%u is not greater than 0", times); + return CT_ERROR; + } dbs_uds_req_comm_msg* req_msg = (dbs_uds_req_comm_msg*)malloc(sizeof(dbs_uds_req_comm_msg)); if (req_msg == NULL) { printf("Failed to malloc req msg. \n"); diff --git a/pkg/src/dbstool/dbs_adp.h b/pkg/src/dbstool/dbs_adp.h index 5f94dc2624e288b95900c05ae96585ccacf34390..c8ede9cf25116980c4f77b41474e4f2ef1c24c1e 100644 --- a/pkg/src/dbstool/dbs_adp.h +++ b/pkg/src/dbstool/dbs_adp.h @@ -28,7 +28,7 @@ #include "cm_timer.h" #ifndef MODULE_ID -#define MODULE_ID DBSTORE +#define MODULE_ID DBSTOR #endif #define NUM_ONE 1 @@ -44,7 +44,7 @@ #define LINK_STATE_UNKNOWN 7 #define DBS_BACKUP_FILE_COUNT 10 #define DBS_LOGFILE_SIZE (10 * 1024 * 1024) -#define DBS_TOOL_LOG_FILE_NAME "dbs_tool.log" +#define DBS_TOOL_LOG_FILE_NAME "tool/dbs_tool.log" #define DBS_PERF_ITEM_NAME_LEN 32 #define DBS_UDS_BUFFER_SIZE 1024 diff --git a/pkg/src/dbstool/dbs_main.c b/pkg/src/dbstool/dbs_main.c index e991430b7f781182ed6fc28a7f64b8a0f97cd02d..2913fb815854947daf9622913fc9d6613e5a3b64 100644 --- a/pkg/src/dbstool/dbs_main.c +++ b/pkg/src/dbstool/dbs_main.c @@ -32,7 +32,7 @@ #include "cm_error.h" #include "cm_file.h" #include "dbs_adp.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #define DBS_MAX_CMD_PARAM_COUNT 16 diff --git a/pkg/src/kernel/catalog/dc_part.c b/pkg/src/kernel/catalog/dc_part.c index 
f64aa18f7a2d2acca474549db59bce292f44d0e7..3512fbb169dbc5a5d96f4a5de8ea33f586a92971 100644 --- a/pkg/src/kernel/catalog/dc_part.c +++ b/pkg/src/kernel/catalog/dc_part.c @@ -1067,6 +1067,7 @@ status_t dc_load_index_part_segment(knl_session_t *session, knl_handle_t dc_enti entity->corrupted = CT_TRUE; CT_THROW_ERROR(ERR_DC_CORRUPTED); } else { + CT_LOG_RUN_ERR("Index has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "index"); } } @@ -1340,6 +1341,7 @@ status_t dc_load_table_part_segment(knl_session_t *session, knl_handle_t dc_enti ((dc_entity_t *)dc_entity)->corrupted = CT_TRUE; CT_THROW_ERROR(ERR_DC_CORRUPTED); } else { + CT_LOG_RUN_ERR("Table has been dropped or truncated"); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); } diff --git a/pkg/src/kernel/catalog/dc_tbl.c b/pkg/src/kernel/catalog/dc_tbl.c index ebe5be633ed2890ef01b19ae511cbca8e67c4d8c..423292e24ae45f0ec5d92c85f21f68cfd8ab43de 100644 --- a/pkg/src/kernel/catalog/dc_tbl.c +++ b/pkg/src/kernel/catalog/dc_tbl.c @@ -883,7 +883,8 @@ status_t dc_load_entity_internal(knl_session_t *session, dc_user_t *user, uint32 return CT_ERROR; } - if (entity->column_count >= session->kernel->attr.max_column_count) { + if (entity->column_count >= session->kernel->attr.max_column_count || + entity->vircol_count > CT_MAX_INDEX_COLUMNS * CT_MAX_TABLE_INDEXES + CT_MAX_VIRTUAL_COLS) { dc_entry_dec_ref(entity); mctx_destroy(entity->memory); CT_THROW_ERROR_EX(ERR_INVALID_PARAMETER, @@ -1818,9 +1819,9 @@ static status_t db_load_virtual_column(knl_session_t *session, dc_entity_t *enti ret = memset_sp((void *)entity->virtual_columns, size, 0, size); knl_securec_check(ret); - entity->max_virtual_cols = col_pos + 1; } + entity->vircol_count++; entity->virtual_columns[col_pos] = column; return CT_SUCCESS; } @@ -1935,19 +1936,24 @@ static status_t dc_load_column(knl_session_t *session, knl_cursor_t *cursor, dc_ static status_t dc_load_vcolumn_default_expr(knl_session_t *session, dc_entity_t *entity) { 
knl_column_t *column = NULL; - - for (uint32 id = 0; id < entity->max_virtual_cols; ++id) { + session_t *session_ctc = (session_t *)session; + for (uint32 id = 0; id < entity->vircol_count; ++id) { column = entity->virtual_columns[id]; if (column == NULL || KNL_COLUMN_IS_DELETED(column)) { continue; } - CM_ASSERT(column->default_text.len != 0); - - /* get default expr tree from defalut_text directly instead of default_data */ - if (g_knl_callback.parse_default_from_text((knl_handle_t)session, - (knl_handle_t)entity, (knl_handle_t)column, entity->memory, - &column->default_expr, &column->update_default_expr, column->default_text) != CT_SUCCESS) { - return CT_ERROR; + + // The default_text of VGCs store location in SYS_COLUMNS, no parsing + if (!session_ctc->is_ctc || KNL_COLUMN_IS_HIDDEN(column)) { + CM_ASSERT(column->default_text.len != 0); + /* get default expr tree from defalut_text directly instead of default_data */ + if (g_knl_callback.parse_default_from_text((knl_handle_t)session, + (knl_handle_t)entity, (knl_handle_t)column, entity->memory, + &column->default_expr, &column->update_default_expr, column->default_text) != CT_SUCCESS) { + CT_LOG_RUN_ERR("the default_text: %s , len: %u parse failed!", + column->default_text.str, column->default_text.len); + return CT_ERROR; + } } } diff --git a/pkg/src/kernel/catalog/knl_dc.h b/pkg/src/kernel/catalog/knl_dc.h index 0c9993366f8b3945d4355a6d1d179d2f07adc5cc..9aaee5d333dc89e66c71403d05d1bebf66088ef3 100644 --- a/pkg/src/kernel/catalog/knl_dc.h +++ b/pkg/src/kernel/catalog/knl_dc.h @@ -219,8 +219,8 @@ typedef struct st_dc_entity { volatile bool32 valid; /* valid or not, changed by ddl */ atomic32_t ref_count; /* reference number, inc/dec by sql */ spinlock_t ref_lock; - uint32 column_count; /* column count */ - uint32 max_virtual_cols; /* max virtual column id */ + uint32 column_count; /* column count, including virtual stored column */ + uint32 vircol_count; /* includes virtual generated and created by func_index */ 
bool32 contain_lob; bool32 corrupted; /* table segment corrupted */ knl_dict_type_t type; diff --git a/pkg/src/kernel/common/knl_context.c b/pkg/src/kernel/common/knl_context.c index 6d559a9f08e683bf751a326b779bcf477ec72380..975a6c38e8a6c2fe1e02af41b5d14ba4aaf5b588 100644 --- a/pkg/src/kernel/common/knl_context.c +++ b/pkg/src/kernel/common/knl_context.c @@ -28,7 +28,7 @@ #include "cm_dbs_intf.h" #include "mes_config.h" #include "cms_interface.h" -#include "cm_dbstore.h" +#include "cm_dbstor.h" #ifdef __cplusplus extern "C" { @@ -96,7 +96,7 @@ status_t knl_startup(knl_handle_t kernel) return CT_ERROR; } if (!cfg->enable) { - CT_LOG_RUN_INF("Note: dbstore is not enabled, the disaster recovery funcs would not work."); + CT_LOG_RUN_INF("Note: dbstor is not enabled, the disaster recovery funcs would not work."); } else { const char* uuid = get_config_uuid(session->kernel->id); uint32 lsid = get_config_lsid(session->kernel->id); diff --git a/pkg/src/kernel/common/knl_context.h b/pkg/src/kernel/common/knl_context.h index 9c3b5d3d1c9b8994e14bb47eaba9fb12b486936a..ea9ea6c8f59b6e67b5edf367b3eba90ab2244fdd 100644 --- a/pkg/src/kernel/common/knl_context.h +++ b/pkg/src/kernel/common/knl_context.h @@ -310,6 +310,7 @@ typedef struct st_knl_attr { bool32 drc_in_reformer_mode; uint32 res_recycle_ratio; uint32 create_index_parallelism; + bool32 enable_dss; } knl_attr_t; typedef struct st_sys_name_context { // for system name diff --git a/pkg/src/kernel/common/knl_ctrl_restore.c b/pkg/src/kernel/common/knl_ctrl_restore.c index 344e91c46b89c4a5acb23fc5dfcab87a20685197..5df5acd946dc282db1c7eb60013d63d886808a19 100644 --- a/pkg/src/kernel/common/knl_ctrl_restore.c +++ b/pkg/src/kernel/common/knl_ctrl_restore.c @@ -901,7 +901,7 @@ status_t knl_backup_iof_kick_by_ns(knl_session_t *session) ret = cm_dbs_iof_kick_by_ns(&iof); SYNC_POINT_GLOBAL_END; if (ret != CT_SUCCESS) { - CT_LOG_RUN_WAR("dbstore iof failed, node_id : %u", iof.nodeid); + CT_LOG_RUN_WAR("dbstor iof failed, node_id : %u", 
iof.nodeid); return CT_ERROR; } return CT_SUCCESS; diff --git a/pkg/src/kernel/common/knl_syncpoint.c b/pkg/src/kernel/common/knl_syncpoint.c index 41967506a89132901d6e8f4d7ee64b971e80fcfb..49d2b08a5bd4c3713957a71cacc9ccd573fb398f 100644 --- a/pkg/src/kernel/common/knl_syncpoint.c +++ b/pkg/src/kernel/common/knl_syncpoint.c @@ -644,6 +644,8 @@ knl_global_syncpoint_def g_knl_syncpoint[] = { { CTC_GET_CBO_STATS_FAIL, CT_FALSE, "CTC_GET_CBO_STATS_FAIL", 0, knl_syncpoint_inject_errcode, 0 }, { CTC_UNLOCK_MDL_KEY_ABORT, CT_FALSE, "CTC_UNLOCK_MDL_KEY_ABORT", 0, knl_syncpoint_inject_abort, 0 }, + { CTC_CBO_CANNOT_REWRITE_BY_INDEX, CT_FALSE, "CTC_CBO_CANNOT_REWRITE_BY_INDEX", 0, + knl_syncpoint_inject_errcode, 0 }, }; #define KNL_SYNCPOINT_COUNT (sizeof(g_knl_syncpoint) / sizeof(g_knl_syncpoint[0])) diff --git a/pkg/src/kernel/common/knl_syncpoint.h b/pkg/src/kernel/common/knl_syncpoint.h index 4243b514bd3db9ddef08365b9941ff1821aef1b1..fb32d1a0a3c762eb28da4661457c120a6615b961 100644 --- a/pkg/src/kernel/common/knl_syncpoint.h +++ b/pkg/src/kernel/common/knl_syncpoint.h @@ -332,6 +332,7 @@ typedef enum { CTC_FILL_CBO_STATS_INDEX_FAIL, CTC_GET_CBO_STATS_FAIL, CTC_UNLOCK_MDL_KEY_ABORT, + CTC_CBO_CANNOT_REWRITE_BY_INDEX, } knl_syncpoint_id; typedef void (*syncpoint_callback)(int32 *param, int32 ret); diff --git a/pkg/src/kernel/include/db_defs.h b/pkg/src/kernel/include/db_defs.h index 318060256487179f37473d69d743f812ab5079f8..3fc57eebf5eeb1c42933296baa978ab67c2f1ef5 100644 --- a/pkg/src/kernel/include/db_defs.h +++ b/pkg/src/kernel/include/db_defs.h @@ -25,6 +25,7 @@ #ifndef __KNL_DB_DEFS_H__ #define __KNL_DB_DEFS_H__ +#include "dcl_defs.h" #include "knl_defs.h" #include "persist_defs.h" diff --git a/pkg/src/kernel/include/ddl_defs.h b/pkg/src/kernel/include/ddl_defs.h index 617f41f8a085c78e16e05219f41bd948b0d05f8a..01428c195e325f56631d08d2d848c6b6ec407127 100644 --- a/pkg/src/kernel/include/ddl_defs.h +++ b/pkg/src/kernel/include/ddl_defs.h @@ -131,8 +131,7 @@ typedef struct 
st_knl_constraint_state { uint32 rely_ops : 2; uint32 is_encode : 1; // deprecated field uint32 is_cascade : 1; - uint32 is_contains_vircol : 1; - uint32 unused_ops : 19; + uint32 unused_ops : 20; }; uint32 option; }; @@ -358,6 +357,7 @@ typedef struct st_knl_column_def { bool32 nullable : 1; bool32 primary : 1; // if it is a primary key bool32 unique : 1; + bool32 is_virtual : 1; bool32 is_serial : 1; bool32 is_check : 1; bool32 is_ref : 1; @@ -371,7 +371,7 @@ typedef struct st_knl_column_def { bool32 is_default_null : 1; // empty string treat as null or '' bool32 is_jsonb : 1; // this col must be jsonb type(blob type in fact) bool32 is_unsigned : 1; // unsigned for mysql - bool32 unused_ops : 16; + bool32 unused_ops : 15; }; bool32 is_option_set; }; @@ -448,7 +448,6 @@ typedef struct st_knl_altable_def { void *drop_index_def; uint8 is_for_create_db; uint8 is_mysql_copy; - uint8 contains_vircol; } knl_altable_def_t; typedef struct st_loginfo_base_rec { @@ -492,6 +491,7 @@ typedef struct st_knl_table_def { uint32 pctfree; uint32 parted; uint32 sysid; + uint16 vcol_count; uint8 cr_mode; uint8 csf; knl_storage_def_t storage_def; @@ -508,8 +508,7 @@ typedef struct st_knl_table_def { bool32 is_mysql_copy : 1; // copy algorithm for mysql bool32 is_for_create_db : 1; bool32 is_intrinsic : 1; - bool32 contains_vircol : 1; - bool32 unused : 23; + bool32 unused : 24; }; uint32 options; // if not exists diff --git a/pkg/src/kernel/include/dml_defs.h b/pkg/src/kernel/include/dml_defs.h index 7668d71f5eeee68b834ba74fef5212d2933bf79e..c173a7bf9007dfb3cc24e8499e2e2e3ae2e1e693 100644 --- a/pkg/src/kernel/include/dml_defs.h +++ b/pkg/src/kernel/include/dml_defs.h @@ -26,6 +26,7 @@ #define __KNL_DML_DEFS_H__ #include "knl_defs.h" +#include "index_defs.h" #ifdef __cplusplus extern "C" { @@ -77,7 +78,7 @@ typedef enum st_nologing_type { #define KNL_COLUMN_FLAG_JSONB 0x00000800 /* column is an jsonb, actually is blob */ #define KNL_COLUMN_INVISIBLE(col) \ - ((col)->flags & 
(KNL_COLUMN_FLAG_HIDDEN | KNL_COLUMN_FLAG_DELETED | KNL_COLUMN_FLAG_VIRTUAL)) + ((col)->flags & (KNL_COLUMN_FLAG_HIDDEN | KNL_COLUMN_FLAG_DELETED)) #define KNL_COLUMN_IS_DELETED(col) (((col)->flags & KNL_COLUMN_FLAG_DELETED) != 0) #define KNL_COLUMN_IS_HIDDEN(col) (((col)->flags & KNL_COLUMN_FLAG_HIDDEN) != 0) #define KNL_COLUMN_IS_UPDATE_DEFAULT(col) (((col)->flags & KNL_COLUMN_FLAG_UPDATE_DEFAULT) != 0) diff --git a/pkg/src/kernel/index/rcr_btree_stat.c b/pkg/src/kernel/index/rcr_btree_stat.c index 2aae34d4e03288883db9c83aa7ecfd52a76bf6a1..b665552b8242fcfb22f6bd876576fcacbf663514 100644 --- a/pkg/src/kernel/index/rcr_btree_stat.c +++ b/pkg/src/kernel/index/rcr_btree_stat.c @@ -308,6 +308,7 @@ status_t btree_level_first_page(knl_session_t *session, btree_t *btree, uint16 l page_type_t page_type; if (segment == NULL) { + CT_LOG_RUN_ERR("Index has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "index"); return CT_ERROR; } @@ -317,6 +318,7 @@ status_t btree_level_first_page(knl_session_t *session, btree_t *btree, uint16 l page_type = (btree->index->desc.cr_mode == CR_PAGE) ? PAGE_TYPE_PCRB_NODE : PAGE_TYPE_BTREE_NODE; if (!spc_validate_page_id(session, *page_id)) { + CT_LOG_RUN_ERR("Index has been dropped or truncated. 
Inavlid page id %u-%u", page_id->page, page_id->file); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "index"); return CT_ERROR; } diff --git a/pkg/src/kernel/knl_database.c b/pkg/src/kernel/knl_database.c index b13c23b50f3d4409fa068dc32ad6261cb752def2..13cf195c6fef277aab6d736e3cee10f0e3e017b6 100644 --- a/pkg/src/kernel/knl_database.c +++ b/pkg/src/kernel/knl_database.c @@ -35,6 +35,7 @@ #include "dtc_drc.h" #include "cm_dbs_intf.h" #include "cm_file_iofence.h" +#include "cm_dss_iofence.h" #include "srv_view.h" #ifdef __cplusplus @@ -616,20 +617,29 @@ status_t db_register_iof(knl_instance_t *kernel) { if (knl_dbs_is_enable_dbs()) { if (cm_dbs_open_all_ns() != CT_SUCCESS) { - CT_LOG_RUN_ERR("failed to open dbstore namespace."); + CT_LOG_RUN_ERR("failed to open dbstor namespace."); return CT_ERROR; } if (cm_dbs_iof_reg_all_ns(kernel->id) != CT_SUCCESS) { - CT_LOG_RUN_ERR("failed to iof reg dbstore namespace, inst id %u", kernel->id); + CT_LOG_RUN_ERR("failed to iof reg dbstor namespace, inst id %u", kernel->id); return CT_ERROR; } - } else { - if (kernel->file_iof_thd.id == 0) { - if (cm_file_iof_register(kernel->id, &kernel->file_iof_thd) != CT_SUCCESS) { - CT_LOG_RUN_ERR("failed to iof reg file, inst id %u", kernel->id); - return CT_ERROR; - } + return CT_SUCCESS; + } + + if (kernel->attr.enable_dss) { + if (cm_dss_iof_register() != CT_SUCCESS) { + CT_LOG_RUN_ERR("failed to iof reg dss, inst id %u", kernel->id); + return CT_ERROR; + } + return CT_SUCCESS; + } + + if (kernel->file_iof_thd.id == 0) { + if (cm_file_iof_register(kernel->id, &kernel->file_iof_thd) != CT_SUCCESS) { + CT_LOG_RUN_ERR("failed to iof reg file, inst id %u", kernel->id); + return CT_ERROR; } } return CT_SUCCESS; diff --git a/pkg/src/kernel/knl_db_create.c b/pkg/src/kernel/knl_db_create.c index 11c3d70ab28efa0ad6c664e12170a0223d9ad3f0..e127c20ac182922dc301253401b13cfd95ac25c3 100644 --- a/pkg/src/kernel/knl_db_create.c +++ b/pkg/src/kernel/knl_db_create.c @@ -33,6 +33,7 @@ #include "cm_dbs_intf.h" 
#include "ct_tbox.h" #include "cm_file_iofence.h" +#include "cm_dss_iofence.h" #ifdef __cplusplus extern "C" { @@ -622,25 +623,34 @@ status_t dbc_register_iof(knl_instance_t *kernel) { if (knl_dbs_is_enable_dbs()) { if (cm_dbs_create_all_ns() != CT_SUCCESS) { - CT_LOG_RUN_ERR("Failed to build dbstore namespace."); + CT_LOG_RUN_ERR("Failed to build dbstor namespace."); return CT_ERROR; } if (cm_dbs_iof_reg_all_ns(kernel->id) != CT_SUCCESS) { - CT_LOG_RUN_ERR("Failed to iof reg dbstore namespace, inst id %u", kernel->id); + CT_LOG_RUN_ERR("Failed to iof reg dbstor namespace, inst id %u", kernel->id); return CT_ERROR; } if (cm_dbs_open_all_ns() != CT_SUCCESS) { - CT_LOG_RUN_ERR("Failed to open dbstore namespace."); + CT_LOG_RUN_ERR("Failed to open dbstor namespace."); return CT_ERROR; } - } else { - if (kernel->file_iof_thd.id == 0) { - if (cm_file_iof_register(kernel->id, &kernel->file_iof_thd) != CT_SUCCESS) { - CT_LOG_RUN_ERR("Failed to iof reg file, inst id %u", kernel->id); - return CT_ERROR; - } + return CT_SUCCESS; + } + + if (kernel->attr.enable_dss) { + if (cm_dss_iof_register() != CT_SUCCESS) { + CT_LOG_RUN_ERR("failed to iof reg dss, inst id %u", kernel->id); + return CT_ERROR; + } + return CT_SUCCESS; + } + + if (kernel->file_iof_thd.id == 0) { + if (cm_file_iof_register(kernel->id, &kernel->file_iof_thd) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to iof reg file, inst id %u", kernel->id); + return CT_ERROR; } } return CT_SUCCESS; diff --git a/pkg/src/kernel/knl_interface.c b/pkg/src/kernel/knl_interface.c index 2176e986f51fe7c3ed8c871668612cf4c6102283..a75bc8b8b60fc991d1ff3997bfecd1ec3431e173 100644 --- a/pkg/src/kernel/knl_interface.c +++ b/pkg/src/kernel/knl_interface.c @@ -30,6 +30,7 @@ #include "cm_device.h" #include "cm_io_record.h" #include "cm_file_iofence.h" +#include "cm_dss_iofence.h" #include "knl_lob.h" #include "rcr_btree.h" #include "rcr_btree_scan.h" @@ -4784,6 +4785,16 @@ uint16 knl_get_column_id(knl_dictionary_t *dc, text_t *name) index = 
column->next; } + for (uint32 i = 0; i < entity->vircol_count; i++) { + column = entity->virtual_columns[i]; + if (column == NULL || KNL_COLUMN_IS_HIDDEN(column) || KNL_COLUMN_IS_DELETED(column)) { + continue; + } + if (strcmp(column->name, column_name) == 0) { + return column->id; + } + } + return CT_INVALID_ID16; } @@ -9600,7 +9611,7 @@ status_t knl_restore(knl_handle_t session, knl_restore_t *param) } if (knl_db_open_dbstor_ns(session) != CT_SUCCESS) { - CT_LOG_RUN_INF("failed to open dbstore namespace"); + CT_LOG_RUN_INF("failed to open dbstor namespace"); return CT_ERROR; } @@ -9657,16 +9668,25 @@ status_t knl_register_iof(knl_session_t *se) { if (knl_dbs_is_enable_dbs()) { if (knl_db_open_dbstor_ns((knl_handle_t)se) != CT_SUCCESS) { - CT_LOG_RUN_INF("failed to open dbstore namespace"); + CT_LOG_RUN_INF("failed to open dbstor namespace"); return CT_ERROR; } - } else { - knl_instance_t *kernel = (knl_instance_t *)se->kernel; - if (kernel->file_iof_thd.id == 0) { - if (cm_file_iof_register(kernel->id, &kernel->file_iof_thd) != CT_SUCCESS) { - CT_LOG_RUN_ERR("failed to iof reg file, inst id %u", kernel->id); - return CT_ERROR; - } + return CT_SUCCESS; + } + + knl_instance_t *kernel = (knl_instance_t *)se->kernel; + if (g_instance->kernel.attr.enable_dss) { + if (cm_dss_iof_register() != CT_SUCCESS) { + CT_LOG_RUN_ERR("failed to iof reg dss, inst id %u", kernel->id); + return CT_ERROR; + } + return CT_SUCCESS; + } + + if (kernel->file_iof_thd.id == 0) { + if (cm_file_iof_register(kernel->id, &kernel->file_iof_thd) != CT_SUCCESS) { + CT_LOG_RUN_ERR("failed to iof reg file, inst id %u", kernel->id); + return CT_ERROR; } } return CT_SUCCESS; @@ -14818,13 +14838,13 @@ status_t knl_db_open_dbstor_ns(knl_handle_t session) status = cm_dbs_open_all_ns(); SYNC_POINT_GLOBAL_END; if (status != CT_SUCCESS) { - CT_LOG_RUN_ERR("failed to open dbstore namespace."); + CT_LOG_RUN_ERR("failed to open dbstor namespace."); return CT_ERROR; } knl_session_t *se = (knl_session_t 
*)session; knl_instance_t *kernel = (knl_instance_t *)se->kernel; if (cm_dbs_iof_reg_all_ns(kernel->id) != CT_SUCCESS) { - CT_LOG_RUN_ERR("failed to iof reg dbstore namespace, inst id %u", kernel->id); + CT_LOG_RUN_ERR("failed to iof reg dbstor namespace, inst id %u", kernel->id); return CT_ERROR; } return CT_SUCCESS; @@ -15318,9 +15338,6 @@ status_t knl_alter_table4mysql(knl_handle_t se, knl_handle_t stmt, return CT_ERROR; } log_add_lrep_ddl_begin(session); - if (def->action == ALTABLE_ADD_COLUMN && def->contains_vircol) { - CT_RETURN_IFERR(db_altable_drop_logical_log(session, dc, def)); - } for (; count < def_count; count++) { cur_def = &def[count]; ret = knl_alter_single_action(session, stmt, cur_def, dc, is_lrep_log, &contain_hash_part); diff --git a/pkg/src/kernel/lob/knl_lob.c b/pkg/src/kernel/lob/knl_lob.c index cf6a4302d70ecdcd98203a4fe0cfbedfaf1701db..8b10ea6bf2c251e275ba78c7892f400a0b3cf241 100644 --- a/pkg/src/kernel/lob/knl_lob.c +++ b/pkg/src/kernel/lob/knl_lob.c @@ -2373,6 +2373,7 @@ status_t knl_read_lob_check(knl_session_t *session, page_id_t page_id, lob_locat data = LOB_CURR_DATA_PAGE(session); if (data->head.type != PAGE_TYPE_LOB_DATA) { + CT_LOG_RUN_ERR("Table has been dropped or truncated. 
data->head.type: %d", data->head.type); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -2386,6 +2387,9 @@ status_t knl_read_lob_check(knl_session_t *session, page_id_t page_id, lob_locat } if (!LOB_CHECK_ORG_SCN(locator, data)) { + CT_LOG_RUN_ERR("Table has been dropped or truncated, " + "data->head.type: %d, data->chunk.org_scn: %llu, locator->org_scn: %llu", + data->head.type, data->chunk.org_scn, locator->org_scn); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -2405,6 +2409,7 @@ status_t lob_temp_read_lob_check(knl_session_t *session, page_id_t page_id, lob_ data = (lob_data_page_t *)(buf_curr_temp_page(session))->data; if (data->head.type != PAGE_TYPE_LOB_DATA) { + CT_LOG_RUN_ERR("Table has been dropped or truncated, data->head.type: %d", data->head.type); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -2419,6 +2424,7 @@ status_t lob_temp_read_lob_check(knl_session_t *session, page_id_t page_id, lob_ } if (!LOB_CHECK_ORG_SCN(locator, data)) { + CT_LOG_RUN_ERR("Table has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } diff --git a/pkg/src/kernel/persist/knl_ckpt.c b/pkg/src/kernel/persist/knl_ckpt.c index bfddaeaba069cf9afe7c182541daf1e0702bef0a..d0a08372807c2e8082fd3681c4b255532ef72b12 100644 --- a/pkg/src/kernel/persist/knl_ckpt.c +++ b/pkg/src/kernel/persist/knl_ckpt.c @@ -2274,6 +2274,10 @@ void ckpt_set_trunc_point(knl_session_t *session, log_point_t *point) return; } cm_spin_lock(&ctx->queue.lock, &session->stat->spin_stat.stat_ckpt_queue); + // update curr_node_idx only in standby cluster + if (!DB_IS_PRIMARY(&session->kernel->db)) { + ctx->queue.curr_node_idx = session->kernel->id; + } ctx->queue.trunc_point = *point; cm_spin_unlock(&ctx->queue.lock); } @@ -2561,7 +2565,7 @@ void dbwr_proc(thread_t *thread) if (thread->closed) { break; } - // if enable dbstore batch flush, dbwr->begin and dbwr->end will unuse + // if 
enable dbstor batch flush, dbwr->begin and dbwr->end will unuse knl_panic(dbwr->end >= dbwr->begin || (cm_dbs_is_enable_dbs() && cm_dbs_is_enable_batch_flush())); knl_panic(dbwr->dbwr_trigger); diff --git a/pkg/src/kernel/statistics/knl_rstat.c b/pkg/src/kernel/statistics/knl_rstat.c index 10db8681f61d68243e6b3df9e1bc8e99fc938469..b7324042c29534f188da4cddd3634eef2fce50f0 100644 --- a/pkg/src/kernel/statistics/knl_rstat.c +++ b/pkg/src/kernel/statistics/knl_rstat.c @@ -1210,6 +1210,7 @@ status_t stats_next_page_in_extent(knl_session_t *session, knl_cursor_t *cursor, page_id.page += random_step; if (!spc_validate_page_id(session, page_id)) { + CT_LOG_RUN_ERR("%s has been dropped or truncated. Invalid page id %u-%u", name, page_id.page, page_id.file); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, name); return CT_ERROR; } @@ -1298,6 +1299,7 @@ static status_t stats_next_extent_page(knl_session_t *session, knl_cursor_t *cur } if (!spc_validate_page_id(session, extent)) { + CT_LOG_RUN_ERR("%s has been dropped or truncated. Invalid page id %u-%u", name, extent.page, extent.file); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, name); return CT_ERROR; } @@ -1314,6 +1316,9 @@ static status_t stats_next_extent_page(knl_session_t *session, knl_cursor_t *cur if (cursor->page_soft_damaged) { CT_THROW_ERROR(ERR_PAGE_SOFT_DAMAGED, (cursor)->rowid.file, (cursor)->rowid.page); } else { + CT_LOG_RUN_ERR("%s has been dropped or truncated, " + "stat_sample->hwm_extents: %u, page->head.type: %d", + name, stat_sample->hwm_extents, page->head.type); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, name); } return CT_ERROR; @@ -1358,6 +1363,7 @@ static status_t stats_next_sample_page(knl_session_t *session, knl_cursor_t *cur table->desc.name; // table name or table part name if (!spc_validate_page_id(session, entry)) { + CT_LOG_RUN_ERR("%s has been dropped or truncated. 
Invalid page id %u-%u", name, entry.page, entry.file); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, name); return CT_ERROR; } @@ -1366,6 +1372,9 @@ static status_t stats_next_sample_page(knl_session_t *session, knl_cursor_t *cur page_head_t *page = (page_head_t *)CURR_PAGE(session); segment = HEAP_SEG_HEAD(session); if (page->type != PAGE_TYPE_HEAP_HEAD || segment->seg_scn != seg_scn) { + CT_LOG_RUN_ERR("%s has been dropped or truncated, " + "page->type: %u, segment->seg_scn: %llu, seg_scn: %llu", + name, page->type, segment->seg_scn, seg_scn); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, name); buf_leave_page(session, CT_FALSE); return CT_ERROR; @@ -2602,6 +2611,7 @@ static status_t stats_get_nonsample_page(knl_session_t *session, knl_cursor_t *c if (cursor->page_soft_damaged) { CT_THROW_ERROR(ERR_PAGE_SOFT_DAMAGED, (cursor)->rowid.file, (cursor)->rowid.page); } else { + CT_LOG_RUN_ERR("%s has been dropped or truncated, page_id: %u-%u", name, page_id.page, page_id.file); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, name); } return CT_ERROR; @@ -4702,6 +4712,7 @@ static status_t stats_update_sys_column(knl_session_t *session, stats_col_handle if (cursor->eof) { CM_RESTORE_STACK(session->stack); + CT_LOG_RUN_ERR("%s has been dropped or truncated.", column->name); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, column->name); return CT_ERROR; } @@ -5130,6 +5141,7 @@ status_t stats_update_sys_subtablepart(knl_session_t *session, table_part_t *tab if (cursor->eof) { CM_RESTORE_STACK(session->stack); + CT_LOG_RUN_ERR("%s has been dropped or truncated.", table_sub->desc.name); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, table_sub->desc.name); stats_try_end_auton_rm(session, CT_ERROR, is_dynamic); return CT_ERROR; @@ -5208,6 +5220,7 @@ status_t stats_update_sys_tablepart(knl_session_t *session, knl_dictionary_t *dc if (cursor->eof) { CM_RESTORE_STACK(session->stack); + CT_LOG_RUN_ERR("%s has been dropped or truncated.", table_part->desc.name); 
CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, table_part->desc.name); stats_try_end_auton_rm(session, CT_ERROR, tab_stats->is_dynamic); return CT_ERROR; @@ -7058,6 +7071,7 @@ static status_t stats_check_segment_valid(knl_session_t *session, table_part_t * page_head_t *page = (page_head_t *)CURR_PAGE(session); *seg = HEAP_SEG_HEAD(session); if (page->type != PAGE_TYPE_HEAP_HEAD || (*seg)->seg_scn != table_part->desc.seg_scn) { + CT_LOG_RUN_ERR("%s has been dropped or truncated.", table_part->desc.name); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, table_part->desc.name); buf_leave_page(session, CT_FALSE); return CT_ERROR; @@ -7687,6 +7701,7 @@ static status_t stats_update_sys_table(knl_session_t *session, stats_table_t *ta if (cursor->eof) { CM_RESTORE_STACK(session->stack); + CT_LOG_RUN_ERR("%s has been dropped or truncated.", table->desc.name); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, table->desc.name); stats_try_end_auton_rm(session, CT_ERROR, tab_stats->is_dynamic); return CT_ERROR; @@ -10787,6 +10802,7 @@ status_t stats_check_analyzing(knl_session_t *session, knl_dictionary_t *dc, boo } if (cursor->eof) { + CT_LOG_RUN_ERR("Table has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); status = CT_ERROR; break; @@ -10951,6 +10967,7 @@ status_t stats_set_analyze_time(knl_session_t *session, knl_dictionary_t *dc, bo if (cursor->eof) { CM_RESTORE_STACK(session->stack); + CT_LOG_RUN_ERR("Table has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -11072,6 +11089,7 @@ static status_t stats_update_sys_table_force(knl_session_t *session, knl_diction if (cursor->eof) { CM_RESTORE_STACK(session->stack); + CT_LOG_RUN_ERR("Table has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -11135,6 +11153,7 @@ static status_t stats_update_sys_tablepart_force(knl_session_t *session, knl_dic if (cursor->eof) { CM_RESTORE_STACK(session->stack); + 
CT_LOG_RUN_ERR("%s has been dropped or truncated.", table_part->desc.name); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, table_part->desc.name); return CT_ERROR; } diff --git a/pkg/src/kernel/table/knl_heap.c b/pkg/src/kernel/table/knl_heap.c index e0daef407dd3db3cdc7d450fc4835d968950f185..af8eb9b5dacc10cebfb3ae2be0d836db860b64dd 100644 --- a/pkg/src/kernel/table/knl_heap.c +++ b/pkg/src/kernel/table/knl_heap.c @@ -5874,6 +5874,8 @@ static status_t heap_fetch_chain_row(knl_session_t *session, knl_cursor_t *curso if ((uint16)next_rid.slot >= page->dirs) { buf_leave_page(session, CT_FALSE); + CT_LOG_RUN_ERR("Table has been dropped or truncated. next_rid.slot: %u, page->dirs: %u", + next_rid.slot, page->dirs); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -6158,6 +6160,8 @@ static status_t heap_scan_full_page(knl_session_t *session, knl_cursor_t *cursor } if ((uint16)cursor->rowid.slot > page->dirs) { + CT_LOG_RUN_ERR("Table has been dropped or truncated. cursor->rowid.slot: %u, page->dirs: %u", + cursor->rowid.slot, page->dirs); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } diff --git a/pkg/src/kernel/table/knl_heap.h b/pkg/src/kernel/table/knl_heap.h index 8d7a2083d28ee324895230addb61b1db40c86483..1752b584d5821c9ccb49d1931f54d07e2379426e 100644 --- a/pkg/src/kernel/table/knl_heap.h +++ b/pkg/src/kernel/table/knl_heap.h @@ -271,6 +271,7 @@ typedef struct st_heap_compact_def { if ((cursor)->page_soft_damaged) { \ CT_THROW_ERROR(ERR_PAGE_SOFT_DAMAGED, (cursor)->rowid.file, (cursor)->rowid.page); \ } else if ((cursor)->rowid_count == 0) { \ + CT_LOG_RUN_ERR("Table has been dropped or truncated."); \ CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); \ } else { \ CT_THROW_ERROR(ERR_INVALID_ROWID); \ diff --git a/pkg/src/kernel/table/knl_table.c b/pkg/src/kernel/table/knl_table.c index 79080a14a6fcd03977250580717503e53a502881..7f12879ac2f1676f1933974b3bb091e90eac9298 100644 --- a/pkg/src/kernel/table/knl_table.c +++ 
b/pkg/src/kernel/table/knl_table.c @@ -244,7 +244,7 @@ status_t db_init_table_desc_spc(knl_session_t *session, knl_table_desc_t *desc, static status_t db_init_table_desc_by_def(knl_session_t *session, knl_table_desc_t *desc, knl_table_def_t *def) { - desc->column_count = def->columns.count; + desc->column_count = def->columns.count - def->vcol_count; (void)cm_text2str(&def->name, desc->name, CT_NAME_BUFFER_SIZE); desc->entry = INVALID_PAGID; desc->type = def->type; @@ -351,7 +351,7 @@ status_t db_init_table_desc(knl_session_t *session, knl_table_desc_t *desc, knl_ return CT_ERROR; } - if (def->columns.count > session->kernel->attr.max_column_count - 1) { + if (def->columns.count - def->vcol_count > session->kernel->attr.max_column_count - 1) { CT_THROW_ERROR(ERR_MAX_COLUMN_SIZE, session->kernel->attr.max_column_count - 1); return CT_ERROR; } @@ -430,6 +430,11 @@ static void db_init_column_flg(knl_column_def_t *def, knl_column_t *column) } } + if (def->is_virtual) { + COLUMN_SET_VIRTUAL(column); + column->default_text = def->default_text; + } + if (!def->is_default_null) { COLUMN_RESET_DEFAULT_NULL(column); column->default_text = def->default_text; @@ -1817,7 +1822,7 @@ static void db_alloc_vcol_id(dc_entity_t *entity, uint32 *vcol_id) { knl_column_t *column = NULL; - while (*vcol_id < entity->max_virtual_cols + DC_VIRTUAL_COL_START) { + while (*vcol_id < entity->vircol_count + DC_VIRTUAL_COL_START) { column = dc_get_column(entity, *vcol_id); if (column == NULL) { return; @@ -1864,7 +1869,7 @@ static status_t db_prepare_idx_cols(knl_session_t *session, knl_index_def_t *def type = index_col->datatype; col_size = index_col->size; index_col->nullable = column->nullable; - + // Process the virtual column created by func_index if (db_create_virtual_icol(session, dc, index_col, vcol_id, column) != CT_SUCCESS) { return CT_ERROR; } @@ -2850,7 +2855,7 @@ static status_t db_create_cons(knl_session_t *session, knl_dictionary_t *dc, knl } if (IS_COMPATIBLE_MYSQL_INST && 
!IS_SQL_SERVER_INITIALIZING && - table->desc.type == TABLE_TYPE_HEAP && def->type == CONS_TYPE_PRIMARY && !def->cons_state.is_contains_vircol) { + table->desc.type == TABLE_TYPE_HEAP && def->type == CONS_TYPE_PRIMARY) { if (db_write_syslogicrep(session, table->desc.uid, table->desc.id, index_id)) { return CT_ERROR; } @@ -2954,8 +2959,6 @@ static status_t db_create_constraints(knl_session_t *session, knl_table_desc_t * continue; } - cons_def->cons_state.is_contains_vircol = def->contains_vircol; - if (db_create_cons(session, &dc, cons_def) != CT_SUCCESS) { dc_close_table_private(&dc); return CT_ERROR; @@ -3212,6 +3215,18 @@ static status_t db_write_sys_external(knl_session_t *session, knl_cursor_t *curs return knl_internal_insert(session, cursor); } +// The default_text of VGC indicates the position in the SYS_COLUMNS +static status_t db_set_default_text(knl_session_t *session, knl_column_def_t *column_def, uint32 location) +{ + char *pos_buf = (char *)cm_push(session->stack, CT_MAX_INT32_STRLEN + 1); + int32 len = snprintf_s(pos_buf, CT_MAX_INT32_STRLEN + 1, + CT_MAX_INT32_STRLEN, PRINT_FMT_UINT32, location); + knl_securec_check_ss(len); + column_def->default_text.str = pos_buf; + column_def->default_text.len = (uint32)len; + return CT_SUCCESS; +} + static status_t db_construct_columns(knl_session_t *session, knl_cursor_t *cursor, knl_table_def_t *def, table_t *table) { knl_column_def_t *column_def = NULL; @@ -3220,7 +3235,17 @@ static status_t db_construct_columns(knl_session_t *session, knl_cursor_t *curso bool32 is_encrypt = SPACE_IS_ENCRYPT(space); column.name = (char *)cm_push(session->stack, CT_NAME_BUFFER_SIZE); - + /* + For virtual generated column (VGC), the column_id start with DC_VIRTUAL_COL_START, + virtual columns and ordinary columns use independent ID generation methods. 
+ For example, table (cola, colb, colc_v, cold, cole_v, colf), column_ids ={0, 1, 60000, 2, 60001, 3} + */ + uint32 id, col_id = 0; + uint32 vcol_id = DC_VIRTUAL_COL_START; + /* + Here only handle VGCs and regular columns. vir_cols created by func_indexes at the end of all columns, + which can be seen @db_prepare_idx_cols, @db_create_virtual_icol. + */ for (uint32 i = 0; i < def->columns.count; i++) { column_def = (knl_column_def_t *)cm_galist_get(&def->columns, i); if (is_encrypt && (!CT_IS_LOB_TYPE(column_def->typmod.datatype) && !column_def->typmod.is_array)) { @@ -3231,8 +3256,17 @@ static status_t db_construct_columns(knl_session_t *session, knl_cursor_t *curso return CT_ERROR; } } + id = column_def->is_virtual ? vcol_id++ : col_id++; + + if (column_def->is_virtual) { + if(db_set_default_text(session, column_def, i) != CT_SUCCESS) { + cm_pop(session->stack); + return CT_ERROR; + } + } + + db_convert_column_def(&column, table->desc.uid, table->desc.id, column_def, NULL, id); - db_convert_column_def(&column, table->desc.uid, table->desc.id, column_def, NULL, i); if (def->type != TABLE_TYPE_HEAP && KNL_COLUMN_IS_ARRAY(&column)) { CT_THROW_ERROR(ERR_WRONG_TABLE_TYPE); cm_pop(session->stack); @@ -9667,7 +9701,13 @@ static status_t db_prepare_add_column(knl_session_t *session, knl_dictionary_t * bool32 is_table_null = CT_FALSE; char col_name[CT_NAME_BUFFER_SIZE]; - if (entity->column_count >= session->kernel->attr.max_column_count - 1) { + if (def->is_virtual && entity->vircol_count >= CT_MAX_VIRTUAL_COLS + + CT_MAX_INDEX_COLUMNS * CT_MAX_TABLE_INDEXES - 1) { + CT_THROW_ERROR(ERR_TOO_MANY_COLUMNS, "virtual column"); + return CT_ERROR; + } + + if (!def->is_virtual && entity->column_count >= session->kernel->attr.max_column_count - 1) { CT_THROW_ERROR(ERR_MAX_COLUMN_SIZE, session->kernel->attr.max_column_count - 1); return CT_ERROR; } @@ -9814,8 +9854,11 @@ status_t db_altable_add_column(knl_session_t *session, knl_dictionary_t *dc, voi knl_dictionary_t new_dc; bool32 
update_default = CT_FALSE; space_t *space = NULL; + uint32 col_id = 0; for (i = 0; i < def->column_defs.count; i++) { + bool32 is_change_col_cnt = CT_TRUE; + bool32 is_add = CT_TRUE; column_def = (knl_alt_column_prop_t *)cm_galist_get(&def->column_defs, i); new_column = &column_def->new_column; @@ -9846,7 +9889,19 @@ status_t db_altable_add_column(knl_session_t *session, knl_dictionary_t *dc, voi cursor = knl_push_cursor(session); column.name = (char *)cm_push(session->stack, CT_NAME_BUFFER_SIZE); - db_convert_column_def(&column, table->desc.uid, table->desc.id, new_column, NULL, entity->column_count); + + col_id = new_column->is_virtual ? + entity->vircol_count + DC_VIRTUAL_COL_START : entity->column_count; + + if (new_column->is_virtual) { + if (db_set_default_text(session, new_column, i) != CT_SUCCESS) { + CM_RESTORE_STACK(session->stack); + dc_close_table_private(&new_dc); + return CT_ERROR; + } + } + + db_convert_column_def(&column, table->desc.uid, table->desc.id, new_column, NULL, col_id); if (db_write_syscolumn(session, cursor, &column) != CT_SUCCESS) { CM_RESTORE_STACK(session->stack); @@ -9862,7 +9917,13 @@ status_t db_altable_add_column(knl_session_t *session, knl_dictionary_t *dc, voi } } - if (db_update_table_desc(session, &table->desc, CT_TRUE, CT_TRUE) != CT_SUCCESS) { + // For virtual generated columns, it does not affect column_count in table->desc. 
+ if (new_column->is_virtual) { + is_change_col_cnt = CT_FALSE; + is_add = CT_FALSE; + } + + if (db_update_table_desc(session, &table->desc, is_change_col_cnt, is_add) != CT_SUCCESS) { dc_close_table_private(&new_dc); CM_RESTORE_STACK(session->stack); return CT_ERROR; @@ -11523,7 +11584,6 @@ status_t db_altable_add_cons(knl_session_t *session, knl_dictionary_t *dc, knl_a def->cons_def.new_cons.name.str = cons_name; def->cons_def.new_cons.cons_state.is_anonymous = CT_TRUE; } - def->cons_def.new_cons.cons_state.is_contains_vircol = def->contains_vircol; switch (def->cons_def.new_cons.type) { case CONS_TYPE_PRIMARY: diff --git a/pkg/src/kernel/table/knl_temp.c b/pkg/src/kernel/table/knl_temp.c index c2ff86e691d791f9487500dca5d8f4b024ade079..f66e87a09a13df35d9fdca771026a6f92aa32190 100644 --- a/pkg/src/kernel/table/knl_temp.c +++ b/pkg/src/kernel/table/knl_temp.c @@ -529,6 +529,9 @@ static status_t temp_heap_get_migr_row(knl_session_t *session, knl_cursor_t *cur page = TEMP_HEAP_CURR_PAGE(session); if (!temp_heap_check_page(session, cursor, page, PAGE_TYPE_TEMP_HEAP)) { buf_leave_temp_page_nolock(session, CT_FALSE); + CT_LOG_RUN_ERR("Temp table has been dropped or truncated, " + "page->head.type: %d, page->org_scn: %llu, table->desc.org_scn: %llu", + page->head.type, page->org_scn, ((table_t *)cursor->table)->desc.org_scn); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "temp table"); return CT_ERROR; } @@ -612,6 +615,9 @@ static status_t temp_heap_fetch_by_page(knl_session_t *session, knl_cursor_t *cu page = TEMP_HEAP_CURR_PAGE(session); if (!temp_heap_check_page(session, cursor, page, PAGE_TYPE_TEMP_HEAP)) { buf_leave_temp_page_nolock(session, CT_FALSE); + CT_LOG_RUN_ERR("Temp table has been dropped or truncated, " + "page->head.type: %d, page->org_scn: %llu, table->desc.org_scn: %llu", + page->head.type, page->org_scn, ((table_t *)cursor->table)->desc.org_scn); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "temp table"); return CT_ERROR; } diff --git 
a/pkg/src/kernel/table/pcr_heap_scan.c b/pkg/src/kernel/table/pcr_heap_scan.c index e485197470aca5f81f3b6d5874e3cfd40d67f398..8c94d8ad1cecc2a0324949720e02bbf2ff542e0b 100644 --- a/pkg/src/kernel/table/pcr_heap_scan.c +++ b/pkg/src/kernel/table/pcr_heap_scan.c @@ -365,6 +365,8 @@ static status_t pcrh_search_cr_page(knl_session_t *session, knl_cursor_t *cursor return CT_SUCCESS; } else if (cursor->rowid.slot > cr_page->dirs) { + CT_LOG_RUN_ERR("Table has been dropped or truncated, cursor->rowid.slot: %u, cr_page->dirs: %u", + cursor->rowid.slot, cr_page->dirs); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -619,6 +621,8 @@ status_t pcrh_fetch_chain_r(knl_session_t *session, knl_cursor_t *cursor, knl_sc page = pcrh_get_current_page(session, cursor); if (rowid.slot >= page->dirs) { pcrh_leave_current_page(session, cursor); + CT_LOG_RUN_ERR("Table has been dropped or truncated, rowid.slot: %u, page->dirs: %u", + rowid.slot, page->dirs); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -626,6 +630,7 @@ status_t pcrh_fetch_chain_r(knl_session_t *session, knl_cursor_t *cursor, knl_sc dir = pcrh_get_dir(page, (uint16)rowid.slot); if (PCRH_DIR_IS_FREE(dir)) { pcrh_leave_current_page(session, cursor); + CT_LOG_RUN_ERR("Table has been dropped or truncated, dir: %u", *dir); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } @@ -826,6 +831,7 @@ static status_t pcrh_fetch_link_r(knl_session_t *session, knl_cursor_t *cursor, pcr_row_dir_t *dir = pcrh_get_dir(page, (uint16)cursor->link_rid.slot); if (PCRH_DIR_IS_FREE(dir)) { pcrh_leave_current_page(session, cursor); + CT_LOG_RUN_ERR("Table has been dropped or truncated."); CT_THROW_ERROR(ERR_OBJECT_ALREADY_DROPPED, "table"); return CT_ERROR; } diff --git a/pkg/src/mec/mes_uc.c b/pkg/src/mec/mes_uc.c index ac4b0275975229bd09ec124b712b92e6147f8994..475f43f0fdee8919ef1e3ee597100c140ce7726d 100644 --- a/pkg/src/mec/mes_uc.c +++ b/pkg/src/mec/mes_uc.c @@ -125,13 
+125,17 @@ static status_t mes_load_symbol(void *lib_handle, char *symbol, void **sym_lib_h status_t uc_init_lib(void) { mes_interface_t *intf = &g_mes_interface; - intf->uc_handle = dlopen("libdbstoreClient.so", RTLD_LAZY); const char *dlopen_err = NULL; + intf->uc_handle = dlopen("libdbstorClient.so", RTLD_LAZY); dlopen_err = dlerror(); - if (intf->uc_handle == NULL) { - CT_LOG_RUN_ERR("fail to load libdbstoreClient.so, maybe lib path error, errno %s", dlopen_err); - return CT_ERROR; + CT_LOG_RUN_WAR("Failed to load libdbstorClient.so, trying libdbstoreClient.so instead, original error: %s", dlopen_err); + intf->uc_handle = dlopen("libdbstoreClient.so", RTLD_LAZY); + dlopen_err = dlerror(); + if (intf->uc_handle == NULL) { + CT_LOG_RUN_ERR("Failed to load libdbstoreClient.so, maybe lib path error, errno %s", dlopen_err); + return CT_ERROR; + } } CT_RETURN_IFERR(mes_load_symbol(intf->uc_handle, "dpuc_msg_alloc", (void **)(&intf->dpuc_msg_alloc))); @@ -156,7 +160,7 @@ status_t uc_init_lib(void) CT_RETURN_IFERR(mes_load_symbol(intf->uc_handle, "dpuc_link_create_with_addr", (void **)(&intf->dpuc_link_create_with_addr))); CT_RETURN_IFERR(mes_load_symbol(intf->uc_handle, "dpuc_qlink_close", (void **)(&intf->dpuc_qlink_close))); CT_RETURN_IFERR(mes_load_symbol(intf->uc_handle, "dpuc_set_security_cert_info", (void **)(&intf->dpuc_set_security_cert_info))); - CT_LOG_RUN_INF("load uc from libdbstoreClient.so done"); + CT_LOG_RUN_INF("load uc from libdbstorClient.so done"); return CT_SUCCESS; } diff --git a/pkg/src/protocol/BUILD.bazel b/pkg/src/protocol/BUILD.bazel index 4809a7b7111ccc66a8edcbd7bed7627d68448594..09aa59c359a3d191770dba02d5ed221e7db49d9e 100644 --- a/pkg/src/protocol/BUILD.bazel +++ b/pkg/src/protocol/BUILD.bazel @@ -119,7 +119,7 @@ cc_binary( ] + COMMON_LINKOPTS, deps = [ "//library:cgw_client", - "//library:dbstoreClient", + "//library:dbstorClient", "//library:dbstor_tool", "//library:nomlnx/xnetlite", "//library:iod", diff --git 
a/pkg/src/server/BUILD.bazel b/pkg/src/server/BUILD.bazel index fbbcdc3f8ebcecc09fb855fd9c67dd2ec8fb919c..f0cd5aefb7585bec7a08af22758dfefb1bfd63d6 100644 --- a/pkg/src/server/BUILD.bazel +++ b/pkg/src/server/BUILD.bazel @@ -103,7 +103,7 @@ cc_binary( "//library:lz4", "//library:protobuf-c", "//library:cgw_client", - "//library:dbstoreClient", + "//library:dbstorClient", "//library:dbstor_tool", "//library:nomlnx/xnetlite", "//library:iod", diff --git a/pkg/src/server/params/load_kernel.c b/pkg/src/server/params/load_kernel.c index 7f022445515fcfaba3b96b0f9b6266727d4a28f9..a170a07223e03d321b11120df1660aa6ce6e4c40 100644 --- a/pkg/src/server/params/load_kernel.c +++ b/pkg/src/server/params/load_kernel.c @@ -432,7 +432,7 @@ static status_t srv_get_file_options_params(knl_attr_t *attr) } if (cm_str_equal_ins(value, "ASYNCH")) { - if (g_instance->attr.enable_dss != CT_TRUE) { + if (g_instance->kernel.attr.enable_dss != CT_TRUE) { attr->enable_asynch = CT_TRUE; } attr->enable_directIO = CT_TRUE; @@ -451,7 +451,7 @@ static status_t srv_get_file_options_params(knl_attr_t *attr) } if (cm_str_equal_ins(value, "SETALL")) { - if (g_instance->attr.enable_dss != CT_TRUE) { + if (g_instance->kernel.attr.enable_dss != CT_TRUE) { attr->enable_asynch = CT_TRUE; } attr->enable_directIO = CT_TRUE; diff --git a/pkg/src/server/params/load_server.c b/pkg/src/server/params/load_server.c index f7181ea6df0ca9cac1f1baea4f4ac3b37adbbfcd..25b720152d5e474a133b6b3d9643f9524ce0383c 100644 --- a/pkg/src/server/params/load_server.c +++ b/pkg/src/server/params/load_server.c @@ -1006,7 +1006,7 @@ status_t srv_load_server_params(void) bool32 enable_dss = CT_FALSE; CT_RETURN_IFERR(srv_get_param_bool32("ENABLE_DSS", &enable_dss)); if (enable_dss) { - g_instance->attr.enable_dss = enable_dss; + g_instance->kernel.attr.enable_dss = enable_dss; CT_RETURN_IFERR(srv_load_dss_path()); CT_RETURN_IFERR(srv_device_init(g_instance->kernel.dtc_attr.ctstore_inst_path)); } diff --git 
a/pkg/src/server/params/srv_param.c b/pkg/src/server/params/srv_param.c index e95685b2e51801866a2a5f05b1c237bffb073d44..59f960086abcf060076bab83a051f8fa8799a242 100644 --- a/pkg/src/server/params/srv_param.c +++ b/pkg/src/server/params/srv_param.c @@ -1097,11 +1097,11 @@ config_item_t g_parameters[] = { { "SHARED_PATH", CT_TRUE, ATTR_READONLY, "", NULL, NULL, "-", "-", "CT_TYPE_VARCHAR", NULL, PARAM_SHARED_PATH, EFFECT_REBOOT, CFG_DB, sql_verify_als_comm, NULL, NULL, NULL }, /* dbstor */ { "ENABLE_DBSTOR", CT_TRUE, ATTR_NONE, "FALSE", NULL, NULL, "-", "FALSE,TRUE", "CT_TYPE_BOOLEAN", NULL, - PARAM_ENABLE_DBSTORE, EFFECT_REBOOT, CFG_INS, sql_verify_als_bool, sql_notify_als_bool, sql_notify_als_bool, NULL }, + PARAM_ENABLE_DBSTOR, EFFECT_REBOOT, CFG_INS, sql_verify_als_bool, sql_notify_als_bool, sql_notify_als_bool, NULL }, { "DBSTOR_DEPLOY_MODE", CT_TRUE, ATTR_NONE, "0", NULL, NULL, "-", "[0,1]", "CT_TYPE_INTEGER", NULL, - PARAM_DBSTORE_DEPLOY_MODE, EFFECT_REBOOT, CFG_INS, sql_verify_als_bool, NULL, NULL, NULL }, + PARAM_DBSTOR_DEPLOY_MODE, EFFECT_REBOOT, CFG_INS, sql_verify_als_bool, NULL, NULL, NULL }, { "DBSTOR_NAMESPACE", CT_TRUE, ATTR_NONE, "", NULL, NULL, "-", "-", "CT_TYPE_VARCHAR", NULL, - PARAM_DBSTORE_NAMESPACE, EFFECT_REBOOT, CFG_INS, sql_verify_als_comm, NULL, NULL, NULL }, + PARAM_DBSTOR_NAMESPACE, EFFECT_REBOOT, CFG_INS, sql_verify_als_comm, NULL, NULL, NULL }, { "COMPATIBLE_MYSQL", CT_TRUE, ATTR_READONLY, "1", NULL, NULL, "-", "[0, 1]", "CT_TYPE_INTEGER", NULL, PARAM_COMPATIBLE_MYSQL, EFFECT_REBOOT, CFG_INS, sql_verify_als_compatible_mysql, sql_notify_als_compatible_mysql, NULL, NULL }, diff --git a/pkg/src/server/params/srv_param_def.h b/pkg/src/server/params/srv_param_def.h index e4a1ee6ea35e9f75f64c270da0912db5711d4df7..9b0450a7891326ccce08dcfad1174b3c13057792 100644 --- a/pkg/src/server/params/srv_param_def.h +++ b/pkg/src/server/params/srv_param_def.h @@ -418,9 +418,9 @@ typedef enum en_param_global { // PARAM_MEM_POOL_MAX_SIZE, /* ADD HERE */ // 
dbstor - PARAM_ENABLE_DBSTORE, - PARAM_DBSTORE_DEPLOY_MODE, - PARAM_DBSTORE_NAMESPACE, + PARAM_ENABLE_DBSTOR, + PARAM_DBSTOR_DEPLOY_MODE, + PARAM_DBSTOR_NAMESPACE, PARAM_COMPATIBLE_MYSQL, PARAM_ENABLE_CANTIAN_STATS, PARAM_ENABLE_CTC_STATS, diff --git a/pkg/src/server/srv_instance.h b/pkg/src/server/srv_instance.h index dd305e69e17826166ede1c3e7354a313dfa93263..98b53e6e1bace3326684516a246fe6ceb4c81720 100644 --- a/pkg/src/server/srv_instance.h +++ b/pkg/src/server/srv_instance.h @@ -157,7 +157,6 @@ typedef struct st_instance_attr { bool32 disable_var_peek; bool32 enable_cursor_sharing; bool32 enable_use_spm; - bool32 enable_dss; } instance_attr_t; typedef struct st_os_run_desc { diff --git a/pkg/src/server/srv_session.h b/pkg/src/server/srv_session.h index 7106404253044369ef3faf788ba921fee238938a..52f5f6a97d1eba6726f253a8703c200ddce4e141 100644 --- a/pkg/src/server/srv_session.h +++ b/pkg/src/server/srv_session.h @@ -205,6 +205,7 @@ typedef struct st_ctc_context { rowid_t conflict_rid; // for on duplicate key update uint32_t ctc_inst_id; } ctc_context_t; + typedef struct st_session { knl_session_t knl_session; // need to be first! 
spinlock_t kill_lock; diff --git a/pkg/src/tms/BUILD.bazel b/pkg/src/tms/BUILD.bazel index 11d06ef48b8a7630c289a26a11f02565f8e8a00c..865eca7957487c1fccfa80d258de99c29eccf897 100644 --- a/pkg/src/tms/BUILD.bazel +++ b/pkg/src/tms/BUILD.bazel @@ -171,7 +171,7 @@ cc_library ( "//library:mockcpp", "//library:protobuf-c", "//library:cgw_client", - "//library:dbstoreClient", + "//library:dbstorClient", "//library:dbstor_tool", "//library:nomlnx/xnetlite", "//library:iod", diff --git a/pkg/src/upgrade_check/knl_dc_persistent.h b/pkg/src/upgrade_check/knl_dc_persistent.h index 1592003e3b668a9c433bff143f0a7a406a566804..687a277a83620bb0da4114d8c6e40ddad521216a 100644 --- a/pkg/src/upgrade_check/knl_dc_persistent.h +++ b/pkg/src/upgrade_check/knl_dc_persistent.h @@ -123,7 +123,9 @@ typedef struct st_rd_lock_info_4mysql_ddl { typedef struct st_rd_invalid_dd_4mysql_ddl { uint32 op_type; uint32_t buff_len : 24; - uint32_t is_dcl : 8; + uint32_t is_dcl : 1; + uint32_t is_flush : 1; + uint32_t reserved : 6; char buff[]; } rd_invalid_dd_4mysql_ddl; diff --git a/pkg/src/utils/ctbackup/ctbackup.c b/pkg/src/utils/ctbackup/ctbackup.c index a9613b36eb8b4eb6ab63e2e736db39b7ce99ca5b..4b392ed601707b0a08769c641f33fec13535908d 100644 --- a/pkg/src/utils/ctbackup/ctbackup.c +++ b/pkg/src/utils/ctbackup/ctbackup.c @@ -28,13 +28,13 @@ #include "ctbackup_common.h" #ifdef WIN32 -const char *cantiand_get_dbversion() +char *cantiand_get_dbversion() { return "NONE"; } #else -extern const char* cantiand_get_dbversion(void); +extern char* cantiand_get_dbversion(void); #endif diff --git a/pkg/src/utils/ctbackup/ctbackup_archivelog.c b/pkg/src/utils/ctbackup/ctbackup_archivelog.c index f22ce3035ca4ad8b48b7ca680dccb3ec8241bea2..3e8723df3553ee797f5c98d00b2580b80550503e 100644 --- a/pkg/src/utils/ctbackup/ctbackup_archivelog.c +++ b/pkg/src/utils/ctbackup/ctbackup_archivelog.c @@ -68,7 +68,7 @@ status_t ctbak_parse_archivelog_args(int32 argc, char** argv, ctbak_param_t* ctb status_t 
ctbak_do_force_archive(char *ct_params[], char *ctsql_binary_path) { status_t status; - if (check_cantiand_status() != CT_SUCCESS) { + if (check_cantiand_status(CT_FALSE) != CT_SUCCESS) { return CT_ERROR; } diff --git a/pkg/src/utils/ctbackup/ctbackup_common.c b/pkg/src/utils/ctbackup/ctbackup_common.c index 50d91a515ce39e8f8db3db14d8224444d1ef4dfe..a1f6671fc127f87051fa9a6cd25484aa295b7d57 100644 --- a/pkg/src/utils/ctbackup/ctbackup_common.c +++ b/pkg/src/utils/ctbackup/ctbackup_common.c @@ -95,7 +95,7 @@ status_t ctbak_do_shell_background(text_t* command, int* child_pid, int exec_mod sleep(1); int wait = waitpid(child, &status, exec_mode); if (wait == child && WIFEXITED((unsigned int)status) && WEXITSTATUS((unsigned int)status) != 0) { - printf("[ctbackup]child process exec failed\n"); + // printf("[ctbackup]child process exec failed\n"); return CT_ERROR; } *child_pid = child; @@ -659,15 +659,20 @@ status_t start_cantiand_server(void) return CT_SUCCESS; } -status_t check_cantiand_status(void) +status_t check_cantiand_status(bool32 expect_running) { int child_pid; text_t check_cantiand_status_cmd; cm_str2text(CHECK_CANTAIND_STATUS_CMD, &check_cantiand_status_cmd); status_t result = ctbak_do_shell_background(&check_cantiand_status_cmd, &child_pid, 0); if (result != CT_SUCCESS) { - printf("[ctbackup]cantiand is running, cannot execute restore/recovery/force_archive!\n"); - return CT_ERROR; + if (expect_running) { + printf("[ctbackup]cantiand is running, can execute snapshot!\n"); + return CT_SUCCESS; + } else { + printf("[ctbackup]cantiand is running, cannot execute restore/recovery/force_archive!\n"); + return CT_ERROR; + } } printf("[ctbackup]check cantiand status finished!\n"); return CT_SUCCESS; @@ -992,8 +997,10 @@ status_t ctbackup_set_metadata_mode(ctbak_param_t *ctbak_param) if (strcmp(metadata_mode, "TRUE") == 0) { ctbak_param->is_mysql_metadata_in_cantian = CT_TRUE; + printf("[ctbackup]mysql metadata in cantian!\n"); } else if (strcmp(metadata_mode, 
"FALSE") == 0) { ctbak_param->is_mysql_metadata_in_cantian = CT_FALSE; + printf("[ctbackup]mysql metadata not in cantian!\n"); } else { printf("[ctbackup]invalid mysql_metadata_in_cantian param!\n"); return CT_ERROR; diff --git a/pkg/src/utils/ctbackup/ctbackup_common.h b/pkg/src/utils/ctbackup/ctbackup_common.h index 7a325a235ebf5439e71fda36de80922f33cada28..5199ecc2cce3b535ac8b7ed0700a89c935705718 100644 --- a/pkg/src/utils/ctbackup/ctbackup_common.h +++ b/pkg/src/utils/ctbackup/ctbackup_common.h @@ -28,6 +28,7 @@ #include "dirent.h" #include "cm_defs.h" #include "cm_file.h" +#include "cm_dbstor.h" #include "ctbackup_info.h" #ifdef __cplusplus @@ -68,6 +69,10 @@ extern "C" { #define CTSQL_FULL_BACKUP_STATEMENT_PREFIX "BACKUP DATABASE INCREMENTAL LEVEL 0 FORMAT \'" #define CTSQL_INCREMENT_BACKUP_STATEMENT_PREFIX "BACKUP DATABASE INCREMENTAL LEVEL 1 FORMAT \'" #define CTSQL_INCREMENT_CUMULATIVE_BACKUP_STATEMENT_PREFIX "BACKUP DATABASE INCREMENTAL LEVEL 1 CUMULATIVE FORMAT \'" +#define CTSQL_SNAPSHOT_BACKUP_STATEMENT_PREFIX "BACKUP DATABASE SNAPSHOT FORMAT \'" +#define CTSQL_RECYCLE_REDO_STATEMENT_PREFIX "ALTER SYSTEM SET PREVENT_SNAPSHOT_BACKUP_RECYCLE_REDO=" +#define CTSQL_TRUE "TRUE" +#define CTSQL_FALSE "FALSE" #define CTSQL_STATEMENT_QUOTE "\'" #define CTSQL_PARALLELISM_OPTION " PARALLELISM " #define CTSQL_BUFFER_OPTION " BUFFER SIZE " @@ -81,10 +86,12 @@ extern "C" { #define MYSQL_BACKUP_DIR "/mysql" // TARGET_DIR_PARAM_OPTION's next level dir, for store cantian backup files #define CANTIAN_BACKUP_DIR "/cantian" +#define CANTIAN_SNAPSHOT_DIR "/snapshot" #define CANTIAN_BACKUP_BACKUPSET "/backupset" #define DECRYPT_CMD_ECHO "echo " #define DECRYPT_CMD_BASE64 " | openssl base64 -d" #define CANTIAN_BACKUP_FILE_LENGTH 129 +#define CANTIAN_SNAPSHOT_FILE_LENGTH 129 #define MAX_TARGET_DIR_LENGTH 120 #define MAX_PARALLELISM_COUNT 16 #define MAX_STATEMENT_LENGTH 512 @@ -130,6 +137,9 @@ extern "C" { #define STOP_CANTIAND_SERVER_CMD "installdb.sh -P stopcantiand" #define 
TRY_CONN_CTSQL_CMD "installdb.sh -P tryconnctsql" +#define SNAPSHOT_INFO_FS_PATH "/backup" +#define CTBAK_SNAP_INFO_FILE_NAME "snap_info" + #define CTSQL_QUERY_CANTIAN_PARAMETERS "'SHOW PARAMETERS'" #define MYSQL_METADATA_IN_CANTIAN_GREP "| grep MYSQL_METADATA_IN_CANTIAN | awk '{print $4}'" @@ -137,6 +147,7 @@ extern "C" { #define CTSQL_CMD_BUFFER_SIZE (CTSQL_FILE_NAME_BUFFER_SIZE + MAX_STATEMENT_LENGTH) #define CTSQL_CMD_OUT_BUFFER_SIZE (CT_MAX_CMD_LEN + 1) #define CTSQL_CMD_IN_BUFFER_SIZE (CT_MAX_CMD_LEN + 1) +#define CT_SNAPSHOT_BACKUP_NAME_MAX_LEN 255 #ifndef WIFEXITED #define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0) @@ -159,6 +170,12 @@ extern "C" { } \ } while (0) +typedef struct st_snapshot_backup_info { + snapshot_result_info page_fs_snap_info; + snapshot_result_info archive_fs_snap_info; + snapshot_result_info log_fs_snap_info; +} snapshot_backup_info_t; + typedef enum en_ctbak_ctsql_exec_mode { CTBAK_CTSQL_EXECV_MODE, CTBAK_CTSQL_SHELL_MODE, @@ -194,7 +211,7 @@ status_t set_mysql_single_param_value(char **param, text_t ctbak_param_option, c status_t start_cantiand_server(void); -status_t check_cantiand_status(void); +status_t check_cantiand_status(bool32 expect_running); status_t stop_cantiand_server(void); @@ -223,6 +240,8 @@ status_t ctbak_do_shell_get_output(text_t *command, char *cmd_out, status_t get_cfg_ini_file_name(char *ctsql_ini_file_name, char *cantiand_ini_file_path); +status_t get_cantiand_ini_file_name(char *cantiand_ini_file_path); + #ifdef __cplusplus } #endif diff --git a/pkg/src/utils/ctbackup/ctbackup_dbs_common.c b/pkg/src/utils/ctbackup/ctbackup_dbs_common.c new file mode 100644 index 0000000000000000000000000000000000000000..dd14d3c172e4e84eddaf38b0a9259573c81b10fe --- /dev/null +++ b/pkg/src/utils/ctbackup/ctbackup_dbs_common.c @@ -0,0 +1,922 @@ +/* ------------------------------------------------------------------------- +* This file is part of the Cantian project. +* Copyright (c) 2024 Huawei Technologies Co.,Ltd. 
+* +* Cantian is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +* ------------------------------------------------------------------------- +* +* ctbackup_dbs_common.c +* +* +* IDENTIFICATION +* src/utils/ctbackup/ctbackup_dbs_common.c +* +* ------------------------------------------------------------------------- + */ + +#include "ctbackup_dbs_common.h" + +#define DBS_CONFIG_FILE_NAME_LEN 32 +#define DBS_WAIT_CGW_LINK_INIT_TIME_SECOND 2 +#define DBS_WAIT_CONFIG_RETRY_NUM 2 +#define DBS_WAIT_CONFIG_INTERVAL_TIME 2000 +#define DBS_CLUSTER_UUID_LEN 37 +#define DBS_CONFIG_MAX_PARAM 256 +#define DBS_QUERY_FILE_PRAMA_NUM 3 +#define DBS_QUERY_FILE_CHECK_PRAMA_NUM 1 +#define USER_NAME_LEN 32 +#define GROUP_NAME_LEN 255 +#define MODE_STR_LEN 10 +#define TIME_STR_LEN 25 +#define BOOL_TRUE_LEN 4 +#define DBS_TOOL_PARAM_BOOL_LEN 6 +#define DBS_COPY_FILE_PRAMA_NUM 5 +#define DBS_COPY_FILE_CHECK_PRAMA_NUM 3 +#define DEV_RW_BUFFER_SIZE (1 * 1024 * 1024) +#define NUM_ONE 1 +#define NUM_TWO 2 +#define NUM_THREE 3 +#define NUM_FOUR 4 +#define NUM_FIVE 5 +#define NUM_SIX 6 +#define NUM_SEVEN 7 +#define NUM_EIGHT 8 +#define NUM_NINE 9 + +#define DBS_HOME_PATH "/opt/cantian" +#define DBS_TOOL_CONFIG_PATH "/opt/cantian/dbstor/conf/dbs" +#define DBS_TOOL_PARAM_FS_NAME "--fs-name=" +#define DBS_TOOL_PARAM_FILE_DIR "--file-dir=" +#define DBS_TOOL_PARAM_VSTORE_ID "--vstore_id=" +#define MAX_VALUE_UINT32 "4294967295" +#define DBS_FILE_TYPE_UNKNOWN "unknown" +#define DBS_FILE_TYPE_DIR "dir" +#define DBS_FILE_TYPE_FILE "file" +#define DBS_COPY_FILE_PARAM "--copy-file" +#define 
DBS_TOOL_PARAM_OVERWRITE "--overwrite" +#define BOOL_TRUE "true" +#define BOOL_FALSE "false" +#define DBS_TOOL_PARAM_FILE_NAME "--file-name=" +#define DBS_TOOL_PARAM_SOURCE_DIR "--source-dir=" +#define DBS_TOOL_PARAM_TARGET_DIR "--target-dir=" +#define DBS_IMPORT_PARAM "--import" +#define DBS_EXPORT_PARAM "--export" + +dbs_fs_info_t g_dbs_fs_info = { 0 }; +int32 g_lockConfigHandle = CT_INVALID_HANDLE; + +typedef enum { + RETURN_FS_SNAP_OP_OK = 0, /* valid diff, and delta > 0 */ + RETURN_FS_SNAP_OP_NONEXIST = 1, /* query finish, valid value, delta number>=0 */ + RETURN_FS_SNAP_OP_INVALID_PARAM = 2, /* INVALID_PARAM - param error, invalid result */ + RETURN_FS_SNAP_OP_NEED_RETRY = 3, /* retry result, invalid result */ + RETURN_FS_SNAP_OP_NO_RETRY = 4, /* no retry result, invalid result */ + RETURN_FS_SNAP_UUID_EXISTS = 5, /* uuid has exists */ + RETURN_FS_SNAP_NAME_EXISTS = 6, /* name has exists */ + RETURN_FS_SNAP_NUM_FULL = 7, /* snap num is full */ + RETURN_FS_SNAP_TP_INCONSISTENT = 8, /* tp inconsistent */ + RETURN_FS_SNAP_FS_NOT_EXIST = 9, /* filesystem not exist */ + RETURN_FS_SNAP_FS_IS_ROLLBAKING = 10, /* snap is rollbacking */ + RETURN_FS_SNAP_IS_SYNCING = 11, /* snap is syncing */ + RETURN_FS_SNAP_CREATING_CONFIT_MS_DEL = 12, /* metro is deleting, can not create snap */ + + RETURN_FS_SNAP_OP_BUTT +}RETURN_FS_SNAP_OP_E; + +const char* ctbak_snap_error[] = { + [RETURN_FS_SNAP_OP_OK] = "valid diff, and delta > 0", + [RETURN_FS_SNAP_OP_NONEXIST] = "query finish, valid value, delta number>=0", + [RETURN_FS_SNAP_OP_INVALID_PARAM] = "INVALID_PARAM - param error, invalid result", + [RETURN_FS_SNAP_OP_NEED_RETRY] = "retry result, invalid result", + [RETURN_FS_SNAP_OP_NO_RETRY] = "no retry result, invalid result", + [RETURN_FS_SNAP_UUID_EXISTS] = "uuid has exists", + [RETURN_FS_SNAP_NAME_EXISTS] = "name has exists", + [RETURN_FS_SNAP_NUM_FULL] = "snap num is full", + [RETURN_FS_SNAP_TP_INCONSISTENT] = "tp inconsistent", + [RETURN_FS_SNAP_FS_NOT_EXIST] = "filesystem 
not exist", + [RETURN_FS_SNAP_FS_IS_ROLLBAKING] = "snap is rollbacking", + [RETURN_FS_SNAP_IS_SYNCING] = "snap is syncing", + [RETURN_FS_SNAP_CREATING_CONFIT_MS_DEL] = "metro is deleting, can not create snap" +}; + + +status_t dbs_get_param_value(char *line, char *value, uint32 length) +{ + char line_cpy[DBS_CONFIG_MAX_PARAM] = { 0 }; + char *context = NULL; + text_t param = { 0 }; + errno_t ret = strcpy_s(line_cpy, DBS_CONFIG_MAX_PARAM, line); + if (ret != EOK) { + CT_LOG_RUN_ERR("strcpy_s line failed %d.", ret); + return CT_ERROR; + } + param.str = strtok_s(line_cpy, "=", &context); + param.str = strtok_s(NULL, "\n", &context); + param.len = strlen(param.str); + cm_trim_text(¶m); + ret = strcpy_s(value, length, param.str); + if (ret != EOK) { + CT_LOG_RUN_ERR("strcpy_s value failed %d.", ret); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t dbs_get_uuid_lsid_from_config(char* cfg_name, uint32* lsid, char* uuid) +{ + char file_path[CT_FILE_NAME_BUFFER_SIZE]; + char line[DBS_CONFIG_MAX_PARAM]; + errno_t ret = sprintf_s(file_path, CT_FILE_NAME_BUFFER_SIZE, "%s/%s", + DBS_TOOL_CONFIG_PATH, cfg_name); + PRTS_RETURN_IFERR(ret); + FILE* fp = fopen(file_path, "r"); + if (fp == NULL) { + CT_LOG_RUN_ERR("Failed to open file %s\n", file_path); + return CT_ERROR; + } + + while (fgets(line, sizeof(line), fp) != NULL) { + char *context = NULL; + if (strstr(line, "INST_ID") != NULL) { + text_t lsid_t; + lsid_t.str = strtok_s(line, "=", &context); + lsid_t.str = strtok_s(NULL, "\n", &context); + lsid_t.len = strlen(lsid_t.str); + cm_trim_text(&lsid_t); + ret = cm_str2uint32((const char *)lsid_t.str, lsid); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("Str2uint32 failed %d.", ret); + break; + } + } else if (strstr(line, "DBS_TOOL_UUID") != NULL) { + text_t uuid_t; + uuid_t.str = strtok_s(line, "=", &context); + uuid_t.str = strtok_s(NULL, "\n", &context); + uuid_t.len = strlen(uuid_t.str); + cm_trim_text(&uuid_t); + ret = strcpy_s(uuid, DBS_CLUSTER_UUID_LEN, 
uuid_t.str); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("strcpy_s failed %d.", ret); + break; + } + } + } + (void)fclose(fp); + return ret; +} + +status_t dbs_get_fs_info_from_config(char* cfg_name) +{ + char file_path[CT_FILE_NAME_BUFFER_SIZE]; + char line[DBS_CONFIG_MAX_PARAM]; + errno_t ret = sprintf_s(file_path, CT_FILE_NAME_BUFFER_SIZE, "%s/%s", + DBS_TOOL_CONFIG_PATH, cfg_name); + PRTS_RETURN_IFERR(ret); + FILE* fp = fopen(file_path, "r"); + if (fp == NULL) { + CT_LOG_RUN_ERR("Failed to open file %s\n", file_path); + return CT_ERROR; + } + + status_t result = CT_SUCCESS; + while (fgets(line, sizeof(line), fp) != NULL) { + if (strstr(line, "NAMESPACE_FSNAME") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.log_fs_name, MAX_DBS_FS_NAME_LEN); + } else if (strstr(line, "NAMESPACE_PAGE_FSNAME") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.page_fs_name, MAX_DBS_FS_NAME_LEN); + } else if (strstr(line, "CLUSTER_NAME") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.cluster_name, MAX_DBS_FILE_NAME_LEN); + } else if (strstr(line, "LOG_VSTOR") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.log_fs_vstore_id, MAX_DBS_VSTORE_ID_LEN); + } else if (strstr(line, "DBS_LOG_PATH") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.dbs_log_path, MAX_DBS_FS_NAME_LEN); + } else if (strstr(line, "PAGE_VSTOR") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.page_fs_vstore_id, MAX_DBS_VSTORE_ID_LEN); + } else if (strstr(line, "NAMESPACE_SHARE_FSNAME") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.share_fs_name, MAX_DBS_FS_NAME_LEN); + } else if (strstr(line, "NAMESPACE_ARCHIVE_FSNAME") != NULL) { + result = dbs_get_param_value(line, g_dbs_fs_info.archive_fs_name, MAX_DBS_FS_NAME_LEN); + } + if (result != CT_SUCCESS) { + CT_LOG_RUN_ERR("get param value failed, line %s.", line); + break; + } + } + (void)fclose(fp); + return result; +} + +status_t dbs_get_and_flock_conf_file(char 
*config_name) +{ + char dbs_conf_dir_path[CT_FILE_NAME_BUFFER_SIZE] = DBS_TOOL_CONFIG_PATH; + + DIR *dir_ptr; + struct dirent *entry; + + dir_ptr = opendir(dbs_conf_dir_path); + if (dir_ptr == NULL) { + printf("open dbs_conf_dir_path failed!\n"); + return CT_ERROR; + } + + int32 ret = 0; + char dbs_conf_file_path[CT_FILE_NAME_BUFFER_SIZE] = { 0 }; + while ((entry = readdir(dir_ptr)) != NULL) { + if (strstr(entry->d_name, "tool") == NULL) { + continue; + } + ret = memset_s(dbs_conf_file_path, CT_FILE_NAME_BUFFER_SIZE, 0, CT_FILE_NAME_BUFFER_SIZE); + if (ret != EOK) { + printf("memset_s dbs_conf_file_path failed!\n"); + break; + } + ret = sprintf_s(dbs_conf_file_path, CT_FILE_NAME_BUFFER_SIZE, "%s/%s", dbs_conf_dir_path, entry->d_name); + if (ret == -1) { + printf("Failed to assemble the dbstor conf file path by instance home(%s).\n", dbs_conf_dir_path); + break; + } + if (cm_open_file(dbs_conf_file_path, O_RDWR, &g_lockConfigHandle) != CT_SUCCESS) { + printf("open dbs_conf_file failed!\n"); + break; + } + if (flock(g_lockConfigHandle, LOCK_EX | LOCK_NB) == 0) { + ret = strcpy_s(config_name, DBS_CONFIG_FILE_NAME_LEN, entry->d_name); + if (ret != EOK) { + printf("strcpy_s config_name failed!\n"); + closedir(dir_ptr); + return CT_ERROR; + } + closedir(dir_ptr); + return CT_SUCCESS; + } + cm_close_file(g_lockConfigHandle); + } + + closedir(dir_ptr); + return CT_ERROR; +} + +status_t dbs_client_init(char* cfg_name) +{ + int64_t start_time = cm_now(); + status_t ret = dbs_init_lib(); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("Init dbs lib failed(%d).", ret); + return ret; + } + + if (dbs_get_fs_info_from_config(cfg_name) != CT_SUCCESS) { + CT_LOG_RUN_ERR("cms get fs info from config(%s) failed.\n", cfg_name); + return CT_ERROR; + } + CT_LOG_RUN_INF("log fs name:%s, page fs name:%s, cluster name %s", + g_dbs_fs_info.log_fs_name, g_dbs_fs_info.page_fs_name, g_dbs_fs_info.cluster_name); + + uint32 lsid; + char uuid[DBS_CLUSTER_UUID_LEN] = { 0 }; + + CT_LOG_RUN_INF("dbstor 
client is inited by config file %s", cfg_name); + if (dbs_get_uuid_lsid_from_config(cfg_name, &lsid, uuid) != CT_SUCCESS) { + CT_LOG_RUN_ERR("cms get uuid lsid from config(%s) failed.\n", cfg_name); + return CT_ERROR; + } + CT_LOG_RUN_INF("uuid:%s, lsid:%u", uuid, lsid); + cm_set_dbs_uuid_lsid((const char*)uuid, lsid); + + cm_dbs_cfg_s *cfg = cm_dbs_get_cfg(); + cfg->enable = CT_TRUE; + + ret = cm_dbs_init(DBS_HOME_PATH, cfg_name, DBS_RUN_DBS_TOOL); + if (ret != CT_SUCCESS) { + (void)dbs_global_handle()->dbs_client_flush_log(); + CT_LOG_RUN_ERR("Dbs init failed(%d).", ret); + } + int64_t end_time = cm_now(); + CT_LOG_RUN_INF("dbstor client init time %ld (ns)", end_time - start_time); + return ret; +} + +status_t dbs_alloc_conf_file_retry(char *config_name) +{ + uint32_t retry_num = DBS_WAIT_CONFIG_RETRY_NUM; + do { + int32_t ret = memset_s(config_name, DBS_CONFIG_FILE_NAME_LEN, 0, DBS_CONFIG_FILE_NAME_LEN); + if (ret != EOK) { + CT_LOG_RUN_ERR("memset_s config_name failed!"); + return CT_ERROR; + } + if (dbs_get_and_flock_conf_file(config_name) == CT_SUCCESS) { + return CT_SUCCESS; + } + retry_num--; + cm_sleep(DBS_WAIT_CONFIG_INTERVAL_TIME); + } while (retry_num > 0); + + printf("Get free dbstor config file timeout, please wait a while and try again.\n"); + return CT_ERROR; +} + +status_t dbs_init(ctbak_param_t* ctbak_param) +{ + char dbs_cfg_name[DBS_CONFIG_FILE_NAME_LEN] = { 0 }; + if (dbs_alloc_conf_file_retry(dbs_cfg_name) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Init dbs havn't dbs chain."); + return CT_ERROR; + } + + if (dbs_client_init(dbs_cfg_name) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Init dbs failed."); + return CT_ERROR; + } + sleep(DBS_WAIT_CGW_LINK_INIT_TIME_SECOND); + + cm_str2text(g_dbs_fs_info.page_fs_vstore_id, &ctbak_param->page_fs_vstore_id); + cm_str2text(g_dbs_fs_info.page_fs_name, &ctbak_param->page_fs_name); + cm_str2text(g_dbs_fs_info.share_fs_name, &ctbak_param->share_fs_name); + cm_str2text(g_dbs_fs_info.log_fs_name, &ctbak_param->log_fs_name); + 
cm_str2text(g_dbs_fs_info.log_fs_vstore_id, &ctbak_param->log_fs_vstore_id); + cm_str2text(g_dbs_fs_info.archive_fs_name, &ctbak_param->archive_fs_name); + printf("DBstor init success.\n"); + return CT_SUCCESS; +} + +// query_file +status_t timestamp_to_readable(uint64_t timestamp, char* readable_time) { + time_t time = (time_t)timestamp; + return strftime(readable_time, TIME_STR_LEN, "%Y-%m-%d %H:%M:%S", + localtime(&time)) > 0 ? CT_SUCCESS : CT_ERROR; +} + +status_t gid_to_groupname(uint32_t gid, char* groupname) { + struct group* gr = getgrgid(gid); + if (gr != NULL) { + MEMS_RETURN_IFERR(strncpy_s(groupname, GROUP_NAME_LEN, gr->gr_name, strlen(gr->gr_name))); + return CT_SUCCESS; + } + return CT_ERROR; +} + +status_t uid_to_username(uint32_t uid, char* username) { + struct passwd* pw = getpwuid(uid); + if (pw != NULL) { + MEMS_RETURN_IFERR(strncpy_s(username, USER_NAME_LEN, pw->pw_name, strlen(pw->pw_name))); + return CT_SUCCESS; + } + return CT_ERROR; +} + +status_t mode_to_string(uint32_t mode_num, char* mode_str) { + MEMS_RETURN_IFERR(strncpy_s(mode_str, MODE_STR_LEN, "---------", strlen("---------"))); + + // 检查用户(owner)权限 + if (mode_num & 0400) mode_str[0] = 'r'; + if (mode_num & 0200) mode_str[1] = 'w'; + if (mode_num & 0100) mode_str[2] = 'x'; + + // 检查组(group)权限 + if (mode_num & 0040) mode_str[3] = 'r'; + if (mode_num & 0020) mode_str[4] = 'w'; + if (mode_num & 0010) mode_str[5] = 'x'; + + // 检查其他用户(others)权限 + if (mode_num & 0004) mode_str[6] = 'r'; + if (mode_num & 0002) mode_str[7] = 'w'; + if (mode_num & 0001) mode_str[8] = 'x'; + mode_str[9] = '\0'; + return CT_SUCCESS; +} + +bool32 compare_bool_param(char *argv[], params_list_t *params_list, uint32 i, uint32 j, bool32 *matched) +{ + char *params[] = {DBS_TOOL_PARAM_OVERWRITE}; + uint32 params_len = 1; + if (strncmp(argv[i], params_list->keys[j], strlen(params_list->keys[j])) == 0) { + for (uint32 k = 0; k < params_len; k++) { + if (strncmp(argv[i], params[k], strlen(params[k])) == 0) { + 
MEMS_RETURN_IFERR(strncpy_sp(params_list->values[j], params_list->value_len[j], + BOOL_TRUE, BOOL_TRUE_LEN)); + *matched = CT_TRUE; + return CT_TRUE; + } + } + + } + return CT_FALSE; +} + +status_t compare_param(char *argv[], params_list_t *params_list, uint32 i, uint32 j, bool32 *matched) +{ + if (compare_bool_param(argv, params_list, i, j, matched) == CT_TRUE) { + return CT_SUCCESS; + } + if (strncmp(argv[i], params_list->keys[j], strlen(params_list->keys[j])) == 0) { + if (strlen(argv[i]) - strlen(params_list->keys[j]) >= params_list->value_len[j]) { + printf("Parameter value is too long for %s.\n", params_list->keys[j]); + return CT_ERROR; + } + MEMS_RETURN_IFERR(strncpy_sp(params_list->values[j], params_list->value_len[j], + argv[i] + strlen(params_list->keys[j]), + strlen(argv[i]) - strlen(params_list->keys[j]))); + *matched = CT_TRUE; + } + return CT_SUCCESS; +} + +uint32 get_parse_params_init_value(char *argv[]) +{ + uint32 i = 1; + char *params[] = {DBS_COPY_FILE_PARAM}; + uint32 params_len = 1; + for (uint32 j = 0; j < params_len; j++) { + if (strncmp(argv[i], params[j], strlen(params[j])) == 0) { + return i + 2; + } + } + return i + 1; +} + +status_t file_info_screen_print(void *file_list, uint32 file_num, char *path, file_info_version_t info_version) +{ + if (file_num == 0) { + printf("No files found in directory: %s\n", path); + } else { + printf("Files in directory %s:\n", path); + for (uint32 i = 0; i < file_num; i++) { + char *file_name = NULL; + if (info_version == DBS_FILE_INFO_VERSION_1) { + dbstor_file_info *file_info = (dbstor_file_info *)((char *)file_list + i * sizeof(dbstor_file_info)); + file_name = file_info->file_name; + if (file_name != NULL) { + printf("%s\n", file_name); + } + continue; + } + dbstor_file_info_detail *file_info = (dbstor_file_info_detail *)((char *)file_list + + i * sizeof(dbstor_file_info_detail)); + file_name = file_info->file_name; + if (file_name == NULL || strlen(file_name) == 0) { + continue; + } + uint32_t 
file_size = file_info->file_size; + char *file_type = DBS_FILE_TYPE_UNKNOWN; + if (file_info->type == CS_FILE_TYPE_DIR) { + file_type = DBS_FILE_TYPE_DIR; + } else if (file_info->type == CS_FILE_TYPE_FILE) { + file_type = DBS_FILE_TYPE_FILE; + } + char username[USER_NAME_LEN] = {0}; + char groupname[GROUP_NAME_LEN] = {0}; + char mode_str[MODE_STR_LEN] = {0}; + char timr_str[TIME_STR_LEN] = {0}; + PRTS_RETURN_IFERR(mode_to_string(file_info->mode, mode_str)); + PRTS_RETURN_IFERR(uid_to_username(file_info->uid, username)); + PRTS_RETURN_IFERR(gid_to_groupname(file_info->gid, groupname)); + PRTS_RETURN_IFERR(timestamp_to_readable(file_info->mtimeSec, timr_str)); + printf("%s %s %s %s %u %s %s\n", mode_str, file_type, username, + groupname, file_size, timr_str, file_name); + } + } + return CT_SUCCESS; +} + +status_t parse_params_list(int32 argc, char *argv[], params_list_t *params_list) +{ + uint32 i = get_parse_params_init_value(argv); + for (; i < argc; i++) { + bool32 matched = CT_FALSE; + for (uint32 j = 0; j < params_list->params_num; j++) { + if (compare_param(argv, params_list, i, j, &matched) != CT_SUCCESS) { + return CT_ERROR; + } + if (matched) { + break; + } + } + if (!matched) { + printf("Invalid parameter: %s\n", argv[i]); + return CT_ERROR; + } + } + for (uint32 k = 0; k < params_list->check_num; k++) { + if (strlen(params_list->check_list[k].value) == 0) { + printf("%s not specified.\n", params_list->check_list[k].key); + return CT_ERROR; + } + if (strcmp(params_list->check_list[k].key, DBS_TOOL_PARAM_VSTORE_ID) == 0) { + if (strlen(params_list->check_list[k].value) > strlen(MAX_VALUE_UINT32)) { + printf("Invalid vstore_id %s.\n", params_list->check_list[k].value); + return CT_ERROR; + } + if ((strlen(params_list->check_list[k].value) == strlen(MAX_VALUE_UINT32)) && + (strcmp(params_list->check_list[k].value, MAX_VALUE_UINT32) > 0)) { + printf("Invalid vstore_id %s.\n", params_list->check_list[k].value); + return CT_ERROR; + } + } + } + return CT_SUCCESS; 
+} + +int32 dbs_query_fs_file(int32 argc, char *argv[]) +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char file_path[MAX_DBS_FILE_PATH_LEN] = {0}; + char vstore_id[MAX_DBS_VSTORE_ID_LEN] = {0}; + const char *params[] = {DBS_TOOL_PARAM_FS_NAME, DBS_TOOL_PARAM_FILE_DIR, DBS_TOOL_PARAM_VSTORE_ID}; + char *results[] = {fs_name, file_path, vstore_id}; + size_t result_lens[] = {MAX_DBS_FS_NAME_LEN, MAX_DBS_FILE_PATH_LEN, MAX_DBS_VSTORE_ID_LEN}; + params_check_list_t check_list[] = {{DBS_TOOL_PARAM_FS_NAME, fs_name}}; + params_list_t params_list = {params, results, result_lens, check_list, DBS_QUERY_FILE_PRAMA_NUM, + DBS_QUERY_FILE_CHECK_PRAMA_NUM}; + + if (parse_params_list(argc, argv, ¶ms_list) != CT_SUCCESS) { + printf("Invalid command.\nUsage: --query-file --fs-name=xxx [--file-dir=xxx] [--vstore-id=*]\n"); + return CT_ERROR; + } + char full_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + if (strlen(file_path) == 0) { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s", fs_name)); + } else { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, file_path)); + } + dbs_device_info_t query_info = { .handle = -1, .type = DEV_TYPE_DBSTOR_FILE, .path = "" }; + MEMS_RETURN_IFERR(strncpy_s(query_info.path, MAX_DBS_FS_FILE_PATH_LEN, full_path, strlen(full_path))); + + void *file_list = NULL; + uint32 file_num = 0; + uint32 vstore_id_uint = 0; + file_info_version_t info_version = DBS_FILE_INFO_VERSION_1; + if (strlen(vstore_id) > 0) { + vstore_id_uint = (uint32)atoi(vstore_id); + } + if (dbs_global_handle()->dbs_file_get_list_detail != NULL) { + info_version = DBS_FILE_INFO_VERSION_2; + } + if (cm_malloc_file_list_by_version_id(info_version, vstore_id_uint, + &file_list, query_info.path, &file_num) != CT_SUCCESS) { + printf("Failed to allocate memory for file list.\n"); + return CT_ERROR; + } + status_t ret = cm_dbs_query_dir_vstore_id(vstore_id_uint, 
query_info.path, file_list, &file_num); + if (ret != CT_SUCCESS) { + printf("Failed to query files in directory: %s with vstore-id: %u\n", query_info.path, vstore_id_uint); + cm_free_file_list(&file_list); + return CT_ERROR; + } + MEMS_RETURN_IFERR(file_info_screen_print(file_list, file_num, query_info.path, info_version)); + cm_free_file_list(&file_list); + return CT_SUCCESS; +} + +status_t copy_file(const dbs_device_info_t *src_info, const dbs_device_info_t *dst_info) +{ + aligned_buf_t buf = { 0 }; + if (cm_aligned_malloc(DEV_RW_BUFFER_SIZE, "copy_file_buffer", &buf) != CT_SUCCESS) { + return CT_ERROR; + } + + int64 offset_read = 0; + int64 offset_write = 0; + int32 read_size = 0; + + while (CT_TRUE) { + status_t ret = cm_read_device_nocheck(src_info->type, src_info->handle, offset_read, buf.aligned_buf, + buf.buf_size, &read_size); + if (ret != CT_SUCCESS) { + cm_aligned_free(&buf); + printf("Read error from source file\n"); + return CT_ERROR; + } + + if (read_size == 0) { + break; // EOF + } + + if (cm_write_device(dst_info->type, dst_info->handle, offset_write, buf.aligned_buf, read_size) != CT_SUCCESS) { + cm_aligned_free(&buf); + printf("Write error to destination file\n"); + return CT_ERROR; + } + + offset_read += read_size; + offset_write += read_size; + } + + cm_aligned_free(&buf); + return CT_SUCCESS; +} + +status_t check_strcat_path(const char *dir, const char *name, char *strcat_name) +{ + if ((strlen(dir) + strlen(name)) >= MAX_DBS_FS_FILE_PATH_LEN) { + CT_LOG_RUN_ERR("srch file name is too long. 
dir is %s, file name is %s.", dir, name); + return CT_ERROR; + } + int32 ret = snprintf_s(strcat_name, MAX_DBS_FS_FILE_PATH_LEN, MAX_DBS_FS_FILE_PATH_LEN - 1, "%s/%s", dir, name); + PRTS_RETURN_IFERR(ret); + return CT_SUCCESS; +} + +status_t copy_file_by_name(const char *file_name, dbs_device_info_t *src_info, + dbs_device_info_t *dst_info, bool32 overwrite) +{ + char src_file_name[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + char dst_file_name[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + if (check_strcat_path(src_info->path, file_name, src_file_name) != CT_SUCCESS) { + return CT_ERROR; + } + if (cm_exist_device(src_info->type, src_file_name) != CT_TRUE) { + CT_LOG_RUN_ERR("file not exsit, path is %s.", src_file_name); + return CT_ERROR; + } + if (check_strcat_path(dst_info->path, file_name, dst_file_name) != CT_SUCCESS) { + return CT_ERROR; + } + if (cm_exist_device(dst_info->type, dst_file_name) == CT_TRUE) { + CT_LOG_RUN_INF("file exsit, path is %s.", dst_file_name); + if (overwrite) { + if (cm_remove_device(dst_info->type, dst_file_name) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to remove file, path is %s.", dst_file_name); + return CT_ERROR; + } + } else{ + printf("File exsit, skip it, path is %s.\n", dst_file_name); + return CT_SUCCESS; + } + } + + if (cm_open_device(src_file_name, src_info->type, O_RDONLY, &src_info->handle) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to open arch file: %s", src_file_name); + cm_close_device(src_info->type, &src_info->handle); + return CT_ERROR; + } + + if (cm_create_device(dst_file_name, dst_info->type, 0, &dst_info->handle) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to create dbs file, file path is: %s.", dst_file_name); + cm_close_device(src_info->type, &src_info->handle); + cm_close_device(dst_info->type, &dst_info->handle); + return CT_ERROR; + } + + if (copy_file(src_info, dst_info) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to copy file from %s to %s.", src_file_name, dst_file_name); + cm_close_device(src_info->type, &src_info->handle); + 
cm_close_device(dst_info->type, &dst_info->handle); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t copy_files_to_target_dir(dbs_device_info_t *src_info, dbs_device_info_t *dst_info, + const char *file_name, bool32 overwrite) +{ + status_t ret; + uint32 file_num = 0; + + if (file_name != NULL) { + ret = copy_file_by_name(file_name, src_info, dst_info, overwrite); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to copy file from source dir, file name is %s, src handle %d, dst handle %d.", + file_name, src_info->handle, dst_info->handle); + return CT_ERROR; + } + printf("Copying file: %s\n", file_name); + return CT_SUCCESS; + } + + // 没有指定文件名则复制整个目录的所有文件 + void *file_list = NULL; + if (cm_malloc_file_list(src_info->type, &file_list, src_info->path, &file_num) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to malloc file list."); + return CT_ERROR; + } + + ret = cm_query_device(src_info->type, src_info->path, file_list, &file_num); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to get file list, dir is %s.", src_info->path); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + for (uint32 i = 0; i < file_num; i++) { + char *current_file_name = cm_get_name_from_file_list(src_info->type, file_list, i); + if (current_file_name == NULL) { + CT_LOG_RUN_ERR("Failed to get file name, please check info type %d.", src_info->type); + cm_free_file_list(&file_list); + return CT_ERROR; + } + if (cm_check_dir_type_by_file_list(src_info->type, file_list, i)) { + continue; + } + + ret = copy_file_by_name(current_file_name, src_info, dst_info, overwrite); + if (ret != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to copy file from source dir, file name is %s, src handle %d, dst handle %d.", + current_file_name, src_info->handle, dst_info->handle); + cm_free_file_list(&file_list); + return CT_ERROR; + } + printf("Copying file: %s\n", current_file_name); + cm_close_device(src_info->type, &src_info->handle); + cm_close_device(dst_info->type, &dst_info->handle); + } + + 
cm_free_file_list(&file_list); + + CT_LOG_RUN_INF("Successfully copied files to %s.", dst_info->path); + return CT_SUCCESS; +} + +status_t check_dir_exist(const char *direction, const char *src_path, const char *dst_path, + char *fs_path, const char *fs_name) +{ + if (strncmp(direction, DBS_IMPORT_PARAM, strlen(DBS_IMPORT_PARAM)) == 0) { + if (cm_dir_exist(src_path) != CT_TRUE) { + printf("Source directory is does not exist %s\n", src_path); + return CT_ERROR; + } + + PRTS_RETURN_IFERR(snprintf_s(fs_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, dst_path)); + return CT_SUCCESS; + } + + if (strncmp(direction, DBS_EXPORT_PARAM, strlen(DBS_EXPORT_PARAM)) == 0) { + PRTS_RETURN_IFERR(snprintf_s(fs_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, src_path)); + if (cm_dbs_exist_file(fs_path, DIR_TYPE) != CT_TRUE) { + printf("Source directory is does not exist %s\n", fs_path); + return CT_ERROR; + } + if (cm_dir_exist(dst_path) != CT_TRUE) { + printf("Target directory is does not exist %s\n", dst_path); + return CT_ERROR; + } + + return CT_SUCCESS; + } + + return CT_ERROR; +} + +status_t dbs_copy_fs_file(int32 argc, char *argv[]) +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char file_name[MAX_DBS_FILE_PATH_LEN] = {0}; + char source_dir[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + char target_dir[MAX_DBS_FILE_PATH_LEN] = {0}; + char overwrite[DBS_TOOL_PARAM_BOOL_LEN] = BOOL_FALSE; + const char *params[] = {DBS_TOOL_PARAM_FS_NAME, DBS_TOOL_PARAM_FILE_NAME, DBS_TOOL_PARAM_SOURCE_DIR, + DBS_TOOL_PARAM_TARGET_DIR, DBS_TOOL_PARAM_OVERWRITE}; + char *results[] = {fs_name, file_name, source_dir, target_dir, overwrite}; + size_t result_lens[] = {MAX_DBS_FS_NAME_LEN, MAX_DBS_FILE_PATH_LEN, MAX_DBS_FS_FILE_PATH_LEN, MAX_DBS_FILE_PATH_LEN, + DBS_TOOL_PARAM_BOOL_LEN}; + params_check_list_t check_list[] = {{DBS_TOOL_PARAM_FS_NAME, fs_name}, {DBS_TOOL_PARAM_SOURCE_DIR, source_dir}, + {DBS_TOOL_PARAM_TARGET_DIR, target_dir}}; 
+ params_list_t params_list = {params, results, result_lens, check_list, DBS_COPY_FILE_PRAMA_NUM, + DBS_COPY_FILE_CHECK_PRAMA_NUM}; + if (parse_params_list(argc, argv, ¶ms_list) != CT_SUCCESS) { + printf("Invalid command.\nUsage: --copy-file --import --fs-name=xxx --source-dir=* --target-dir=* " + "[--file-name=*] [--overwrite]\n"); + return CT_ERROR; + } + char file_system_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + if (check_dir_exist(argv[2], source_dir, target_dir, file_system_path, fs_name) != CT_SUCCESS) { + return CT_ERROR; + } + dbs_device_info_t src_info = {.handle = -1, .path = ""}; + dbs_device_info_t dst_info = {.handle = -1, .path = ""}; + + if (strncmp(argv[2], DBS_IMPORT_PARAM, strlen(DBS_IMPORT_PARAM)) == 0) { + src_info.type = DEV_TYPE_FILE; + dst_info.type = DEV_TYPE_DBSTOR_FILE; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, source_dir, strlen(source_dir))); + MEMS_RETURN_IFERR(strncpy_s(dst_info.path, MAX_DBS_FS_FILE_PATH_LEN, + file_system_path, strlen(file_system_path))); + } else if (strncmp(argv[2], DBS_EXPORT_PARAM, strlen(DBS_EXPORT_PARAM)) == 0) { + src_info.type = DEV_TYPE_DBSTOR_FILE; + dst_info.type = DEV_TYPE_FILE; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, + file_system_path, strlen(file_system_path))); + MEMS_RETURN_IFERR(strncpy_s(dst_info.path, MAX_DBS_FS_FILE_PATH_LEN, target_dir, strlen(target_dir))); + } else { + printf("Invalid command, Missing parameters '--import/--export'.\n"); + return CT_ERROR; + } + // 将源文件或目录复制到目标目录 + if (copy_files_to_target_dir(&src_info, &dst_info, strlen(file_name) == 0 ? NULL : file_name, + strncmp(overwrite, BOOL_TRUE, strlen(BOOL_TRUE)) == 0 + ? 
CT_TRUE : CT_FALSE) != CT_SUCCESS) { + printf("Failed to copy files from %s to %s.\n", src_info.path, dst_info.path); + return CT_ERROR; + } + printf("File(s) copied successfully from %s to %s.\n", src_info.path, dst_info.path); + return CT_SUCCESS; +} + +int32 dbs_set_io_forbidden(int32 argc, char *argv[]) +{ + if (dbs_global_handle()->dbs_ns_io_forbidden == NULL) { + printf("dbs_ns_io_forbidden is not support\n"); + return CT_ERROR; + } + + if (argc != NUM_THREE) { + printf("Invalid input, arg num %d\n", argc); + printf("Usage: dbstor --io-forbidden <0, 1>t\n"); + return CT_ERROR; + } + bool isForbidden = (bool)atoi(argv[NUM_TWO]); + status_t ret = dbs_global_handle()->dbs_ns_io_forbidden(g_dbs_fs_info.cluster_name, isForbidden); + if (ret != CT_SUCCESS) { + printf("Set ns forbidden failed(%d).\n", ret); + return ret; + } + printf("Set ns forbidden success.\n"); + return ret; +} + +status_t dbs_create_fs_snap(char* fsName, uint32_t vstorId, snapshot_result_info* snap_info) +{ + int32 ret; + ret = dbs_global_handle()->create_fs_snap(fsName, vstorId, snap_info); + if (ret != 0) { + printf("Failed to create snapshot from fs %s, %s\n", fsName, ctbak_snap_error[ret]); + CT_LOG_RUN_ERR("Failed to create snapshot from fs %s, %s", fsName, ctbak_snap_error[ret]); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t dbs_delete_fs_snap(char* fsName, uint32_t vstorId, snapshot_result_info* snap_info) +{ + SNAP_UUID_S snapUUID = {0}; + if (memcpy_s(snapUUID.buf, sizeof(snapUUID.buf), snap_info->snapUUID, sizeof(snap_info->snapUUID)) != EOK) { + CT_LOG_RUN_ERR("Failed to delete snapshot of fs %s, get snapUUID failed", fsName); + return CT_ERROR; + } + int32 ret; + ret = dbs_global_handle()->delete_fs_snap(fsName, vstorId, snap_info->snapshotID, snap_info->timepoint, snapUUID); + if (ret != 0) { + CT_LOG_RUN_ERR("Failed to delete snapshot of fs %s", fsName); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t dbs_create_snapshot_info_file(const char *file_name, 
int32 *handle) +{ + if (cm_dbs_create_file(file_name, handle)!= CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to create snapshot_info_file %s", file_name); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t dbs_write_snapshot_info_file(int32 handle, int64 offset, const void *buf, int32 size) +{ + + if (cm_dbs_write_file(handle, offset, buf, size) != CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to write snapshot_info_file"); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t dbs_read_snapshot_info_file(object_id_t* handle, uint64 offset, void* buf, uint32 length) +{ + if (cm_read_dbs_file(handle, offset, buf, length)!= CT_SUCCESS) { + CT_LOG_RUN_ERR("Failed to read snapshot_info_file"); + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t ctbak_get_file_handle_from_share_fs(char *file_path, char *file_name, object_id_t *file_handle) +{ + char full_file_path[CT_MAX_FILE_PATH_LENGH] = { 0 }; + + int ret = snprintf_s(full_file_path, CT_MAX_FILE_PATH_LENGH, CT_MAX_FILE_PATH_LENGH - 1, "/%s/%s", file_path, file_name); + if (ret == CT_ERROR) { + CT_LOG_RUN_ERR("Failed to get full file path"); + return CT_ERROR; + } + + if (cm_get_dbs_last_file_handle(full_file_path, file_handle)) { + printf("[ctbackup]Failed to get file handle\n"); + return CT_ERROR; + } + printf("[ctbackup]get file from share fs success.\n"); + return CT_SUCCESS; +} \ No newline at end of file diff --git a/pkg/src/utils/ctbackup/ctbackup_dbs_common.h b/pkg/src/utils/ctbackup/ctbackup_dbs_common.h new file mode 100644 index 0000000000000000000000000000000000000000..4a4d2393c0170b99577bf206f603bf5cf57314b9 --- /dev/null +++ b/pkg/src/utils/ctbackup/ctbackup_dbs_common.h @@ -0,0 +1,111 @@ +/* ------------------------------------------------------------------------- +* This file is part of the Cantian project. +* Copyright (c) 2024 Huawei Technologies Co.,Ltd. +* +* Cantian is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +* ------------------------------------------------------------------------- +* +* ctbackup_dbs_common.h +* +* +* IDENTIFICATION +* src/utils/ctbackup/ctbackup_dbs_common.h +* +* ------------------------------------------------------------------------- + */ + +#ifndef CANTIANDB_CTBACKUP_DBS_COMMON_H +#define CANTIANDB_CTBACKUP_DBS_COMMON_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cm_defs.h" +#include "cm_log.h" +#include "cm_file.h" +#include "cm_date.h" +#include "cm_dbstor.h" +#include "dirent.h" +#include "cm_dbs_defs.h" +#include "cm_dbs_file.h" +#include "cm_utils.h" +#include "ctbackup_module.h" +#include "ctbackup_info.h" + +typedef struct { + char *key; + char *value; +} params_check_list_t; + +typedef struct { + const char **keys; + char **values; + size_t *value_len; + params_check_list_t *check_list; + uint32 params_num; + uint32 check_num; +} params_list_t; + +typedef struct { + device_type_t type; + int32 handle; + char path[MAX_DBS_FS_FILE_PATH_LEN]; +} dbs_device_info_t; + +typedef struct { + char log_fs_name[MAX_DBS_FS_NAME_LEN]; + char page_fs_name[MAX_DBS_FS_NAME_LEN]; + char cluster_name[MAX_DBS_FILE_NAME_LEN]; + char log_fs_vstore_id[MAX_DBS_VSTORE_ID_LEN]; + char dbs_log_path[MAX_DBS_FS_NAME_LEN]; + char page_fs_vstore_id[MAX_DBS_VSTORE_ID_LEN]; + char share_fs_name[MAX_DBS_FS_NAME_LEN]; + char archive_fs_name[MAX_DBS_FS_NAME_LEN]; +} dbs_fs_info_t; + +status_t dbs_init(ctbak_param_t* ctbak_param); +// dbstor --query-file --fs-name=xxx [--file-dir=xxx] [--vstore_id=*] +int32 dbs_query_fs_file(int32 argc, char *argv[]); +// dbstor 
--copy-file --import/--export --fs-name=xxx --source-dir=* --target-dir=* [--file-name=*] [--overwrite] +status_t dbs_copy_fs_file(int32 argc, char *argv[]); +// dbstor --io-forbidden <0, 1> +int32 dbs_set_io_forbidden(int32 argc, char *argv[]); +status_t dbs_create_fs_snap(char* fsName, uint32_t vstorId, snapshot_result_info* snap_info); +status_t dbs_delete_fs_snap(char* fsName, uint32_t vstorId, snapshot_result_info* snap_info); +status_t dbs_create_snapshot_info_file(const char *file_name, int32 *handle); +status_t dbs_write_snapshot_info_file(int32 handle, int64 offset, const void *buf, int32 size); +status_t dbs_read_snapshot_info_file(object_id_t* handle, uint64 offset, void* buf, uint32 length); +uint32 get_parse_params_init_value(char *argv[]); +bool32 compare_bool_param(char *argv[], params_list_t *params_list, uint32 i, uint32 j, bool32 *matched); +status_t compare_param(char *argv[], params_list_t *params_list, uint32 i, uint32 j, bool32 *matched); +status_t parse_params_list(int32 argc, char *argv[], params_list_t *params_list); +status_t check_dir_exist(const char *direction, const char *src_path, const char *dst_path, + char *fs_path, const char *fs_name); +status_t copy_file(const dbs_device_info_t *src_info, const dbs_device_info_t *dst_info); +status_t check_strcat_path(const char *dir, const char *name, char *strcat_name); +status_t copy_file_by_name(const char *file_name, dbs_device_info_t *src_info, + dbs_device_info_t *dst_info, bool32 overwrite); +status_t copy_files_to_target_dir(dbs_device_info_t *src_info, dbs_device_info_t *dst_info, + const char *file_name, bool32 overwrite); +status_t ctbak_get_file_handle_from_share_fs(char *file_path, char *file_name, object_id_t *file_handle); +status_t file_info_screen_print(void *file_list, uint32 file_num, char *path, file_info_version_t info_version); + +extern dbs_fs_info_t g_dbs_fs_info; + +#endif // CANTIANDB_CTBACKUP_DBS_COMMON_H diff --git a/pkg/src/utils/ctbackup/ctbackup_dbs_operator.c 
b/pkg/src/utils/ctbackup/ctbackup_dbs_operator.c new file mode 100644 index 0000000000000000000000000000000000000000..c0313c9fd8ea4ab96725e4210235a9061ab91198 --- /dev/null +++ b/pkg/src/utils/ctbackup/ctbackup_dbs_operator.c @@ -0,0 +1,508 @@ +/* ------------------------------------------------------------------------- +* This file is part of the Cantian project. +* Copyright (c) 2024 Huawei Technologies Co.,Ltd. +* +* Cantian is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +* ------------------------------------------------------------------------- +* +* ctbackup_dbs_operator.c +* +* +* IDENTIFICATION +* src/utils/ctbackup/ctbackup_dbs_operator.c +* +* ------------------------------------------------------------------------- + */ + +#include "ctbackup_dbs_operator.h" +#include "ctbackup_dbs_common.h" + +//dbs_fs_info_t g_dbs_fs_info = { 0 }; + +bool32 ulog_file_filter(const char *file_name) +{ + return strcmp(file_name, g_dbs_fs_info.cluster_name) == 0; +} + +bool32 page_file_filter(const char *file_name) +{ + return strcmp(file_name, "SplitLsnInfo") == 0; +} + +bool32 arch_file_filter(const char *file_name) +{ + return !cm_match_arch_pattern(file_name) && strstr(file_name, "arch_file.tmp") == NULL; +} + +status_t get_archive_location(const char *file_name, const char *conf_name, char *location_value) +{ + char file_buf[CT_MAX_CONFIG_FILE_SIZE] = {0}; + uint32 text_size = sizeof(file_buf); + if (cm_read_config_file(file_name, file_buf, &text_size, CT_FALSE, CT_FALSE) != CT_SUCCESS) { + printf("read config file failed!, the file_name is %s.\n", 
file_name); + return CT_ERROR; + } + text_t text; + text_t line; + text_t name; + text_t value; + text.len = text_size; + text.str = file_buf; + + while (cm_fetch_text(&text, '\n', '\0', &line)) { + cm_trim_text(&line); + if (line.len == 0 || *line.str == '#') { + continue; + } + + cm_split_text(&line, '=', '\0', &name, &value); + cm_trim_text(&value); + cm_text_upper(&name); + cm_trim_text(&name); + if (cm_text_str_equal_ins(&name, conf_name)) { + char *location = strstr(value.str, "location="); + if (location != NULL) { + location += strlen("location="); + cm_trim_text(&value); + errno_t ret = strncpy_s(location_value, CT_PARAM_BUFFER_SIZE, location, + value.len - (location - value.str)); + return ret == EOK ? CT_SUCCESS : CT_ERROR; + } + } + } + return CT_ERROR; +} + +status_t get_location_by_cfg(char *location_value) +{ + char cantiand_ini_file_name[CT_MAX_FILE_PATH_LENGH] = {0}; + status_t status = get_cantiand_ini_file_name(cantiand_ini_file_name); + if (status != CT_SUCCESS) { + printf("Failed to get cantiand ini file. Status: %d\n", status); + return CT_ERROR; + } + + status = get_archive_location(cantiand_ini_file_name, ARCHIVE_DEST_PATH, location_value); + if (status != CT_SUCCESS) { + printf("Failed to get archive location from config. 
Ini file: %s, Status: %d\n", + cantiand_ini_file_name, status); + return CT_ERROR; + } + + return CT_SUCCESS; +} + +status_t dbs_set_ns_io_forbidden(bool32 isForbidden) +{ + if (dbs_global_handle()->dbs_ns_io_forbidden == NULL) { + printf("dbs_ns_io_forbidden is not support\n"); + return CT_ERROR; + } + + status_t ret = dbs_global_handle()->dbs_ns_io_forbidden(g_dbs_fs_info.cluster_name, isForbidden); + if (ret != CT_SUCCESS) { + printf("Set ns forbidden failed(%d).\n", ret); + return ret; + } + printf("Set ns forbidden success.\n"); + return ret; +} + +status_t dbs_clean_files(dbs_device_info_t *src_info, void *file_list, uint32 file_num, file_filter_func filter_func) +{ + CT_LOG_RUN_INF("[DBSTOR] Removed files in dir %s", src_info->path); + printf("Remove files list:\n"); + for (uint32 i = 0; i < file_num; i++) { + char file_path[MAX_DBS_FS_FILE_PATH_LEN] = { 0 }; + char *file_name = cm_get_name_from_file_list(src_info->type, file_list, i); + if (file_name == NULL) { + printf("Failed to get file name.\n"); + return CT_ERROR; + } + + if (filter_func != NULL && filter_func(file_name) == CT_TRUE) { + continue; + } + + PRTS_RETURN_IFERR(snprintf_s(file_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "%s/%s", src_info->path, file_name)); + + if (cm_remove_device(src_info->type, file_path) != CT_SUCCESS) { + printf("remove file failed, file name %s\n", file_name); + CT_LOG_RUN_ERR("[DBSTOR] remove file failed, file name %s", file_name); + return CT_ERROR; + } + printf("%s\n", file_name); + CT_LOG_RUN_INF("[DBSTOR] Removed file: %s\n", file_name); + } + printf("Remove files successful.\n"); + return CT_SUCCESS; +} + +// dbstor --pagepool-clean [--fs-name=xxx] [--cluster-name=xxx] +int32 dbs_pagepool_clean() +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char cluster_name[MAX_DBS_FILE_PATH_LEN] = {0}; + MEMS_RETURN_IFERR(strncpy_s(fs_name, MAX_DBS_FS_NAME_LEN, g_dbs_fs_info.page_fs_name, + strlen(g_dbs_fs_info.page_fs_name))); + 
MEMS_RETURN_IFERR(strncpy_s(cluster_name, MAX_DBS_FILE_PATH_LEN, g_dbs_fs_info.cluster_name, + strlen(g_dbs_fs_info.cluster_name))); + + char pagepool_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + PRTS_RETURN_IFERR(snprintf_s(pagepool_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, cluster_name)); + + void *file_list = NULL; + uint32 file_num = 0; + dbs_device_info_t src_info = { .handle = -1, .type = DEV_TYPE_DBSTOR_FILE, .path = "" }; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, pagepool_path, strlen(pagepool_path))); + + if (cm_malloc_file_list(src_info.type, &file_list, src_info.path, &file_num) != CT_SUCCESS) { + return CT_ERROR; + } + + if (cm_query_device(src_info.type, src_info.path, file_list, &file_num) != CT_SUCCESS) { + printf("Failed to get file list, dir is %s.\n", src_info.path); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + if (dbs_clean_files(&src_info, file_list, file_num, page_file_filter) != CT_SUCCESS) { + printf("Pagepool clean failed.\n"); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + cm_free_file_list(&file_list); + printf("Pagepool clean successful.\n"); + return CT_SUCCESS; +} + +status_t dbs_clean_files_ulog(uint32 vstore_id, dbs_device_info_t *src_info, void *file_list, + uint32 file_num, file_filter_func filter_func) +{ + CT_LOG_RUN_INF("[DBSTOR] Removed files in dir %s", src_info->path); + printf("Remove files list:\n"); + file_info_version_t info_version = DBS_FILE_INFO_VERSION_1; + if (dbs_global_handle()->dbs_file_get_list_detail != NULL) { + info_version = DBS_FILE_INFO_VERSION_2; + } + for (uint32 i = 0; i < file_num; i++) { + char file_path[MAX_DBS_FS_FILE_PATH_LEN] = { 0 }; + char *file_name = NULL; + if (info_version == DBS_FILE_INFO_VERSION_1) { + dbstor_file_info *file_info = (dbstor_file_info *)((char *)file_list + i * sizeof(dbstor_file_info)); + file_name = file_info->file_name; + } else { + dbstor_file_info_detail *file_info = 
(dbstor_file_info_detail *)((char *)file_list + + i * sizeof(dbstor_file_info_detail)); + file_name = file_info->file_name; + } + + if (file_name == NULL || strlen(file_name) == 0) { + printf("Failed to get file name.\n"); + return CT_ERROR; + } + + if (filter_func != NULL && filter_func(file_name) == CT_TRUE) { + continue; + } + + PRTS_RETURN_IFERR(snprintf_s(file_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "%s/%s", src_info->path, file_name)); + + if (cm_dbs_remove_file_vstore_id(vstore_id, file_path) != CT_SUCCESS) { + printf("remove file failed, file name %s\n", file_name); + CT_LOG_RUN_ERR("[DBSTOR] remove file failed, file name %s", file_name); + return CT_ERROR; + } + printf("%s\n", file_name); + CT_LOG_RUN_INF("[DBSTOR] Removed file: %s\n", file_name); + } + printf("Remove files successful.\n"); + return CT_SUCCESS; +} + +// dbstor --ulog-clean [--fs-name=xxx] [--cluster-name=xxx] +int32 dbs_ulog_clean() +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char cluster_name[MAX_DBS_FILE_PATH_LEN] = {0}; + char vstore_id[MAX_DBS_VSTORE_ID_LEN] = {0}; + MEMS_RETURN_IFERR(strncpy_s(fs_name, MAX_DBS_FS_NAME_LEN, g_dbs_fs_info.log_fs_name, + strlen(g_dbs_fs_info.log_fs_name))); + MEMS_RETURN_IFERR(strncpy_s(cluster_name, MAX_DBS_FILE_PATH_LEN, cluster_name, + strlen(g_dbs_fs_info.cluster_name))); + MEMS_RETURN_IFERR(strncpy_s(vstore_id, MAX_DBS_VSTORE_ID_LEN, g_dbs_fs_info.log_fs_vstore_id, + strlen(g_dbs_fs_info.log_fs_vstore_id))); + + uint32 vstore_id_uint = (uint32)atoi(vstore_id); + char ulog_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + PRTS_RETURN_IFERR(snprintf_s(ulog_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, cluster_name)); + + void *file_list = NULL; + uint32 file_num = 0; + dbs_device_info_t src_info = { .handle = -1, .type = DEV_TYPE_DBSTOR_FILE, .path = "" }; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, ulog_path, strlen(ulog_path))); + file_info_version_t info_version = 
DBS_FILE_INFO_VERSION_1; + if (dbs_global_handle()->dbs_file_get_list_detail != NULL) { + info_version = DBS_FILE_INFO_VERSION_2; + } + if (cm_malloc_file_list_by_version_id(info_version, vstore_id_uint, + &file_list, src_info.path, &file_num) != CT_SUCCESS) { + printf("Failed to allocate memory for file list.\n"); + return CT_ERROR; + } + + if (cm_dbs_query_dir_vstore_id(vstore_id_uint, src_info.path, file_list, &file_num) != CT_SUCCESS) { + printf("Failed to get file list, dir is %s.\n", src_info.path); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + if (dbs_clean_files_ulog(vstore_id_uint, &src_info, file_list, file_num, ulog_file_filter) != CT_SUCCESS) { + printf("ULOG clean failed.\n"); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + cm_free_file_list(&file_list); + printf("ULOG clean successful.\n"); + return CT_SUCCESS; +} + +status_t dbs_get_arch_location(char *archive_location, const char *fs_name) +{ + if (strlen(fs_name) == 0) { + if (get_location_by_cfg(archive_location) != CT_SUCCESS) { + printf("Failed to get archive location.\n"); + return CT_ERROR; + } + } else { + PRTS_RETURN_IFERR(snprintf_s(archive_location, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/archive", fs_name)); + } + if (strlen(archive_location) == 0) { + printf("Failed to get archive location,\n"); + return CT_ERROR; + } + return CT_SUCCESS; +} +// dbstor --arch-clean [--fs-name=xxx] +int32 dbs_arch_clean() +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char archive_location[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + + if (dbs_get_arch_location(archive_location, fs_name) != CT_SUCCESS) { + return CT_ERROR; + } + + void *file_list = NULL; + uint32 file_num = 0; + dbs_device_info_t src_info = { .handle = -1, .type = DEV_TYPE_DBSTOR_FILE, .path = "" }; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, archive_location, strlen(archive_location))); + + if (cm_malloc_file_list(src_info.type, &file_list, src_info.path, &file_num) != 
CT_SUCCESS) { + return CT_ERROR; + } + + if (cm_query_device(src_info.type, src_info.path, file_list, &file_num) != CT_SUCCESS) { + printf("Failed to get file list, dir is %s.\n", src_info.path); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + if (dbs_clean_files(&src_info, file_list, file_num, arch_file_filter) != CT_SUCCESS) { + printf("Archive files clean failed.\n"); + cm_free_file_list(&file_list); + return CT_ERROR; + } + + cm_free_file_list(&file_list); + printf("Archive files clean successful.\n"); + return CT_SUCCESS; +} + +// dbstor --copy-file --import/--export --fs-name=xxx --source-dir=* --target-dir=* [--file-name=*] [--overwrite] +status_t dbs_copy_file(int32 argc, char *argv[]) +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char file_name[MAX_DBS_FILE_PATH_LEN] = {0}; + char source_dir[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + char target_dir[MAX_DBS_FILE_PATH_LEN] = {0}; + char overwrite[DBS_TOOL_PARAM_BOOL_LEN] = BOOL_FALSE; + const char *params[] = {DBS_TOOL_PARAM_FS_NAME, DBS_TOOL_PARAM_FILE_NAME, DBS_TOOL_PARAM_SOURCE_DIR, + DBS_TOOL_PARAM_TARGET_DIR, DBS_TOOL_PARAM_OVERWRITE}; + char *results[] = {fs_name, file_name, source_dir, target_dir, overwrite}; + size_t result_lens[] = {MAX_DBS_FS_NAME_LEN, MAX_DBS_FILE_PATH_LEN, MAX_DBS_FS_FILE_PATH_LEN, MAX_DBS_FILE_PATH_LEN, + DBS_TOOL_PARAM_BOOL_LEN}; + params_check_list_t check_list[] = {{DBS_TOOL_PARAM_FS_NAME, fs_name}, {DBS_TOOL_PARAM_SOURCE_DIR, source_dir}, + {DBS_TOOL_PARAM_TARGET_DIR, target_dir}}; + params_list_t params_list = {params, results, result_lens, check_list, DBS_COPY_FILE_PRAMA_NUM, + DBS_COPY_FILE_CHECK_PRAMA_NUM}; + if (parse_params_list(argc, argv, ¶ms_list) != CT_SUCCESS) { + printf("Invalid command.\nUsage: --copy-file --import --fs-name=xxx --source-dir=* --target-dir=* " + "[--file-name=*] [--overwrite]\n"); + return CT_ERROR; + } + char file_system_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + if (check_dir_exist(argv[DBS_COPY_FILE_OP_PRAMA], source_dir, target_dir, 
file_system_path, fs_name) != CT_SUCCESS) { + return CT_ERROR; + } + dbs_device_info_t src_info = {.handle = -1, .path = ""}; + dbs_device_info_t dst_info = {.handle = -1, .path = ""}; + + if (strncmp(argv[DBS_COPY_FILE_OP_PRAMA], DBS_IMPORT_PARAM, strlen(DBS_IMPORT_PARAM)) == 0) { + src_info.type = DEV_TYPE_FILE; + dst_info.type = DEV_TYPE_DBSTOR_FILE; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, source_dir, strlen(source_dir))); + MEMS_RETURN_IFERR(strncpy_s(dst_info.path, MAX_DBS_FS_FILE_PATH_LEN, + file_system_path, strlen(file_system_path))); + } else if (strncmp(argv[DBS_COPY_FILE_OP_PRAMA], DBS_EXPORT_PARAM, strlen(DBS_EXPORT_PARAM)) == 0) { + src_info.type = DEV_TYPE_DBSTOR_FILE; + dst_info.type = DEV_TYPE_FILE; + MEMS_RETURN_IFERR(strncpy_s(src_info.path, MAX_DBS_FS_FILE_PATH_LEN, + file_system_path, strlen(file_system_path))); + MEMS_RETURN_IFERR(strncpy_s(dst_info.path, MAX_DBS_FS_FILE_PATH_LEN, target_dir, strlen(target_dir))); + } else { + printf("Invalid command, Missing parameters '--import/--export'.\n"); + return CT_ERROR; + } + // 将源文件或目录复制到目标目录 + if (copy_files_to_target_dir(&src_info, &dst_info, strlen(file_name) == 0 ? NULL : file_name, + strncmp(overwrite, BOOL_TRUE, strlen(BOOL_TRUE)) == 0 + ? 
CT_TRUE : CT_FALSE) != CT_SUCCESS) { + printf("Failed to copy files from %s to %s.\n", src_info.path, dst_info.path); + return CT_ERROR; + } + printf("File(s) copied successfully from %s to %s.\n", src_info.path, dst_info.path); + return CT_SUCCESS; +} + +status_t dbs_query_file(int32 argc, char *argv[], void **file_list, uint32 *file_num, file_info_version_t *info_version) +{ + printf("start dbs query file\n"); + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char file_path[MAX_DBS_FILE_PATH_LEN] = {0}; + char vstore_id[MAX_DBS_VSTORE_ID_LEN] = {0}; + const char *params[] = {DBS_TOOL_PARAM_FS_NAME, DBS_TOOL_PARAM_FILE_DIR, DBS_TOOL_PARAM_VSTORE_ID}; + char *results[] = {fs_name, file_path, vstore_id}; + size_t result_lens[] = {MAX_DBS_FS_NAME_LEN, MAX_DBS_FILE_PATH_LEN, MAX_DBS_VSTORE_ID_LEN}; + params_check_list_t check_list[] = {{DBS_TOOL_PARAM_FS_NAME, fs_name}}; + params_list_t params_list = {params, results, result_lens, check_list, DBS_QUERY_FILE_PRAMA_NUM, + DBS_QUERY_FILE_CHECK_PRAMA_NUM}; + + if (parse_params_list(argc, argv, ¶ms_list) != CT_SUCCESS) { + printf("Invalid command.\nUsage: --query-file --fs-name=xxx [--file-dir=xxx] [--vstore-id=*]\n"); + return CT_ERROR; + } + char full_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + if (strlen(file_path) == 0) { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s", fs_name)); + } else { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, file_path)); + } + dbs_device_info_t query_info = { .handle = -1, .type = DEV_TYPE_DBSTOR_FILE, .path = "" }; + MEMS_RETURN_IFERR(strncpy_s(query_info.path, MAX_DBS_FS_FILE_PATH_LEN, full_path, strlen(full_path))); + + uint32 vstore_id_uint = 0; + if (strlen(vstore_id) > 0) { + vstore_id_uint = (uint32)atoi(vstore_id); + } + if (dbs_global_handle()->dbs_file_get_list_detail != NULL) { + *info_version = DBS_FILE_INFO_VERSION_2; + } + if 
(cm_malloc_file_list_by_version_id(*info_version, vstore_id_uint, + file_list, query_info.path, file_num) != CT_SUCCESS) { + printf("Failed to allocate memory for file list.\n"); + return CT_ERROR; + } + status_t ret = cm_dbs_query_dir_vstore_id(vstore_id_uint, query_info.path, *file_list, file_num); + if (ret != CT_SUCCESS) { + printf("Failed to query files in directory: %s with vstore-id: %u\n", query_info.path, vstore_id_uint); + cm_free_file_list(file_list); + return CT_ERROR; + } + printf("finish dbs query file\n"); + return CT_SUCCESS; +} + +// dbstor --create-file --fs-name=xxx [--file-dir=xxx] [--file-name=xxx] +// 创建文件或目录('/'结尾)。如果指定了 source-dir 参数,则从 source-dir 复制(覆盖)文件内容到目标位置。 +status_t dbs_create_path_or_file(int32 argc, char *argv[]) +{ + char fs_name[MAX_DBS_FS_NAME_LEN] = {0}; + char file_dir[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + char file_name[MAX_DBS_FILE_PATH_LEN] = {0}; + + const char *params[] = {DBS_TOOL_PARAM_FS_NAME, DBS_TOOL_PARAM_FILE_NAME, DBS_TOOL_PARAM_FILE_DIR}; + char *results[] = {fs_name, file_name, file_dir}; + size_t result_lens[] = {MAX_DBS_FS_NAME_LEN, MAX_DBS_FILE_PATH_LEN, MAX_DBS_FS_FILE_PATH_LEN}; + params_check_list_t check_list[] = {{DBS_TOOL_PARAM_FS_NAME, fs_name}}; + params_list_t params_list = {params, results, result_lens, check_list, DBS_CRAETE_FILE_PRAMA_NUM, + DBS_CRAETE_FILE_CHECK_PRAMA_NUM}; + + if (parse_params_list(argc, argv, ¶ms_list) != CT_SUCCESS) { + printf("Invalid command.\nUsage: --creat-file --fs-name=xxx [--file-name=xxx] [--file-name=xxx]\n"); + return CT_ERROR; + } + if (strlen(file_dir) == 0 && strlen(file_name) == 0) { + printf("file_dir and file_name both is empty.\n"); + return CT_ERROR; + } + + char full_path[MAX_DBS_FS_FILE_PATH_LEN] = {0}; + dbs_device_info_t dst_info = { .handle = -1, .type = DEV_TYPE_DBSTOR_FILE, .path = "" }; + + if (strlen(file_dir) > 0 && strlen(file_name) == 0) { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", 
fs_name, file_dir)); + MEMS_RETURN_IFERR(strncpy_s(dst_info.path, MAX_DBS_FS_FILE_PATH_LEN, full_path, strlen(full_path))); + if (cm_dbs_exist_file(full_path, DIR_TYPE) == CT_TRUE) { + printf("Target directory is exist, file_path: %s.\n", full_path); + return CT_SUCCESS; + } + status_t ret = cm_create_device_dir(dst_info.type, dst_info.path); + if (ret != CT_SUCCESS) { + printf("Failed to create directory: %s\n", dst_info.path); + return CT_ERROR; + } + printf("Directory created successfully: %s\n", dst_info.path); + } else { + if (strlen(file_dir) == 0) { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s", fs_name, file_name)); + } else { + PRTS_RETURN_IFERR(snprintf_s(full_path, MAX_DBS_FS_FILE_PATH_LEN, + MAX_DBS_FS_FILE_PATH_LEN - 1, "/%s/%s/%s", fs_name, file_dir, file_name)); + } + MEMS_RETURN_IFERR(strncpy_s(dst_info.path, MAX_DBS_FS_FILE_PATH_LEN, full_path, strlen(full_path))); + if (cm_dbs_exist_file(full_path, FILE_TYPE) == CT_TRUE) { + printf("Target file is exist, file_path: %s.\n", full_path); + return CT_SUCCESS; + } + status_t ret = cm_create_device(dst_info.path, dst_info.type, 0, &dst_info.handle); + if (ret != CT_SUCCESS) { + printf("Failed to create file: %s\n", dst_info.path); + return CT_ERROR; + } + cm_close_device(dst_info.type, &dst_info.handle); + printf("File created successfully: %s\n", dst_info.path); + } + + return CT_SUCCESS; +} \ No newline at end of file diff --git a/pkg/src/utils/ctbackup/ctbackup_dbs_operator.h b/pkg/src/utils/ctbackup/ctbackup_dbs_operator.h new file mode 100644 index 0000000000000000000000000000000000000000..c563599c9a9009d220053b9513e127290c8952df --- /dev/null +++ b/pkg/src/utils/ctbackup/ctbackup_dbs_operator.h @@ -0,0 +1,150 @@ +/* ------------------------------------------------------------------------- +* This file is part of the Cantian project. +* Copyright (c) 2024 Huawei Technologies Co.,Ltd. +* +* Cantian is licensed under Mulan PSL v2. 
+* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +* ------------------------------------------------------------------------- +* +* ctbackup_dbs_operator.h +* +* +* IDENTIFICATION +* src/utils/ctbackup/ctbackup_dbs_operator.h +* +* ------------------------------------------------------------------------- +*/ + +#ifndef CANTIANDB_CTBACKUP_DBS_OPERATOR_H +#define CANTIANDB_CTBACKUP_DBS_OPERATOR_H + +#include "cm_defs.h" +#include "ctbackup_info.h" +#include "cm_file.h" +#include "bak_common.h" +#include "cm_dbs_snapshot.h" +#include "ctbackup_common.h" +#include "cm_dbs_file.h" +#include "ctbackup_common.h" +#include "ctbackup_module.h" + +#define DBS_CONFIG_FILE_NAME_LEN 32 +#define DBS_WAIT_CONFIG_RETRY_NUM 2 +#define DBS_WAIT_CONFIG_INTERVAL_TIME 2000 +#define DBS_CONFIG_MAX_PARAM 256 +#define DBS_CLUSTER_UUID_LEN 37 + +#define DBS_TOOL_CONFIG_PATH "/opt/cantian/dbstor/conf/dbs" +#define DBS_CANTIAN_CONFIG_PATH "/mnt/dbdata/local/cantian/tmp/data/dbstor/conf/dbs/dbstor_config.ini" +#define DBS_CMS_CONFIG_PATH "/opt/cantian/cms/dbstor/conf/dbs/dbstor_config.ini" +#define DBS_HOME_PATH "/opt/cantian" +#define ARCHIVE_DEST_PATH "ARCHIVE_DEST_1" +#define CANTIAND_INI_FILE_NAME "cantiand.ini" +#define DEV_RW_BUFFER_SIZE (1 * 1024 * 1024) +#define DBS_TOOL_PARAM_SOURCE_DIR "--source-dir=" +#define DBS_TOOL_PARAM_TARGET_DIR "--target-dir=" +#define DBS_TOOL_PARAM_ARCH_FILE "--arch-file=" +#define DBS_TOOL_PARAM_FS_NAME "--fs-name=" +#define DBS_TOOL_PARAM_CLUSTER_NAME "--cluster-name=" +#define DBS_TOOL_PARAM_FILE_NAME "--file-name=" +#define DBS_TOOL_PARAM_FILE_DIR "--file-dir=" +#define 
DBS_TOOL_PARAM_VSTORE_ID "--vstore_id=" +#define DBS_PERF_SHOW_INTERVAL "--interval=" +#define DBS_PERF_SHOW_TIMES "--times=" +#define DBS_TOOL_PARAM_OVERWRITE "--overwrite" +#define MAX_VALUE_UINT32 "4294967295" +#define DBS_LINK_CHECK_CNT "LINK_CHECK_CNT" +#define BOOL_FALSE "false" +#define BOOL_FALSE_LEN 5 +#define BOOL_TRUE "true" +#define BOOL_TRUE_LEN 4 +#define DBS_FILE_TYPE_DIR "dir" +#define DBS_FILE_TYPE_FILE "file" +#define DBS_FILE_TYPE_UNKNOWN "unknown" +#define DBS_TOOL_PARAM_BOOL_LEN 6 +#define DBS_LINK_CHECK_PARAM_LEN 64 +#define DBS_LINK_TIMEOUT_MIN 3 +#define DBS_LINK_TIMEOUT_MAX 10 + +#define DBS_COPY_FILE_PARAM "--copy-file" +#define DBS_IMPORT_PARAM "--import" +#define DBS_EXPORT_PARAM "--export" + +#define BACKUP_PAGE_DIR_NAME "page" +#define BACKUP_REDO_DIR_NAME "ulog" + + +#define DBS_ARCH_QUERY_PRAMA_NUM 1 +#define DBS_ARCH_CLEAN_PRAMA_NUM 1 +#define DBS_ARCH_EXPORT_PRAMA_NUM 3 +#define DBS_ARCH_IMPORT_PRAMA_NUM 3 +#define DBS_ULOG_CLEAN_PRAMA_NUM 3 +#define DBS_PGPOOL_CLEAN_PRAMA_NUM 2 +#define DBS_CRAETE_FILE_PRAMA_NUM 3 +#define DBS_COPY_FILE_PRAMA_NUM 6 +#define DBS_DELETE_FILE_PRAMA_NUM 2 +#define DBS_QUERY_FILE_PRAMA_NUM 4 +#define DBS_QUERY_FS_INFO_PRAMA_NUM 2 +#define DBS_CREATE_DIR_PRAMA_NUM 3 + + +#define DBS_NO_CHECK_PRAMA_NUM 0 +#define DBS_ARCH_EXPORT_PRAMA_CHECK_NUM 1 +#define DBS_ARCH_IMPORT_PRAMA_CHECK_NUM 1 +#define DBS_ULOG_CLEAN_CHECK_PRAMA_NUM 3 +#define DBS_PGPOOL_CLEAN_CHECK_PRAMA_NUM 2 +#define DBS_CRAETE_FILE_CHECK_PRAMA_NUM 1 +#define DBS_COPY_FILE_CHECK_PRAMA_NUM 3 +#define DBS_DELETE_FILE_CHECK_PRAMA_NUM 2 +#define DBS_QUERY_FS_INFO_CHECK_PRAMA_NUM 2 +#define DBS_PERF_SHOW_PRAMA_NUM 2 +#define DBS_QUERY_FILE_CHECK_PRAMA_NUM 1 + +#define MODE_STR_LEN 10 +#define USER_NAME_LEN 32 +#define GROUP_NAME_LEN 255 +#define TIME_STR_LEN 25 +#define DBS_WAIT_CGW_LINK_INIT_TIME_SECOND 2 + +#define DBS_COPY_FILE_DBSTOR_PRAMA 0 +#define DBS_COPY_FILE_COPY_FILE_PRAMA 1 +#define DBS_COPY_FILE_OP_PRAMA 2 +#define 
DBS_COPY_FILE_FS_NAME_PRAMA 3 +#define DBS_COPY_FILE_SOURCE_DIR_PRAMA 4 +#define DBS_COPY_FILE_TARGET_DIR_PRAMA 5 +#define DBS_COPY_FILE_FILE_NAME_PRAMA 6 + +#define DBS_QUERY_FILE_DBSTOR_PRAMA 0 +#define DBS_QUERY_FILE_QUERY_FILE_PRAMA 1 +#define DBS_QUERY_FILE_FS_NAME_PRAMA 2 +#define DBS_QUERY_FILE_FILE_DIR_PRAMA 3 +#define DBS_QUERY_FILE_VSTORE_ID_PRAMA 4 + +#define DBS_CREATE_DIR_DBSTOR_PRAMA 0 +#define DBS_CREATE_DIR_CREATE_DIR_PRAMA 1 +#define DBS_CREATE_DIR_FS_NAME_PRAMA 2 +#define DBS_CREATE_DIR_FILE_DIR_PRAMA 3 + +typedef bool32 (*file_filter_func)(const char *); + +//extern dbs_fs_info_t g_dbs_fs_info; + +//status_t dbstool_init(); +int32 dbs_arch_clean(); +int32 dbs_ulog_clean(); +int32 dbs_pagepool_clean(); +int32 dbs_copy_file(int32 argc, char *argv[]); +status_t dbs_query_file(int32 argc, char *argv[], void **file_list, uint32 *file_num, file_info_version_t *info_version); +status_t dbs_set_ns_io_forbidden(bool32 is_forbidden); +int32 dbs_query_fs_info(int32 argc, char *argv[]); +int32 dbs_perf_show(int32 argc, char *argv[]); +status_t dbs_create_path_or_file(int32 argc, char *argv[]); +#endif // CANTIANDB_CTBACKUP_DBS_OPERATOR_H diff --git a/pkg/src/utils/ctbackup/ctbackup_info.h b/pkg/src/utils/ctbackup/ctbackup_info.h index 7a3672fbd94af6e45795bb81befe12b5c70983a9..043a435df2d3780b5e14af5be74e683d8144673f 100644 --- a/pkg/src/utils/ctbackup/ctbackup_info.h +++ b/pkg/src/utils/ctbackup/ctbackup_info.h @@ -42,12 +42,22 @@ extern "C" { #define CTBAK_ARG_RECONCIEL_MYSQL "--reconciel-mysql" #define CTBAK_ARG_QUERY_INCREMENTAL_MODE "--query-incremental-mode" #define CTBAK_ARG_PURGE_LOGS "--purge-logs" +#define CTBAK_ARG_SNAPSHOT "--snapshot" +#define CTBAK_ARG_SNAPSHOT_BACKUP "--snapshot-backup" +#define CTBAK_ARG_SNAPSHOT_RESTORE "--snapshot-restore" #define CTBAK_PARSE_OPTION_COMMON 0 +#define CTBAK_PARSE_OPTION_SNAPSHOT 1 +#define CTBAK_PARSE_OPTION_SNAPSHOT_BACKUP 2 +#define CTBAK_PARSE_OPTION_SNAPSHOT_RESTORE 3 #define CTBAK_PARSE_OPTION_ERR (-1) 
// long options for ctbackup #define CTBAK_LONG_OPTION_BACKUP "backup" +#define CTBAK_LONG_OPTION_SNAPSHOT "snapshot" +#define CTBAK_LONG_OPTION_SNAPSHOT_BACKUP "snapshot-backup" +#define CTBAK_LONG_OPTION_SNAPSHOT_RESTORE "snapshot-restore" +#define CTBAK_LONG_OPTION_NOTDELETE "notdelete" #define CTBAK_LONG_OPTION_PREPARE "prepare" #define CTBAK_LONG_OPTION_COPYBACK "copy-back" #define CTBAK_LONG_OPTION_ARCHIVELOG "archivelog" @@ -111,6 +121,7 @@ extern "C" { #define CTBAK_SHORT_OPTION_FORCE_DDL 'F' #define CTBAK_SHORT_OPTION_SKIP_BADBLOCK 'k' #define CTBAK_SHORT_OPTION_REPAIR_TYPE 'a' +#define CTBAK_SHORT_OPTION_NOTDELETE 'n' typedef enum en_ctbak_topic { CTBAK_INVALID, @@ -123,6 +134,9 @@ typedef enum en_ctbak_topic { CTBAK_RECONCIEL_MYSQL, CTBAK_QUERY_INCREMENTAL_MODE, CTBAK_PURGE_LOGS, + CTBAK_SNAPSHOT, + CTBAK_SNAPSHOT_BACKUP, + CTBAK_SNAPSHOT_RESTORE, } ctbak_topic_t; typedef struct ctbak_param { @@ -142,6 +156,12 @@ typedef struct ctbak_param { text_t buffer_size; text_t repair_type; text_t databases_exclude; + text_t share_fs_name; + text_t page_fs_name; + text_t log_fs_name; + text_t archive_fs_name; + text_t page_fs_vstore_id; + text_t log_fs_vstore_id; uint8 is_decompress; uint8 is_pitr_cancel; uint8 is_restore; @@ -153,6 +173,9 @@ typedef struct ctbak_param { uint8 is_force_ddl; uint8 skip_badblock; uint8 is_mysql_metadata_in_cantian; + uint8 is_snapshot; + uint8 is_snapshot_backup; + uint8 is_notdelete; } ctbak_param_t; typedef status_t (* ctbak_execute_t)(ctbak_param_t* ctbak_param); diff --git a/pkg/src/utils/ctbackup/ctbackup_prepare.c b/pkg/src/utils/ctbackup/ctbackup_prepare.c index afc884f7973262beafa9ad122d61b516098de500..f4aee2411c7ae170ed0e1332b791f57f955decf2 100644 --- a/pkg/src/utils/ctbackup/ctbackup_prepare.c +++ b/pkg/src/utils/ctbackup/ctbackup_prepare.c @@ -206,7 +206,7 @@ status_t ctbak_do_prepare(ctbak_param_t* ctbak_param) if (check_common_params(ctbak_param) != CT_SUCCESS) { return CT_ERROR; } - 
CT_RETURN_IFERR(check_cantiand_status()); + CT_RETURN_IFERR(check_cantiand_status(CT_FALSE)); CT_RETURN_IFERR(start_cantiand_server()); CT_RETURN_IFERR(ctbak_check_ctsql_online(CTSQL_CHECK_CONN_MAX_TIME_S)); if (ctbackup_set_metadata_mode(ctbak_param) != CT_SUCCESS) { diff --git a/pkg/src/utils/ctsql/ctsql.h b/pkg/src/utils/ctsql/ctsql.h index 1c2c803dfcd812188930bc3b3e419e165f163345..37260a2777725ad9b11d1719c75da40f8caeada3 100644 --- a/pkg/src/utils/ctsql/ctsql.h +++ b/pkg/src/utils/ctsql/ctsql.h @@ -313,6 +313,15 @@ status_t ctsql_execute_sql(void); } \ } while (0) +#define CTSQL_PRINT_AND_RETURN_IFERR(ret, format, ...) \ + do { \ + status_t _status_ = (ret); \ + if (SECUREC_UNLIKELY(_status_ == -1)) { \ + printf(format, ##__VA_ARGS__); \ + return _status_; \ + } \ + } while (0) + static inline void ctsql_print_disconn_error(void) { ctsql_printf("CT-%05d, %s\n", ERR_CLT_CONN_CLOSE, "connect is not established"); diff --git a/pkg/src/utils/ctsql/ctsql_wsr.c b/pkg/src/utils/ctsql/ctsql_wsr.c index 4d3688a53d2ecb34ed347d6afe63eef8bc9faa59..bf1b8e677d8e58fc7b30cf506d53b912b307d989 100644 --- a/pkg/src/utils/ctsql/ctsql_wsr.c +++ b/pkg/src/utils/ctsql/ctsql_wsr.c @@ -518,61 +518,68 @@ static inline int wsr_prepare(wsr_options_t *wsr_opts, const char *node_name) static int wsr_build_report(wsr_options_t *wsr_opts, wsr_info_t *wsr_info, char *node_name) { - CT_RETURN_IFERR(wsr_prepare(wsr_opts, node_name)); - CT_RETURN_IFERR(wsr_get_dbinfo(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_prepare(wsr_opts, node_name), "wsr_prepare failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_get_dbinfo(wsr_opts, wsr_info), "wsr_get_dbinfo failed\n"); if (wsr_opts->input_snap_id) { - CT_RETURN_IFERR(wsr_build_wsr_info_t(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_wsr_info_t(wsr_opts, wsr_info), "wsr_build_wsr_info_t failed\n"); } - CT_RETURN_IFERR(wsr_build_header(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_header(wsr_opts, wsr_info), 
"wsr_build_header failed\n"); if (wsr_opts->input_snap_id) { - CT_RETURN_IFERR(wsr_build_load_profile(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_efficiency(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_top_events(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_host_cpu(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_host_mem(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_load_profile(wsr_opts, wsr_info), "wsr_build_load_profile failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_efficiency(wsr_opts, wsr_info), "wsr_build_efficiency failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_top_events(wsr_opts, wsr_info), "wsr_build_top_events failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_host_cpu(wsr_opts, wsr_info), "wsr_build_host_cpu failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_host_mem(wsr_opts, wsr_info), "wsr_build_host_mem failed\n"); } - CT_RETURN_IFERR(wsr_build_instance_snap(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_instance_buffer(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_instance_snap(wsr_opts, wsr_info), "wsr_build_instance_snap failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_instance_buffer(wsr_opts, wsr_info), "wsr_build_instance_buffer failed\n"); if (wsr_opts->input_snap_id) { - CT_RETURN_IFERR(wsr_build_top_session(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_top_session(wsr_opts, wsr_info), "wsr_build_top_session failed\n"); } - CT_RETURN_IFERR(wsr_build_top_session_sql(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_top_session_trans(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_top_session_sql(wsr_opts, wsr_info), "wsr_build_top_session_sql failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_top_session_trans(wsr_opts, wsr_info), "wsr_bild_top_session_trans failed\n"); if (wsr_opts->input_snap_id) { - CT_RETURN_IFERR(wsr_build_top_session_cursors_start(wsr_opts, wsr_info)); - 
CT_RETURN_IFERR(wsr_build_top_session_cursors_end(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_top_session_cursors_start(wsr_opts, wsr_info), "wsr_build_top_session_cursors_start failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_top_session_cursors_end(wsr_opts, wsr_info), "wsr_build_top_session_cursors_end failed\n"); } if (wsr_opts->input_snap_id) { - CT_RETURN_IFERR(wsr_build_sql_elapsed(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_longsql_time(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_cpu_time(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_io_wait(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_sql_gets(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_sql_reads(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_sql_executions(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_sql_parses(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_6)); - CT_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_10)); - CT_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_15)); - CT_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_20)); - CT_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_30)); - CT_RETURN_IFERR(wsr_build_long_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_15)); - CT_RETURN_IFERR(wsr_build_long_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_30)); - } - CT_RETURN_IFERR(wsr_build_sql_content(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_elapsed(wsr_opts, wsr_info), "wsr_build_sql_elapsed failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_longsql_time(wsr_opts, wsr_info), "wsr_build_longsql_time failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_cpu_time(wsr_opts, wsr_info), "wsr_build_cpu_time failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_io_wait(wsr_opts, wsr_info), "wsr_build_io_wait failed\n"); + 
CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_gets(wsr_opts, wsr_info), "wsr_build_sql_gets failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_reads(wsr_opts, wsr_info), "wsr_build_sql_reads failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_executions(wsr_opts, wsr_info), "wsr_build_sql_executions failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_parses(wsr_opts, wsr_info), "wsr_build_sql_parses failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_6), + "wsr_build_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_6); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_10), + "wsr_build_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_10); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_15), + "wsr_build_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_15); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_20), + "wsr_build_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_20); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_30), + "wsr_build_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_30); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_long_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_15), + "wsr_build_long_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_15); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_long_sql_first_letters(wsr_opts, wsr_info, CT_WSR_SQL_PREFIX_30), + "wsr_build_long_sql_first_letters failed, prefix num: %d\n", CT_WSR_SQL_PREFIX_30); + } + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_sql_content(wsr_opts, wsr_info), "wsr_build_sql_content failed\n"); if (wsr_opts->input_snap_id) { - CT_RETURN_IFERR(wsr_build_segment_stat(wsr_opts, wsr_info)); - CT_RETURN_IFERR(wsr_build_report_summary(wsr_opts, wsr_info)); 
- CT_RETURN_IFERR(wsr_build_parameter(wsr_opts, wsr_info)); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_segment_stat(wsr_opts, wsr_info), "wsr_build_segment_stat failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_report_summary(wsr_opts, wsr_info), "wsr_build_report_summary failed\n"); + CTSQL_PRINT_AND_RETURN_IFERR(wsr_build_parameter(wsr_opts, wsr_info), "wsr_build_parameter failed\n"); } return CTCONN_SUCCESS; } diff --git a/pkg/test/ct_regress/README.en.md b/pkg/test/ct_regress/README.en.md new file mode 100644 index 0000000000000000000000000000000000000000..f10418ee06063d4d41aa037bfe970882aca98ca8 --- /dev/null +++ b/pkg/test/ct_regress/README.en.md @@ -0,0 +1,18 @@ +## Problems Solved +1. openGauss_cantian ensures that the code submitted through the one-click compilation, deployment, and running example of the gatekeeper will not affect the basic functions. +2. Developers can ensure code quality by executing mtr. +## Usage +Run the test script: +```bash +bash xxx/pkg/test/ct_regress/do_all_test.sh need_compile +``` +Note: need_compile is used to trigger the compilation process. If the code has already been compiled, this parameter can be omitted. +Output: +Test Result: ERROR -- Test case execution failed. You can see which test cases failed at the top of the console. +Test Result: SUCCESS -- Test case execution succeeded. +## Adding or Removing Test Cases +Developers can add or remove test cases in the directory "xxx/pkg/test/ct_regress/ct_schedule_part1". For example: +To run multiple test cases in parallel: test: ct_union_all ct_union ct_datatype +To run a single test case: test: ct_having +## Results +The test results are saved in the directory "xxx/pkg/test/ct_regress/results", while the expected results are stored in the directory "xxx/pkg/test/ct_regress/expected". 
diff --git a/pkg/test/ct_regress/README.md b/pkg/test/ct_regress/README.md new file mode 100644 index 0000000000000000000000000000000000000000..30c0def37cac5ea01e7ab9155175c0d0116cfa47 --- /dev/null +++ b/pkg/test/ct_regress/README.md @@ -0,0 +1,23 @@ +## 解决的问题 +1、openGauss_cantian通过门禁一键编译部署运行样例能够保证提交合入代码不会影响基础功能 +2、开发者能够通过执行mtr保证代码质量 + +## 使用方法 +运行``测试``脚本 + +```bash +bash xxx/pkg/test/ct_regress/do_all_test.sh need_compile +``` +注 need_compile:执行编译,当已经编译过可不加此参数 +输出: +Test Result: ERROR --样例执行失败,出现执行失败,在控制台上方可以看到哪些样例失败 +Test Result: SUCCESS --样例执行成功 + + +## 增减样例 +开发者通过往"xxx/pkg/test/ct_regress/ct_schedule_part1"中增减样例,例如: +test: ct_union_all ct_union ct_datatype (多个样例并行执行) +test: ct_having (单次执行单个用例) + +## 结果 +样例执行结果保存在"xxx/pkg/test/ct_regress/results"中,预期结果存在"xxx/pkg/test/ct_regress/expected" diff --git a/pkg/test/ct_regress/ct_regress_main.c b/pkg/test/ct_regress/ct_regress_main.c index 8e4a29f873d175959e2e89772b970b1ebbe4f9cd..8f07024ea6a57b0d137be1e24c31db84d08daf51 100644 --- a/pkg/test/ct_regress/ct_regress_main.c +++ b/pkg/test/ct_regress/ct_regress_main.c @@ -1,8 +1,26 @@ -/*! - * This program is used for regressing test for Cantian SQL engine +/* ------------------------------------------------------------------------- + * This file is part of the Cantian project. + * Copyright (c) 2024 Huawei Technologies Co.,Ltd. * - * \author Created by pufuan p00421579 - * \date 2017/09/23 + * Cantian is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * ct_regress_main.c + * + * + * IDENTIFICATION + * test/ct_regress/ct_regress_main.c + * + * ------------------------------------------------------------------------- */ #include #include @@ -987,7 +1005,7 @@ spawn_process(const char *cmdline) #endif } - +#ifdef WIN32 static long file_size(const char *file) { long r; @@ -1003,6 +1021,7 @@ static long file_size(const char *file) fclose(f); return r; } +#endif #ifndef WIFEXITED #define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0) diff --git a/pkg/test/ct_regress/ct_schedule_part1 b/pkg/test/ct_regress/ct_schedule_part1 index f86ad03337196a7e3c99b31fe143f01a64ae2e6e..959d915728f6ba4d261fed9afd70d013b7e4056e 100644 --- a/pkg/test/ct_regress/ct_schedule_part1 +++ b/pkg/test/ct_regress/ct_schedule_part1 @@ -4,7 +4,7 @@ test: ct_stat test: ct_withas ct_newpage ct_colsep_long test: ct_alter_session test: ct_expression ct_rownum -test: ct_decimal ct_number ct_number2 +test: ct_decimal ct_number test: ct_json_3 ct_json_4 ct_json_5 test: ct_jsonb_1 ct_jsonb_2 ct_jsonb_table test: ct_having diff --git a/pkg/test/ct_regress/do_all_test.sh b/pkg/test/ct_regress/do_all_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..11f3841876dd8e1906e9d0273f00db46a7e6ea84 --- /dev/null +++ b/pkg/test/ct_regress/do_all_test.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +DIR_PATH=$(cd `dirname $0`;pwd) +ROOT_PATH=$(cd ${DIR_PATH}/../../../;pwd) + +function do_regress() { + rm -rf ${ROOT_PATH}/../cantian_data/* + set +e + yum install lcov -y + export part_name=$1 + export local_build=1 + time bash ${ROOT_PATH}/CI/build/script/run_cantian_test.sh ${part_name} 2>&1 | tee ${ROOT_PATH}/regress_output/LLT_log_\${part_name}.txt + regress_start="========================= Run Regression =======================" + regress_end="********************* END: ct_regress *********************" + awk "/\${regress_start}/{flag=1; next} /\${regress_end}/{flag=0} flag" 
${ROOT_PATH}/regress_output/LLT_log_\${part_name}.txt > ${ROOT_PATH}/regress_output/LLT_result_\${part_name}.txt + cat ${ROOT_PATH}/regress_output/LLT_result_\${part_name}.txt >> ${ROOT_PATH}/regress_output/LLT_result_all.txt + set -e +} + + +mkdir -p ${ROOT_PATH}/regress_output/ +rm -rf ${ROOT_PATH}/regress_output/* +mkdir -p ${ROOT_PATH}/lcov_output/ +rm -rf ${ROOT_PATH}/lcov_output/* +echo "" > ${ROOT_PATH}/regress_output/LLT_result_all.txt +if [ -n "$1" ]; then + # 如果存在,将其赋值给环境变量 pass_build + export pass_build="$1" + echo "pass_build is set to: $pass_build" +fi +regress_result="Test Result: ERROR"; +echo ${regress_result} > ${ROOT_PATH}/regress_output/test_result.txt +do_regress "part1" + +echo '=====================================' +echo '[+] all LLT test has completed!' +echo '[+] testcase results:' +echo ' - $regress_output/LLT_results_part1/*' +echo '[+] script outputs:' +echo ' - regress_output/LLT_log_part1.txt' +echo '[+] LLT results:' +echo ' - regress_output/LLT_result_part1.txt' +echo ' - regress_output/LLT_result_all.txt' +regress_result=$(tail -n 1 $ROOT_PATH/regress_output/test_result.txt) +echo ${regress_result} \ No newline at end of file diff --git a/pkg/test/unit_test/ut/CMakeLists.txt b/pkg/test/unit_test/ut/CMakeLists.txt index c68e69baaffdb626ea252d7bad602a8a2438819d..777480c81fa0f0125d2f870e919fb05e64597e7b 100644 --- a/pkg/test/unit_test/ut/CMakeLists.txt +++ b/pkg/test/unit_test/ut/CMakeLists.txt @@ -5,4 +5,5 @@ add_subdirectory(mes) add_subdirectory(cms) add_subdirectory(server) add_subdirectory(common) -add_subdirectory(ctbackup) \ No newline at end of file +add_subdirectory(ctbackup) +add_subdirectory(ctsql) \ No newline at end of file diff --git a/pkg/test/unit_test/ut/cms/cms_disk_lock_test.cpp b/pkg/test/unit_test/ut/cms/cms_disk_lock_test.cpp index 45e0c00768176dbd9d12156d4c4e69f270d100ae..cc5a38b83587dca56e015a8185becbed497f98e3 100644 --- a/pkg/test/unit_test/ut/cms/cms_disk_lock_test.cpp +++ 
b/pkg/test/unit_test/ut/cms/cms_disk_lock_test.cpp @@ -35,7 +35,8 @@ TEST_F(CmsDiskLockTest, lock_init_fail) { cms_disk_lock_t lock = {0}; int64 node_id = 0; - lock.flock = malloc(sizeof(cms_flock_t)); + lock.flock = (cms_flock_t*)malloc(sizeof(cms_flock_t)); + EXPECT_NE(lock.flock, nullptr); int ret = cms_disk_lock_init_file(&lock, "./fake", 0, node_id, false); EXPECT_EQ(ret, CT_SUCCESS); } \ No newline at end of file diff --git a/pkg/test/unit_test/ut/common/common_device_test.cpp b/pkg/test/unit_test/ut/common/common_device_test.cpp index e0c25f38ccd9c883f9519236cd082d5edfbc6b57..4d091fd3d96019db1b485d87064cac973f37a58c 100644 --- a/pkg/test/unit_test/ut/common/common_device_test.cpp +++ b/pkg/test/unit_test/ut/common/common_device_test.cpp @@ -31,6 +31,7 @@ TEST_F(CMDeviceTest, CreateDeviceTest) { MOCKER(cm_dbs_pg_create).stubs().will(returnValue(CT_SUCCESS)); status_t ret = cm_create_device("TEST", DEV_TYPE_PGPOOL, 0, NULL); + EXPECT_EQ(ret, CT_SUCCESS); GlobalMockObject::reset(); record_io_stat_print(); } \ No newline at end of file diff --git a/pkg/test/unit_test/ut/ctsql/CMakeLists.txt b/pkg/test/unit_test/ut/ctsql/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..10d2b4db11129960835c08ceb1857ac5d69727e8 --- /dev/null +++ b/pkg/test/unit_test/ut/ctsql/CMakeLists.txt @@ -0,0 +1,29 @@ +message(STATUS "build ctsql_ut...") + +include_directories(${CMAKE_SOURCE_DIR}/pkg/src/ctsql) +include_directories(${CMAKE_SOURCE_DIR}/pkg/src/common) +include_directories(${CMAKE_SOURCE_DIR}/pkg/src/kernel/include) +include_directories(${CMAKE_SOURCE_DIR}/pkg/src/cms/interface) +include_directories(${CMAKE_SOURCE_DIR}/pkg/test/unit_test/ut/ctsql/stub) + +set(SERVER_UT_SOURCE ${CMAKE_SOURCE_DIR}/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.cpp + ${CMAKE_SOURCE_DIR}/pkg/test/unit_test/ut/ctsql/test_expl_execute.cpp + ${CMAKE_SOURCE_DIR}/pkg/test/unit_test/ut/ctsql/test_ctsql_main.cpp) + +#add_compile_options(-fno-common) 
+add_compile_options(-Wall -fpermissive) + +#add_link_options(-Wl, -Bsymbolic) +add_link_options(-rdynamic) + +add_executable(ctsql_test ${SERVER_UT_SOURCE}) +set_target_properties(ctsql_test PROPERTIES LINKER_LANGUAGE "CXX") + +target_link_libraries(ctsql_test + -Wl,--start-group + gtest mockcpp gmock zetms zecms zeprotocol m rt dl z zstd lz4 + zecmssrc zeclient zerc zecommon zemes zefdsa zectc zecluster + zeserver zekernel zesql pthread z + -Wl,--end-group) +target_compile_definitions(ctsql_test PRIVATE -DCT_LIB_VERSION=${DD_CT_LIB_VERSION}) +target_compile_definitions(ctsql_test PRIVATE -DGETDBVERSION=${DD_GETDBVERSION}) diff --git a/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.cpp b/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.cpp new file mode 100644 index 0000000000000000000000000000000000000000..86c7bd35504a54e78c849e54682c724729cbbca9 --- /dev/null +++ b/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.cpp @@ -0,0 +1,152 @@ +#include "gtest/gtest.h" +#include + +#include +#include "stub_ctsql.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern instance_t *g_instance; + +#ifdef __cplusplus +} +#endif + +#define STUB_SYS_USER "SYS" +#define STUB_DEFAULT_USER "utdb" +#define STUB_CTDB_HOME (char *)"/tmp/cantian_ut" + +session_t *g_ut_session; +std::string g_ctdb_home; +std::string g_data_dir; + +status_t stub_init_runtime_env() +{ + g_ctdb_home = std::string(STUB_CTDB_HOME); + g_data_dir = g_ctdb_home + SEPERATOR + std::string("data"); + // todo : + // clear environment and make environment. + system("rm -rf /tmp/cantian_ut/data/*"); + system("mkdir -p /tmp/cantian_ut/data"); + // set.. 
+ if (setenv(CMS_ENV_CMS_HOME, STUB_CTDB_HOME, 1) != 0) { + return CT_ERROR; + } + + if (setenv(CT_ENV_HOME, STUB_CTDB_HOME, 1) != 0) { + return CT_ERROR; + } + return CT_SUCCESS; +} + +status_t stub_alloc_new_session(session_t **session) +{ + session_t *tmp_session = NULL; + + agent_t *agent = (agent_t *)malloc(sizeof(agent_t)); + if (agent == NULL) { + CT_THROW_ERROR(ERR_ALLOC_MEMORY, (uint64)sizeof(agent_t), "replcia agent"); + return CT_ERROR; + } + (void)memset_s(agent, sizeof(agent_t), 0, sizeof(agent_t)); + status_t ret = srv_create_agent_private_area(agent); + if (ret != CT_SUCCESS) { + free(agent); + return ret; + } + + ret = srv_alloc_session(&tmp_session, NULL, SESSION_TYPE_USER); + if (ret != CT_SUCCESS) { + free(agent->area_buf); + free(agent); + return ret; + } + + srv_bind_sess_agent(tmp_session, agent); + *session = tmp_session; + + return CT_SUCCESS; +} + +status_t stub_srv_start_lsnr(void) +{ + g_instance->lsnr.tcp_service.type = LSNR_TYPE_SERVICE; + g_instance->lsnr.uds_service.type = LSNR_TYPE_UDS; + return CT_SUCCESS; +} + +status_t stub_instance_init() +{ + status_t ret = CT_ERROR; + if (g_ut_session != NULL) { + return CT_SUCCESS; + } + + ret = stub_init_runtime_env(); + if (ret != CT_SUCCESS) { + return ret; + } + + ret = srv_instance_startup(STARTUP_NOMOUNT, CT_FALSE, CT_FALSE, CT_FALSE); + if (ret != CT_SUCCESS) { + return ret; + } + cms_set_cluster_no_cms_switch(CT_TRUE); + + ret = stub_alloc_new_session(&g_ut_session); + if (ret != CT_SUCCESS) { + return ret; + } + + (void)sprintf_s(g_ut_session->curr_schema, CT_NAME_BUFFER_SIZE, STUB_SYS_USER); + (void)sprintf_s(g_ut_session->db_user, CT_NAME_BUFFER_SIZE, STUB_SYS_USER); + cm_str2text(g_ut_session->db_user, &g_ut_session->curr_user); + + return CT_SUCCESS; +} + +status_t stub_create_database() +{ + sql_stmt_t *stmt = nullptr; + text_t cmd_create_db = {0}; + std::string str_create_db = std::string("CREATE DATABASE CLUSTERED ") + std::string(STUB_DEFAULT_USER) + std::string(" ") + + 
std::string("CONTROLFILE ('") + g_data_dir + SEPERATOR + std::string("ctrl1', '") + g_data_dir + SEPERATOR + + std::string("ctrl2', '") + g_data_dir + SEPERATOR + std::string("ctrl3') ") + + std::string("system TABLESPACE DATAFILE '") + g_data_dir + SEPERATOR + std::string("system' SIZE 128M ") + + std::string("nologging TABLESPACE TEMPFILE '") + g_data_dir + SEPERATOR + std::string("temp2_01' SIZE 128M ") + + std::string("nologging undo TABLESPACE TEMPFILE '") + g_data_dir + SEPERATOR + std::string("temp2_undo' SIZE 128M ") + + std::string("default TABLESPACE DATAFILE '") + g_data_dir + SEPERATOR + std::string("user1' SIZE 16M ") + + std::string("instance node 0 ") + + std::string("undo TABLESPACE DATAFILE '") + g_data_dir + SEPERATOR + std::string("undo01' SIZE 128M ") + + std::string("temporary TABLESPACE TEMPFILE '") + g_data_dir + SEPERATOR + std::string("temp1_01' SIZE 16M ") + + std::string("nologging undo TABLESPACE TEMPFILE '") + g_data_dir + SEPERATOR + std::string("temp2_undo_01' SIZE 128M ") + + std::string("LOGFILE ('") + g_data_dir + SEPERATOR + std::string("log1' SIZE 256M, '") + g_data_dir + SEPERATOR + + std::string("log2' SIZE 256M, '") + g_data_dir + SEPERATOR + std::string("log3' SIZE 256M)"); + + knl_set_curr_sess2tls((void *)&g_ut_session->knl_session); + status_t ret = sql_alloc_stmt(g_ut_session, &stmt); + if (ret != CT_SUCCESS) { + return ret; + } + + text_t sql = {0}; + source_location_t loc = {1, 1}; + sql.str = (char *)str_create_db.c_str(); + sql.len = str_create_db.length(); + ret = sql_parse(stmt, &sql, &loc); + if (ret != CT_SUCCESS) { + sql_free_stmt(stmt); + return ret; + } + g_ut_session->current_stmt = stmt; + stmt->is_verifying = CT_TRUE; + + ret = sql_execute(stmt); + if (ret != CT_SUCCESS) { + sql_free_stmt(stmt); + return ret; + } + return CT_SUCCESS; +} diff --git a/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.h b/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.h new file mode 100644 index 
0000000000000000000000000000000000000000..d5459d031b99e8f754fccf87bfd53235f5e00d44 --- /dev/null +++ b/pkg/test/unit_test/ut/ctsql/stub/stub_ctsql.h @@ -0,0 +1,22 @@ + +#ifndef __UT_STUB_CTSQL_H__ +#define __UT_STUB_CTSQL_H__ + +#include "knl_interface.h" +#include "cms_interface.h" +#include "cm_defs.h" +#include "db_defs.h" +#include "srv_session.h" +#include "srv_instance.h" +#include "ctsql_parser.h" + +#ifdef WIN32 +#define SEPERATOR "\\" +#else +#define SEPERATOR "/" +#endif + +status_t stub_instance_init(); +status_t stub_create_database(); + +#endif diff --git a/pkg/test/unit_test/ut/ctsql/test_ctsql_main.cpp b/pkg/test/unit_test/ut/ctsql/test_ctsql_main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6818cf5fc40493123d9c157f87306cf7396b62b3 --- /dev/null +++ b/pkg/test/unit_test/ut/ctsql/test_ctsql_main.cpp @@ -0,0 +1,32 @@ +#include "gtest/gtest.h" +#include + +#include "stub_ctsql.h" +#include "cm_defs.h" +#include "ctsql_parser.h" + +#ifdef __cplusplus +extern "C" { +#endif + +char *cantiand_get_dbversion() +{ + return "NONE"; +} + +#ifdef __cplusplus +} +#endif + +int main(int argc, char **argv) +{ + testing::InitGoogleTest(&argc, argv); + + status_t ret = stub_instance_init(); + EXPECT_EQ(ret, CT_SUCCESS); + + ret = stub_create_database(); + EXPECT_EQ(ret, CT_SUCCESS); + + return RUN_ALL_TESTS(); +} diff --git a/pkg/test/unit_test/ut/ctsql/test_expl_execute.cpp b/pkg/test/unit_test/ut/ctsql/test_expl_execute.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3f8a95475522f9f4e7c7ed0a1af8c2db28b29417 --- /dev/null +++ b/pkg/test/unit_test/ut/ctsql/test_expl_execute.cpp @@ -0,0 +1,29 @@ +#include "gtest/gtest.h" +#include + +#include + +#include "knl_interface.h" +#include "cm_defs.h" +#include "db_defs.h" +#include "srv_session.h" +#include "srv_instance.h" +#include "ctsql_parser.h" + +extern instance_t *g_instance; +extern session_t *g_ut_session; + +TEST(expl_test, test_explain_parse) +{ + text_t sql = 
{0}; + source_location_t loc = {1, 1}; + sql_stmt_t *stmt = nullptr; + + status_t ret = sql_alloc_stmt(g_ut_session, &stmt); + EXPECT_EQ(ret, CT_SUCCESS); + + sql.str = "explain select * from t1"; + sql.len = strlen(sql.str); + ret = sql_parse(stmt, &sql, &loc); + EXPECT_EQ(ret, CT_SUCCESS); +} diff --git a/pkg/test/unit_test/ut/message_queue/message_queue_test_main.cpp b/pkg/test/unit_test/ut/message_queue/message_queue_test_main.cpp index 23a28ad499948c5e710b0892462d531f7c3232ea..9ac368b7046c28d5ff59fa0f7e3e3b5251cca1dd 100644 --- a/pkg/test/unit_test/ut/message_queue/message_queue_test_main.cpp +++ b/pkg/test/unit_test/ut/message_queue/message_queue_test_main.cpp @@ -32,12 +32,12 @@ protected: TEST_F(TestShm, shm_get_shm_type_and_addr_should_return_ok_when_normal) { - char *tokens[] = {"mmap", "test1", "test2"}; + const char *tokens[] = {"mmap", "test1", "test2"}; struct key_map_s *key_map = (key_map_s *)malloc(sizeof(key_map_s)); - int *count; - int *is_count; + int count = 0; + int is_count = 0; MOCKER(strcpy_s).stubs().will(returnValue(0)); - int ret = shm_get_shm_type_and_addr(tokens, key_map, 0, 0, count, is_count); + int ret = shm_get_shm_type_and_addr(tokens, key_map, 0, 0, &count, &is_count); EXPECT_EQ(ret, -1); free(key_map); }