From 052fa2fda50b52270e2906f6b3fabdd111ec3865 Mon Sep 17 00:00:00 2001 From: movead Date: Tue, 22 Aug 2023 20:15:14 +0800 Subject: [PATCH] merge redo performance patch from master to 2.0 --- src/bin/gs_guc/cluster_guc.conf | 4 ++ .../backend/utils/misc/guc/guc_storage.cpp | 56 +++++++++++++++++++ .../access/transam/extreme_rto/dispatcher.cpp | 3 +- .../transam/parallel_recovery/dispatcher.cpp | 53 +++++++++++++++--- .../transam/parallel_recovery/page_redo.cpp | 52 +++++++++++++++++ .../transam/parallel_recovery/txn_redo.cpp | 18 ++++++ src/gausskernel/storage/ipc/procarray.cpp | 3 +- src/include/access/multi_redo_api.h | 11 +++- .../access/parallel_recovery/dispatcher.h | 3 + .../access/parallel_recovery/txn_redo.h | 2 + .../knl/knl_guc/knl_instance_attr_storage.h | 4 ++ .../regress/output/recovery_2pc_tools.source | 4 ++ 12 files changed, 201 insertions(+), 12 deletions(-) diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index 1e1c43a9d9..1321d31dac 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -33,6 +33,8 @@ alarm_component|string|0,0|NULL|NULL| alarm_report_interval|int|0,2147483647|NULL|NULL| allow_concurrent_tuple_update|bool|0,0|NULL|NULL| enable_huge_pages|bool|0,0|NULL|NULL| +parallel_recovery_cost_record|bool|0,0|NULL|NULL| +enable_batch_dispatch|bool|0,0|NULL|NULL| allow_create_sysobject|bool|0,0|NULL|NULL| allow_system_table_mods|bool|0,0|NULL|NULL| application_name|string|0,0|NULL|NULL| @@ -646,6 +648,8 @@ pagewriter_thread_num|int|1,16|NULL|NULL| audit_thread_num|int|1,48|NULL|NULL| dw_file_num|int|1,16|NULL|NULL| dw_file_size|int|32,256|NULL|NULL| +parallel_recovery_batch|int|1,100000|NULL|NULL| +parallel_recovery_timeout|int|1,1000|ms|NULL| incremental_checkpoint_timeout|int|1,3600|s|NULL| enable_incremental_checkpoint|bool|0,0|NULL|NULL| enable_double_write|bool|0,0|NULL|NULL| diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp 
b/src/common/backend/utils/misc/guc/guc_storage.cpp index 602328ef0a..71896a8b7c 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -1024,6 +1024,31 @@ static void InitStorageConfigureNamesBool() NULL, NULL, NULL}, + + {{"parallel_recovery_cost_record", + PGC_POSTMASTER, + NODE_SINGLENODE, + RESOURCES_RECOVERY, + gettext_noop("Record process time in every stage of parallel reovery"), + NULL}, + &g_instance.attr.attr_storage.parallel_recovery_cost_record, + false, + NULL, + NULL, + NULL}, + + {{"enable_batch_dispatch", + PGC_POSTMASTER, + NODE_SINGLENODE, + RESOURCES_RECOVERY, + gettext_noop("Enable batch dispatch for parallel reovery"), + NULL}, + &g_instance.attr.attr_storage.enable_batch_dispatch, + true, + NULL, + NULL, + NULL}, + /* End-of-list marker */ {{NULL, (GucContext)0, @@ -2892,6 +2917,22 @@ static void InitStorageConfigureNamesInt() NULL, NULL, NULL}, + + {{"parallel_recovery_batch", + PGC_SIGHUP, + NODE_SINGLENODE, + RESOURCES_RECOVERY, + gettext_noop("Set the batch that starup thread hold in parallel recovery."), + NULL, + 0}, + &g_instance.attr.attr_storage.parallel_recovery_batch, + 1000, + 1, + 100000, + NULL, + NULL, + NULL}, + {{"incremental_checkpoint_timeout", PGC_SIGHUP, NODE_ALL, @@ -3300,6 +3341,20 @@ static void InitStorageConfigureNamesInt() NULL, NULL, NULL}, + {{"parallel_recovery_timeout", + PGC_SIGHUP, + NODE_SINGLENODE, + RESOURCES_RECOVERY, + gettext_noop("parallel recovery timeout."), + NULL, + GUC_UNIT_MS}, + &g_instance.attr.attr_storage.parallel_recovery_timeout, + 300, + 1, + 1000, + NULL, + NULL, + NULL}, /* End-of-list marker */ {{NULL, (GucContext)0, @@ -3546,6 +3601,7 @@ static void InitStorageConfigureNamesInt64() NULL, NULL, NULL}, + /* End-of-list marker */ {{NULL, (GucContext)0, diff --git a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp index 
4cf7168e50..3c4e255886 100755 --- a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp @@ -96,7 +96,6 @@ LogDispatcher *g_dispatcher = NULL; static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */ static const int32 MAX_PENDING = 1; -static const int32 MAX_PENDING_STANDBY = 1; static const int32 ITEM_QUQUE_SIZE_RATIO = 5; static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */ @@ -2040,7 +2039,7 @@ void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, knl_parallel_redo_state state = g_instance.comm_cxt.predo_cxt.state; SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - if (state != REDO_IN_PROGRESS) { + if (state != REDO_IN_PROGRESS || !g_instance.attr.attr_storage.parallel_recovery_cost_record) { *realNum = 0; return; } diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp index bea539fbe3..2639ec997f 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp @@ -96,7 +96,6 @@ LogDispatcher *g_dispatcher = NULL; static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */ static const int32 MAX_PENDING = 1; -static const int32 MAX_PENDING_STANDBY = 1; static const int32 ITEM_QUQUE_SIZE_RATIO = 5; static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */ @@ -166,6 +165,7 @@ static bool DispatchUndoActionRecord(XLogReaderState *record, List *expectedTLIs static bool DispatchRollbackFinishRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime); static uint32 GetUndoSpaceWorkerId(int zid); static void HandleStartupProcInterruptsForParallelRedo(void); +static bool timeoutForDispatch(void); RedoWaitInfo redo_get_io_event(int32 event_id); @@ -381,16 +381,14 @@ static LogDispatcher 
*CreateDispatcher() SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); g_instance.comm_cxt.predo_cxt.state = REDO_STARTING_BEGIN; SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - if (OnHotStandBy()) - newDispatcher->pendingMax = MAX_PENDING_STANDBY; - else - newDispatcher->pendingMax = MAX_PENDING; /* one batch, one recorder */ + newDispatcher->totalCostTime = 0; newDispatcher->txnCostTime = 0; newDispatcher->pprCostTime = 0; newDispatcher->dispatchReadRecPtr = 0; newDispatcher->dispatchEndRecPtr = 0; newDispatcher->startupTimeCost = t_thrd.xlog_cxt.timeCost; + newDispatcher->full_sync_dispatch = !g_instance.attr.attr_storage.enable_batch_dispatch; return newDispatcher; } @@ -557,6 +555,22 @@ static bool RmgrGistRecordInfoValid(XLogReaderState *record, uint8 minInfo, uint return false; } +static bool timeoutForDispatch(void) +{ + int parallel_recovery_timeout = 0; + TimestampTz current_time = 0; + TimestampTz dispatch_limit_time = 0; + + current_time = GetCurrentTimestamp(); + + parallel_recovery_timeout = g_instance.attr.attr_storage.parallel_recovery_timeout; + dispatch_limit_time = TimestampTzPlusMilliseconds(g_dispatcher->lastDispatchTime, + parallel_recovery_timeout); + if(current_time >= dispatch_limit_time) + return true; + return false; +} + void CheckDispatchCount(XLogRecPtr lastCheckLsn) { uint64 maxCount = 0; @@ -589,6 +603,8 @@ void CheckDispatchCount(XLogRecPtr lastCheckLsn) } } + + /* Run from the dispatcher thread. 
*/ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { @@ -597,6 +613,8 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times uint32 indexid = (uint32)-1; uint32 rmid = XLogRecGetRmid(record); uint32 term = XLogRecGetTerm(record); + int dispatch_batch = 0; + if (term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { g_instance.comm_cxt.localinfo_cxt.term_from_xlog = term; } @@ -626,9 +644,11 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times g_dispatcher->dispatchReadRecPtr = record->ReadRecPtr; g_dispatcher->dispatchEndRecPtr = record->EndRecPtr; + dispatch_batch = g_instance.attr.attr_storage.enable_batch_dispatch ? + g_instance.attr.attr_storage.parallel_recovery_batch : 1; if (isNeedFullSync) ProcessPendingRecords(true); - else if (++g_dispatcher->pendingCount >= g_dispatcher->pendingMax) + else if (++g_dispatcher->pendingCount >= dispatch_batch || timeoutForDispatch()) ProcessPendingRecords(); if (fatalerror == true) { @@ -1463,6 +1483,10 @@ void ProcessPendingRecords(bool fullSync) // consider a simple transaction, at least 4 xlog, a heap insert ,a btree insert , a committing and a commit uint64 redoTxnMask = 0x3; static uint64 redoTxnCount = 0; + + if(fullSync) + g_dispatcher->full_sync_dispatch = true; + g_dispatcher->lastDispatchTime = GetCurrentTimestamp(); for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; i++) { uint64 blockcnt = 0; pgstat_report_waitevent(WAIT_EVENT_PREDO_PROCESS_PENDING); @@ -1490,6 +1514,8 @@ void ProcessPendingRecords(bool fullSync) } g_dispatcher->pendingCount = 0; + if(fullSync) + g_dispatcher->full_sync_dispatch = false; } /* Run from the dispatcher thread. 
*/ @@ -1498,7 +1524,10 @@ void ProcessPendingRecords(bool fullSync) void ProcessTrxnRecords(bool fullSync) { if ((get_real_recovery_parallelism() > 1) && (GetPageWorkerCount() > 0)) { - ApplyReadyTxnLogRecords(g_dispatcher->txnWorker, fullSync); + if (g_instance.attr.attr_storage.enable_batch_dispatch) + ProcessPendingRecords(fullSync); + else + ApplyReadyTxnLogRecords(g_dispatcher->txnWorker, fullSync); if (fullSync && (IsTxnWorkerIdle(g_dispatcher->txnWorker))) { /* notify pageworker sleep long time */ @@ -1952,7 +1981,7 @@ void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, knl_parallel_redo_state state = g_instance.comm_cxt.predo_cxt.state; SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - if (state != REDO_IN_PROGRESS) { + if (state != REDO_IN_PROGRESS || !g_instance.attr.attr_storage.parallel_recovery_cost_record) { *realNum = 0; return; } @@ -2468,4 +2497,12 @@ static void HandleStartupProcInterruptsForParallelRedo(void) if (IsUnderPostmaster && !PostmasterIsAlive()) gs_thread_exit(1); } + +bool in_full_sync_dispatch(void) +{ + if (!g_dispatcher || !g_instance.attr.attr_storage.enable_batch_dispatch) + return true; + return g_dispatcher->full_sync_dispatch; +} + } diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp index e71724552d..60feeb9789 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp @@ -578,12 +578,64 @@ static void ApplyRecordWithoutSyncUndoLog(RedoItem *item) } } +/* + * If woker do a page vacuum redo, it should wait if it's operator + * may cause snapshot invalid. 
+ */ +static void wait_valid_snapshot(XLogReaderState *record) +{ + RmgrId rm_id = XLogRecGetRmid(record); + uint8 info = (XLogRecGetInfo(record) & ~XLR_INFO_MASK) & XLOG_HEAP_OPMASK; + xl_heap_clean* xlrec = NULL; + uint64 blockcnt = 0; + XLogRecPtr cur_transed_lsn = InvalidXLogRecPtr; + XLogRecPtr txn_trying_lsn = InvalidXLogRecPtr; + + if(rm_id != RM_HEAP2_ID || info != XLOG_HEAP2_CLEAN) + return; + + xlrec = (xl_heap_clean*)XLogRecGetData(record); + + /* + * If xlrec->latestRemovedXid <= t_thrd.xact_cxt.ShmemVariableCache->standbyXmin then + * it will not incluence current snapshot, so it can exec redo. + */ + while(t_thrd.xact_cxt.ShmemVariableCache->standbyXmin < xlrec->latestRemovedXid && + !in_full_sync_dispatch()) { + if(cur_transed_lsn == InvalidXLogRecPtr) + cur_transed_lsn = getTransedTxnLsn(g_dispatcher->txnWorker); + txn_trying_lsn = getTryingTxnLsn(g_dispatcher->txnWorker); + /* + * Normaly, it need wait for startup thread handle xact wal records, but there be a case + * that if a very old xid commit and no new xact comes then xlrec->latestRemovedXid > + * t_thrd.xact_cxt.ShmemVariableCache->standbyXmin all the time. + * + * So if we do not have new xact work in startup thread, it avoid wait. + * And if startup go fast then here on lsn, it can avoid wait too. + */ + if (cur_transed_lsn <= GetXLogReplayRecPtr(NULL) || txn_trying_lsn >= record->EndRecPtr) + return; + + blockcnt++; + if ((blockcnt & OUTPUT_WAIT_COUNT) == OUTPUT_WAIT_COUNT) { + XLogRecPtr LatestReplayedRecPtr = GetXLogReplayRecPtr(NULL); + ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[REDO_LOG_TRACE]wait_valid_snapshot:recordEndLsn:%lu, blockcnt:%lu, " + "Workerid:%u, LatestReplayedRecPtr:%lu", + record->EndRecPtr, blockcnt, g_redoWorker->id, LatestReplayedRecPtr))); + } + RedoInterruptCallBack(); + } +} + /* Run from the worker thread. 
*/ static void ApplySinglePageRecord(RedoItem *item, bool replayUndo) { XLogReaderState *record = &item->record; long readbufcountbefore = u_sess->instr_cxt.pg_buffer_usage->local_blks_read; MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); + + wait_valid_snapshot(record); ApplyRedoRecord(record); (void)MemoryContextSwitchTo(oldCtx); record->readblocks = u_sess->instr_cxt.pg_buffer_usage->local_blks_read - readbufcountbefore; diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp index 0c707c40fa..0079b4a61b 100644 --- a/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp @@ -61,8 +61,21 @@ struct TxnRedoWorker { RedoItem *pendingTail; /* The tail of the RedoItem list. */ RedoItem *procHead; RedoItem *procTail; + XLogRecPtr dispatched_txn_lsn; /* Max lsn dispatched to txn worker*/ + XLogRecPtr transed_txn_lsn; /* Max lsn transfer to txn worker list*/ + XLogRecPtr txn_trying_lsn; /* EndPtr of trying record on txn worker*/ }; +XLogRecPtr getTransedTxnLsn(TxnRedoWorker *worker) +{ + return (XLogRecPtr)pg_atomic_read_u64((volatile uint64*)&worker->transed_txn_lsn); +} + +XLogRecPtr getTryingTxnLsn(TxnRedoWorker *worker) +{ + return (XLogRecPtr)pg_atomic_read_u64((volatile uint64*)&worker->txn_trying_lsn); +} + TxnRedoWorker *StartTxnRedoWorker() { TxnRedoWorker *worker = (TxnRedoWorker *)palloc(sizeof(TxnRedoWorker)); @@ -71,6 +84,8 @@ TxnRedoWorker *StartTxnRedoWorker() worker->procHead = NULL; worker->procTail = NULL; + worker->dispatched_txn_lsn = 0; + worker->transed_txn_lsn = 0; return worker; } @@ -90,6 +105,7 @@ void AddTxnRedoItem(TxnRedoWorker *worker, RedoItem *item) * TxnRedoItems are never shared with other workers. * Simply use the next pointer for worker 0. 
*/ + worker->dispatched_txn_lsn = item->record.EndRecPtr; if (worker->pendingHead == NULL) { worker->pendingHead = item; } else { @@ -210,6 +226,7 @@ void MoveTxnItemToApplyQueue(TxnRedoWorker *worker) worker->procTail = worker->pendingTail; worker->pendingHead = NULL; worker->pendingTail = NULL; + pg_atomic_write_u64(&worker->transed_txn_lsn, worker->dispatched_txn_lsn); } static RedoItem *ProcTxnItem(RedoItem *item) @@ -256,6 +273,7 @@ void ApplyReadyTxnLogRecords(TxnRedoWorker *worker, bool forceAll) XLogReaderState *record = &item->record; XLogRecPtr lrEnd; + pg_atomic_write_u64(&worker->txn_trying_lsn, record->EndRecPtr); if (forceAll) { GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_6]); XLogRecPtr lrRead; /* lastReplayedReadPtr */ diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp index 935a4bf76e..af01874b5c 100755 --- a/src/gausskernel/storage/ipc/procarray.cpp +++ b/src/gausskernel/storage/ipc/procarray.cpp @@ -2136,7 +2136,8 @@ RETRY_GET: } else if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE)) { if ((t_thrd.xact_cxt.ShmemVariableCache->standbyXmin <= t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin) && - (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn)) { + (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn) && + parallel_recovery::in_full_sync_dispatch()) { LWLockRelease(ProcArrayLock); retry_get = true; goto RETRY_GET; diff --git a/src/include/access/multi_redo_api.h b/src/include/access/multi_redo_api.h index 9f9da40fc1..1d8c504135 100644 --- a/src/include/access/multi_redo_api.h +++ b/src/include/access/multi_redo_api.h @@ -124,18 +124,27 @@ void ResetXLogStatics(); static inline void GetRedoStartTime(RedoTimeCost &cost) { + if(!g_instance.attr.attr_storage.parallel_recovery_cost_record) + return; cost.startTime = GetCurrentTimestamp(); } static inline void CountRedoTime(RedoTimeCost &cost) { + 
if(!g_instance.attr.attr_storage.parallel_recovery_cost_record) + return; cost.totalDuration += GetCurrentTimestamp() - cost.startTime; cost.counter += 1; } static inline void CountAndGetRedoTime(RedoTimeCost &curCost, RedoTimeCost &nextCost) { - uint64 curTime = GetCurrentTimestamp(); + uint64 curTime = 0; + + if(!g_instance.attr.attr_storage.parallel_recovery_cost_record) + return; + + curTime = GetCurrentTimestamp(); curCost.totalDuration += curTime - curCost.startTime; curCost.counter += 1; nextCost.startTime = curTime; diff --git a/src/include/access/parallel_recovery/dispatcher.h b/src/include/access/parallel_recovery/dispatcher.h index 45474bcf69..03d9fc9b6c 100644 --- a/src/include/access/parallel_recovery/dispatcher.h +++ b/src/include/access/parallel_recovery/dispatcher.h @@ -64,6 +64,7 @@ typedef struct LogDispatcher { uint64 pprCostTime; uint32 maxItemNum; uint32 curItemNum; + TimestampTz lastDispatchTime; /* last time we dispatch record list to workers */ uint32* chosedWorkerIds; uint32 chosedWorkerCount; @@ -75,6 +76,7 @@ typedef struct LogDispatcher { XLogRedoNumStatics xlogStatics[RM_NEXT_ID][MAX_XLOG_INFO_NUM]; RedoTimeCost *startupTimeCost; DispatchFix dispatchFix; + bool full_sync_dispatch; } LogDispatcher; extern LogDispatcher* g_dispatcher; @@ -140,6 +142,7 @@ extern void CopyDataFromOldReader(XLogReaderState *newReaderState, XLogReaderSta bool TxnQueueIsEmpty(TxnRedoWorker* worker); void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum); +bool in_full_sync_dispatch(void); } diff --git a/src/include/access/parallel_recovery/txn_redo.h b/src/include/access/parallel_recovery/txn_redo.h index bfbd04b1ba..3b758cea61 100644 --- a/src/include/access/parallel_recovery/txn_redo.h +++ b/src/include/access/parallel_recovery/txn_redo.h @@ -38,5 +38,7 @@ void ApplyReadyTxnLogRecords(TxnRedoWorker* worker, bool forceAll); void MoveTxnItemToApplyQueue(TxnRedoWorker* worker); void DumpTxnWorker(TxnRedoWorker*
txnWorker); bool IsTxnWorkerIdle(TxnRedoWorker* worker); +XLogRecPtr getTransedTxnLsn(TxnRedoWorker *worker); +XLogRecPtr getTryingTxnLsn(TxnRedoWorker *worker); } #endif diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h index cf5287235b..97fc06e557 100755 --- a/src/include/knl/knl_guc/knl_instance_attr_storage.h +++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h @@ -161,6 +161,10 @@ typedef struct knl_instance_attr_storage { int max_active_gtt; bool enable_huge_pages; int huge_page_size; + bool parallel_recovery_cost_record; + bool enable_batch_dispatch; + int parallel_recovery_timeout; + int parallel_recovery_batch; } knl_instance_attr_storage; #endif /* SRC_INCLUDE_KNL_KNL_INSTANCE_ATTR_STORAGE_H_ */ diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index b697766fd7..e52d790021 100755 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -216,6 +216,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c enable_auto_clean_unique_sql | bool | | | enable_auto_explain | bool | | | enable_availablezone | bool | | | + enable_batch_dispatch | bool | | | enable_bbox_dump | bool | | | enable_beta_features | bool | | | enable_beta_opfusion | bool | | | @@ -478,6 +479,9 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c opfusion_debug_mode | enum | | | pagewriter_sleep | integer | ms | 0 | 3600000 pagewriter_thread_num | integer | | 1 | 16 + parallel_recovery_batch | integer | | 1 | 100000 + parallel_recovery_cost_record | bool | | | + parallel_recovery_timeout | integer | ms | 1 | 1000 partition_lock_upgrade_timeout | integer | | -1 | 3000 partition_max_cache_size | integer | kB | 4096 | 1073741823 partition_mem_batch | integer | | 1 | 65535 -- Gitee