diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf
index 11841fdb3cbdb48d49b8230c7ce082cc31ec2f24..c7874de7a1c30edb9be2a33c254d507dc0667a94 100755
--- a/src/bin/gs_guc/cluster_guc.conf
+++ b/src/bin/gs_guc/cluster_guc.conf
@@ -33,6 +33,8 @@ alarm_component|string|0,0|NULL|NULL|
 alarm_report_interval|int|0,2147483647|NULL|NULL|
 allow_concurrent_tuple_update|bool|0,0|NULL|NULL|
 enable_huge_pages|bool|0,0|NULL|NULL|
+enable_time_report|bool|0,0|NULL|NULL|
+enable_batch_dispatch|bool|0,0|NULL|NULL|
 allow_create_sysobject|bool|0,0|NULL|NULL|
 allow_system_table_mods|bool|0,0|NULL|NULL|
 application_name|string|0,0|NULL|NULL|
@@ -665,6 +667,8 @@ pagewriter_thread_num|int|1,16|NULL|NULL|
 audit_thread_num|int|1,48|NULL|NULL|
 dw_file_num|int|1,16|NULL|NULL|
 dw_file_size|int|32,256|NULL|NULL|
+parallel_recovery_batch|int|1,100000|NULL|NULL|
+parallel_recovery_timeout|int|1,1000|ms|NULL|
 incremental_checkpoint_timeout|int|1,3600|s|NULL|
 enable_incremental_checkpoint|bool|0,0|NULL|NULL|
 enable_double_write|bool|0,0|NULL|NULL|
diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp
index f3692badfaccaf66b94f23c1fb62a64c2ac081c1..16a6b71744c6a1b2f0ee2f5ffb4abaa299a91c44 100755
--- a/src/common/backend/utils/misc/guc/guc_storage.cpp
+++ b/src/common/backend/utils/misc/guc/guc_storage.cpp
@@ -1221,6 +1221,31 @@ static void InitStorageConfigureNamesBool()
             NULL,
             NULL,
             NULL},
+
+        {{"enable_time_report",
+            PGC_POSTMASTER,
+            NODE_SINGLENODE,
+            RESOURCES_RECOVERY,
+            gettext_noop("Record the processing time of every stage of parallel recovery."),
+            NULL},
+            &g_instance.attr.attr_storage.enable_time_report,
+            false,
+            NULL,
+            NULL,
+            NULL},
+
+        {{"enable_batch_dispatch",
+            PGC_POSTMASTER,
+            NODE_SINGLENODE,
+            RESOURCES_RECOVERY,
+            gettext_noop("Enable batch dispatch for parallel recovery."),
+            NULL},
+            &g_instance.attr.attr_storage.enable_batch_dispatch,
+            true,
+            NULL,
+            NULL,
+            NULL},
+
         /* End-of-list marker */
         {{NULL,
             (GucContext)0,
@@ -3117,6 +3142,22 @@ static void InitStorageConfigureNamesInt()
             NULL,
             NULL,
             NULL},
+
+        {{"parallel_recovery_batch",
+            PGC_SIGHUP,
+            NODE_SINGLENODE,
+            RESOURCES_RECOVERY,
+            gettext_noop("Sets the number of records the startup thread batches before dispatch in parallel recovery."),
+            NULL,
+            0},
+            &g_instance.attr.attr_storage.parallel_recovery_batch,
+            1000,
+            1,
+            100000,
+            NULL,
+            NULL,
+            NULL},
+
         {{"incremental_checkpoint_timeout",
             PGC_SIGHUP,
             NODE_ALL,
@@ -3707,6 +3748,20 @@ static void InitStorageConfigureNamesInt()
             check_ss_txnstatus_cache_size,
             NULL,
             NULL},
+        {{"parallel_recovery_timeout",
+            PGC_SIGHUP,
+            NODE_SINGLENODE,
+            RESOURCES_RECOVERY,
+            gettext_noop("Sets the dispatch timeout for pending records in parallel recovery."),
+            NULL,
+            GUC_UNIT_MS},
+            &g_instance.attr.attr_storage.parallel_recovery_timeout,
+            300,
+            1,
+            1000,
+            NULL,
+            NULL,
+            NULL},
         /* End-of-list marker */
         {{NULL,
             (GucContext)0,
@@ -3855,6 +3910,7 @@ static void InitStorageConfigureNamesReal()
             NULL,
             NULL,
             NULL},
+
         /* End-of-list marker */
         {{NULL,
             (GucContext)0,
diff --git a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp
index 42382170c72e6d83eb0d0c86d0f7e6902f7812a0..9a1daeb2b7b9af5dae7b7e66e99992d0f7a39afe 100755
--- a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp
+++ b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp
@@ -96,7 +96,6 @@ LogDispatcher *g_dispatcher = NULL;
 
 static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */
 static const int32 MAX_PENDING = 1;
-static const int32 MAX_PENDING_STANDBY = 1;
 static const int32 ITEM_QUQUE_SIZE_RATIO = 5;
 
 static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */
@@ -2139,7 +2138,7 @@ void redo_get_worker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList,
     knl_parallel_redo_state state = g_instance.comm_cxt.predo_cxt.state;
     SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock));
 
-    if (state != REDO_IN_PROGRESS) {
+    if (state != REDO_IN_PROGRESS || !g_instance.attr.attr_storage.enable_time_report) {
         *realNum = 0;
         return;
     }
diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp
index 5a23e140d5dc2361a9327abe77e7c0488910ed56..7c5af05b58e076771bc9f108d188cf303ed60ac2 100755
--- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp
+++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp
@@ -96,7 +96,6 @@ LogDispatcher *g_dispatcher = NULL;
 
 static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */
 static const int32 MAX_PENDING = 1;
-static const int32 MAX_PENDING_STANDBY = 1;
 static const int32 ITEM_QUQUE_SIZE_RATIO = 5;
 
 static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */
@@ -167,6 +166,7 @@ static bool DispatchUndoActionRecord(XLogReaderState *record, List *expectedTLIs
 static bool DispatchRollbackFinishRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime);
 static uint32 GetUndoSpaceWorkerId(int zid);
 static void HandleStartupProcInterruptsForParallelRedo(void);
+static bool timeoutForDispatch(void);
 
 RedoWaitInfo redo_get_io_event(int32 event_id);
 
@@ -384,16 +384,14 @@ static LogDispatcher *CreateDispatcher()
     SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock));
     g_instance.comm_cxt.predo_cxt.state = REDO_STARTING_BEGIN;
     SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock));
-    if (OnHotStandBy())
-        newDispatcher->pendingMax = MAX_PENDING_STANDBY;
-    else
-        newDispatcher->pendingMax = MAX_PENDING; /* one batch, one recorder */
+
     newDispatcher->totalCostTime = 0;
     newDispatcher->txnCostTime = 0;
     newDispatcher->pprCostTime = 0;
     newDispatcher->dispatchReadRecPtr = 0;
     newDispatcher->dispatchEndRecPtr = 0;
     newDispatcher->startupTimeCost = t_thrd.xlog_cxt.timeCost;
 
+    newDispatcher->full_sync_dispatch = !g_instance.attr.attr_storage.enable_batch_dispatch;
     return newDispatcher;
 }
@@ -560,6 +558,22 @@ static bool RmgrGistRecordInfoValid(XLogReaderState *record, uint8 minInfo, uint
     return false;
 }
 
+static bool timeoutForDispatch(void)
+{
+    int parallel_recovery_timeout = 0;
+    TimestampTz current_time = 0;
+    TimestampTz dispatch_limit_time = 0;
+
+    current_time = GetCurrentTimestamp();
+
+    parallel_recovery_timeout = g_instance.attr.attr_storage.parallel_recovery_timeout;
+    dispatch_limit_time = TimestampTzPlusMilliseconds(g_dispatcher->lastDispatchTime,
+                                                      parallel_recovery_timeout);
+    if (current_time >= dispatch_limit_time)
+        return true;
+    return false;
+}
+
 void CheckDispatchCount(XLogRecPtr lastCheckLsn)
 {
     uint64 maxCount = 0;
@@ -592,6 +606,8 @@ void CheckDispatchCount(XLogRecPtr lastCheckLsn)
     }
 }
 
+
+
 /* Run from the dispatcher thread. */
 void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime)
 {
@@ -600,6 +616,8 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times
     uint32 indexid = (uint32)-1;
     uint32 rmid = XLogRecGetRmid(record);
     uint32 term = XLogRecGetTerm(record);
+    int dispatch_batch = 0;
+
     if (term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) {
         g_instance.comm_cxt.localinfo_cxt.term_from_xlog = term;
     }
@@ -629,9 +647,11 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times
     g_dispatcher->dispatchReadRecPtr = record->ReadRecPtr;
     g_dispatcher->dispatchEndRecPtr = record->EndRecPtr;
 
+    dispatch_batch = g_instance.attr.attr_storage.enable_batch_dispatch ?
+        g_instance.attr.attr_storage.parallel_recovery_batch : 1;
     if (isNeedFullSync)
         ProcessPendingRecords(true);
-    else if (++g_dispatcher->pendingCount >= g_dispatcher->pendingMax)
+    else if (++g_dispatcher->pendingCount >= dispatch_batch || timeoutForDispatch())
         ProcessPendingRecords();
 
     if (fatalerror == true) {
@@ -1491,6 +1511,9 @@ static bool StandbyWillChangeStandbyState(XLogReaderState *record)
 /* : false not wait for other workers */
 void ProcessPendingRecords(bool fullSync)
 {
+    if (fullSync)
+        g_dispatcher->full_sync_dispatch = true;
+    g_dispatcher->lastDispatchTime = GetCurrentTimestamp();
     if ((get_real_recovery_parallelism() > 1) && (GetPageWorkerCount() > 0)) {
         for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; i++) {
             uint64 blockcnt = 0;
@@ -1516,6 +1539,8 @@ void ProcessPendingRecords(bool fullSync)
         ApplyReadyTxnLogRecords(g_dispatcher->txnWorker, fullSync);
         g_dispatcher->pendingCount = 0;
     }
+    if (fullSync)
+        g_dispatcher->full_sync_dispatch = false;
 }
 
 /* Run from the dispatcher thread. */
@@ -1524,7 +1549,10 @@ void ProcessPendingRecords(bool fullSync)
 void ProcessTrxnRecords(bool fullSync)
 {
     if ((get_real_recovery_parallelism() > 1) && (GetPageWorkerCount() > 0)) {
-        ApplyReadyTxnLogRecords(g_dispatcher->txnWorker, fullSync);
+        if (g_instance.attr.attr_storage.enable_batch_dispatch)
+            ProcessPendingRecords(fullSync);
+        else
+            ApplyReadyTxnLogRecords(g_dispatcher->txnWorker, fullSync);
 
         if (fullSync && (IsTxnWorkerIdle(g_dispatcher->txnWorker))) {
             /* notify pageworker sleep long time */
@@ -1980,7 +2008,7 @@ void redo_get_worker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList,
     knl_parallel_redo_state state = g_instance.comm_cxt.predo_cxt.state;
     SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock));
 
-    if (state != REDO_IN_PROGRESS) {
+    if (state != REDO_IN_PROGRESS || !g_instance.attr.attr_storage.enable_time_report) {
         *realNum = 0;
         return;
     }
@@ -2501,4 +2529,12 @@ static void HandleStartupProcInterruptsForParallelRedo(void)
     if (IsUnderPostmaster && !PostmasterIsAlive())
         gs_thread_exit(1);
 }
+
+bool in_full_sync_dispatch(void)
+{
+    if (!g_dispatcher || !g_instance.attr.attr_storage.enable_batch_dispatch)
+        return true;
+    return g_dispatcher->full_sync_dispatch;
+}
+
 }
diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp
index c9b465f22109c39c00a32f72a4eb9f3cf635692a..de16d88d19dcdaa4766e6a814720d7b0fc972258 100755
--- a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp
+++ b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp
@@ -555,12 +555,61 @@ static void ApplyRecordWithoutSyncUndoLog(RedoItem *item)
     }
 }
 
+/*
+ * If a worker replays a page-vacuum record, it must wait when the
+ * operation could invalidate the current snapshot.
+ */
+static void wait_valid_snapshot(XLogReaderState *record)
+{
+    RmgrId rm_id = XLogRecGetRmid(record);
+    uint8 info = (XLogRecGetInfo(record) & ~XLR_INFO_MASK) & XLOG_HEAP_OPMASK;
+    xl_heap_clean* xlrec = NULL;
+    uint64 blockcnt = 0;
+    XLogRecPtr cur_transed_lsn = InvalidXLogRecPtr;
+
+    if (rm_id != RM_HEAP2_ID || info != XLOG_HEAP2_CLEAN)
+        return;
+
+    xlrec = (xl_heap_clean*)XLogRecGetData(record);
+
+    /*
+     * If xlrec->latestRemovedXid <= t_thrd.xact_cxt.ShmemVariableCache->standbyXmin then
+     * it will not influence the current snapshot, so the redo can be applied.
+     */
+    while (t_thrd.xact_cxt.ShmemVariableCache->standbyXmin < xlrec->latestRemovedXid &&
+        !in_full_sync_dispatch()) {
+        if (cur_transed_lsn == InvalidXLogRecPtr)
+            cur_transed_lsn = getTransedTxnLsn(g_dispatcher->txnWorker);
+        /*
+         * Normally we wait for the startup thread to handle the xact WAL records, but there is a
+         * case where a very old xid commits and no new xact arrives, so xlrec->latestRemovedXid >
+         * t_thrd.xact_cxt.ShmemVariableCache->standbyXmin holds all the time.
+         *
+         * So if all xact records before the current vacuum record have been replayed, avoid waiting.
+         */
+        if (cur_transed_lsn <= GetXLogReplayRecPtr(NULL))
+            return;
+        pg_usleep(10);
+        blockcnt++;
+        if ((blockcnt & OUTPUT_WAIT_COUNT) == OUTPUT_WAIT_COUNT) {
+            XLogRecPtr LatestReplayedRecPtr = GetXLogReplayRecPtr(NULL);
+            ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_LOG),
+                errmsg("[REDO_LOG_TRACE]wait_valid_snapshot:recordEndLsn:%lu, blockcnt:%lu, "
+                       "Workerid:%u, LatestReplayedRecPtr:%lu",
+                       record->EndRecPtr, blockcnt, g_redoWorker->id, LatestReplayedRecPtr)));
+        }
+        RedoInterruptCallBack();
+    }
+}
+
 /* Run from the worker thread. */
 static void ApplySinglePageRecord(RedoItem *item, bool replayUndo)
 {
     XLogReaderState *record = &item->record;
     long readbufcountbefore = u_sess->instr_cxt.pg_buffer_usage->local_blks_read;
     MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx);
+
+    wait_valid_snapshot(record);
     ApplyRedoRecord(record);
     (void)MemoryContextSwitchTo(oldCtx);
     record->readblocks = u_sess->instr_cxt.pg_buffer_usage->local_blks_read - readbufcountbefore;
diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp
index 0c707c40fa09f6258b7a4bee9236e2f11cf961c7..141671aa18e136a79d8a2a63099f9eb1f1144fc0 100644
--- a/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp
+++ b/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp
@@ -61,8 +61,15 @@ struct TxnRedoWorker {
     RedoItem *pendingTail; /* The tail of the RedoItem list. */
     RedoItem *procHead;
     RedoItem *procTail;
+    XLogRecPtr dispatched_txn_lsn;
+    XLogRecPtr transed_txn_lsn;
 };
 
+XLogRecPtr getTransedTxnLsn(TxnRedoWorker *worker)
+{
+    return (XLogRecPtr)pg_atomic_read_u64((volatile uint64*)&worker->transed_txn_lsn);
+}
+
 TxnRedoWorker *StartTxnRedoWorker()
 {
     TxnRedoWorker *worker = (TxnRedoWorker *)palloc(sizeof(TxnRedoWorker));
@@ -71,6 +78,8 @@ TxnRedoWorker *StartTxnRedoWorker()
     worker->procHead = NULL;
     worker->procTail = NULL;
 
+    worker->dispatched_txn_lsn = 0;
+    worker->transed_txn_lsn = 0;
     return worker;
 }
 
@@ -90,6 +99,7 @@ void AddTxnRedoItem(TxnRedoWorker *worker, RedoItem *item)
      * TxnRedoItems are never shared with other workers.
      * Simply use the next pointer for worker 0.
      */
+    worker->dispatched_txn_lsn = item->record.EndRecPtr;
     if (worker->pendingHead == NULL) {
         worker->pendingHead = item;
     } else {
@@ -210,6 +220,7 @@ void MoveTxnItemToApplyQueue(TxnRedoWorker *worker)
     worker->procTail = worker->pendingTail;
     worker->pendingHead = NULL;
     worker->pendingTail = NULL;
+    pg_atomic_write_u64(&worker->transed_txn_lsn, worker->dispatched_txn_lsn);
 }
 
 static RedoItem *ProcTxnItem(RedoItem *item)
diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp
index 89da5a7dbcb7047a18a1b9ba777a596128981a4d..defe71e22a2fc808ff030de2bccd0cd1932dbe64 100755
--- a/src/gausskernel/storage/ipc/procarray.cpp
+++ b/src/gausskernel/storage/ipc/procarray.cpp
@@ -2161,7 +2161,8 @@ RETRY_GET:
         } else if (LWLockConditionalAcquire(ProcArrayLock, LW_EXCLUSIVE)) {
             if ((t_thrd.xact_cxt.ShmemVariableCache->standbyXmin <=
                 t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXmin) &&
-                (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn)) {
+                (t_thrd.xact_cxt.ShmemVariableCache->standbyRedoCleanupXminLsn > redoEndLsn) &&
+                parallel_recovery::in_full_sync_dispatch()) {
                 LWLockRelease(ProcArrayLock);
                 retry_get = true;
                 goto RETRY_GET;
diff --git a/src/include/access/multi_redo_api.h b/src/include/access/multi_redo_api.h
index 1471c37e63c88a1147f2456abcd0b8bd12037411..750e5b4518f49c7593eb5d45709d1ca13aee704b 100644
--- a/src/include/access/multi_redo_api.h
+++ b/src/include/access/multi_redo_api.h
@@ -127,18 +127,27 @@ void ResetXLogStatics();
 
 static inline void GetRedoStartTime(RedoTimeCost &cost)
 {
+    if (!g_instance.attr.attr_storage.enable_time_report)
+        return;
     cost.startTime = GetCurrentTimestamp();
 }
 
 static inline void CountRedoTime(RedoTimeCost &cost)
 {
+    if (!g_instance.attr.attr_storage.enable_time_report)
+        return;
     cost.totalDuration += GetCurrentTimestamp() - cost.startTime;
     cost.counter += 1;
 }
 
 static inline void CountAndGetRedoTime(RedoTimeCost &curCost, RedoTimeCost &nextCost)
 {
-    uint64 curTime = GetCurrentTimestamp();
+    uint64 curTime = 0;
+
+    if (!g_instance.attr.attr_storage.enable_time_report)
+        return;
+
+    curTime = GetCurrentTimestamp();
     curCost.totalDuration += curTime - curCost.startTime;
     curCost.counter += 1;
     nextCost.startTime = curTime;
diff --git a/src/include/access/parallel_recovery/dispatcher.h b/src/include/access/parallel_recovery/dispatcher.h
index cad5d9481aef597045da4e017da3957049ec7d8c..047c36002e05b9fca36af18b02b63fe33f860bf3 100644
--- a/src/include/access/parallel_recovery/dispatcher.h
+++ b/src/include/access/parallel_recovery/dispatcher.h
@@ -64,6 +64,7 @@ typedef struct LogDispatcher {
     uint64 pprCostTime;
     uint32 maxItemNum;
     uint32 curItemNum;
+    TimestampTz lastDispatchTime; /* last time we dispatched the record list to workers */
 
     uint32* chosedWorkerIds;
     uint32 chosedWorkerCount;
@@ -75,6 +76,7 @@ typedef struct LogDispatcher {
     XLogRedoNumStatics xlogStatics[RM_NEXT_ID][MAX_XLOG_INFO_NUM];
     RedoTimeCost *startupTimeCost;
     DispatchFix dispatchFix;
+    bool full_sync_dispatch;
 } LogDispatcher;
 
 extern LogDispatcher* g_dispatcher;
@@ -140,7 +142,7 @@ extern void CopyDataFromOldReader(XLogReaderState *newReaderState, XLogReaderSta
 
 bool TxnQueueIsEmpty(TxnRedoWorker* worker);
 void redo_get_worker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum);
-
+bool in_full_sync_dispatch(void);
 }
 
 #endif
diff --git a/src/include/access/parallel_recovery/txn_redo.h b/src/include/access/parallel_recovery/txn_redo.h
index bfbd04b1ba3410500e7b64a041ef8f210d672d6f..6c1cb3fe41fbbc7dc8db53b0a3be60208e3990c8 100644
--- a/src/include/access/parallel_recovery/txn_redo.h
+++ b/src/include/access/parallel_recovery/txn_redo.h
@@ -38,5 +38,6 @@ void ApplyReadyTxnLogRecords(TxnRedoWorker* worker, bool forceAll);
 void MoveTxnItemToApplyQueue(TxnRedoWorker* worker);
 void DumpTxnWorker(TxnRedoWorker* txnWorker);
 bool IsTxnWorkerIdle(TxnRedoWorker* worker);
+XLogRecPtr getTransedTxnLsn(TxnRedoWorker *worker);
 }
 #endif
diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h
index cbb8ec287a2b0e606062fdb8527e6c8fcb27b50d..7161e8d1152ba728fe5a21ec00b5b3fa0eb21e1b 100755
--- a/src/include/knl/knl_guc/knl_instance_attr_storage.h
+++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h
@@ -213,6 +213,10 @@ typedef struct knl_instance_attr_storage {
 #endif
     bool enable_huge_pages;
     int huge_page_size;
+    bool enable_time_report;
+    bool enable_batch_dispatch;
+    int parallel_recovery_timeout;
+    int parallel_recovery_batch;
     bool enable_ss_dorado;
 } knl_instance_attr_storage;
 
diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source
index 991902d047068f1e6053bde310ed2e676ffa9c7a..9c4c59000dd0a731889c83e4347565bc7f3cf8e1 100644
--- a/src/test/regress/output/recovery_2pc_tools.source
+++ b/src/test/regress/output/recovery_2pc_tools.source
@@ -225,6 +225,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
 enable_auto_clean_unique_sql | bool | | |
 enable_auto_explain | bool | | |
 enable_availablezone | bool | | |
+ enable_batch_dispatch | bool | | |
 enable_bbox_dump | bool | | |
 enable_beta_features | bool | | |
 enable_beta_opfusion | bool | | |
@@ -331,6 +332,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
 enable_tde | bool | | |
 enable_thread_pool | bool | | |
 enable_tidscan | bool | | |
+ enable_time_report | bool | | |
 enable_union_all_subquery_orderby | bool | | |
 enable_upgrade_merge_lock_mode | bool | | |
 enable_user_metric_persistent | bool | | |
@@ -513,6 +515,8 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c
 opfusion_debug_mode | enum | | |
 pagewriter_sleep | integer | ms | 0 | 3600000
 pagewriter_thread_num | integer | | 1 | 16
+ parallel_recovery_batch | integer | | 1 | 100000
+ parallel_recovery_timeout | integer | ms | 1 | 1000
 partition_iterator_elimination | bool | | |
 partition_lock_upgrade_timeout | integer | | -1 | 3000
 partition_max_cache_size | integer | kB | 4096 | 1073741823
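
For reference only (not part of the patch): a minimal configuration sketch restating the defaults and ranges registered in guc_storage.cpp above, written as ordinary postgresql.conf entries. enable_time_report and enable_batch_dispatch are PGC_POSTMASTER and take effect after a restart; parallel_recovery_batch and parallel_recovery_timeout are PGC_SIGHUP and can be changed with a reload.

# parallel-recovery dispatch tuning (sketch; values shown are the patch defaults)
enable_time_report = off            # collect per-stage redo timing for redo_get_worker_time_count
enable_batch_dispatch = on          # dispatch pending records in batches instead of one at a time
parallel_recovery_batch = 1000      # records the startup thread accumulates per batch (1..100000)
parallel_recovery_timeout = 300ms   # force a dispatch after this interval (1..1000 ms)

The pg_settings rows added to recovery_2pc_tools.source above check the same metadata (vartype, unit, min_val, max_val) for these parameters.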