From 25207a5d21147dc2c57341448a1062a9eaf3f160 Mon Sep 17 00:00:00 2001
From: yangge
Date: Mon, 21 Aug 2023 14:21:18 -0400
Subject: [PATCH] anolis: mm,compaction: Don't use ALLOC_CMA in long term gup path

ANBZ: #7026

When a user allocates massive amounts of pages via long term gup, the
__compaction_suitable() routine is likely to return COMPACT_CONTINUE,
and the allocation then falls into the direct reclaim path. The
resulting high direct reclaim latency makes memory allocation very
slow. When the migration source pages are long-term pinned and thus
cannot be migrated to CMA regions, it is unreasonable for
__compaction_suitable() to return COMPACT_CONTINUE.

The test setup has 4 NUMA nodes with 32G of memory per node. Launching
a CSV guest with 64G of memory, which exceeds the memory size of a
single node, takes about 500 to 1000 seconds. With this patch, the
launch time is reduced to 20 seconds.

This patch simply drops ALLOC_CMA from the watermark checks in
__compaction_suitable() and __isolate_free_page() when the current
task is in the long term gup (PF_MEMALLOC_NOCMA) path, which avoids
the issue.

Signed-off-by: yangge
Signed-off-by: Xin Jiang
---
 mm/compaction.c | 4 +++-
 mm/page_alloc.c | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index b0b52562a46d..e36b3e3f7b30 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2166,6 +2166,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 					unsigned long wmark_target)
 {
 	unsigned long watermark;
+	bool nocma;
 
 	if (is_via_compact_memory(order))
 		return COMPACT_CONTINUE;
@@ -2196,8 +2197,9 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
 				low_wmark_pages(zone) : min_wmark_pages(zone);
 	watermark += compact_gap(order);
+	nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
 	if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
-				 ALLOC_CMA, wmark_target))
+				 nocma ? 0 : ALLOC_CMA, wmark_target))
 		return COMPACT_SKIPPED;
 
 	return COMPACT_CONTINUE;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index becfc32f5243..5720d9671e4a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3528,6 +3528,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
+	bool nocma;
 
 	BUG_ON(!PageBuddy(page));
 
@@ -3542,7 +3543,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	 * exists.
 	 */
 	watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
-	if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+	nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, nocma ? 0 : ALLOC_CMA))
 		return 0;
 
 	__mod_zone_freepage_state(zone, -(1UL << order), mt);
-- 
Gitee
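
Note (not part of the patch): the PF_MEMALLOC_NOCMA flag tested above is
set around long term pins by memalloc_nocma_save()/memalloc_nocma_restore()
from <linux/sched/mm.h> in kernel trees of this generation; in such trees
the FOLL_LONGTERM gup path enters that scope internally. The helper below is
only a hypothetical, simplified sketch of that scoping, not code from this
tree, and the function name example_pin_longterm() is made up for
illustration.

#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Hypothetical sketch: how a long term pin ends up running with
 * PF_MEMALLOC_NOCMA set, so that the watermark checks patched above
 * stop counting CMA free pages toward compaction suitability.
 */
static long example_pin_longterm(unsigned long start, unsigned long nr_pages,
				 struct page **pages)
{
	unsigned int nocma_flags;
	long pinned;

	nocma_flags = memalloc_nocma_save();	/* sets PF_MEMALLOC_NOCMA */
	pinned = get_user_pages(start, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	memalloc_nocma_restore(nocma_flags);	/* restores previous state */

	return pinned;
}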