From ef3bc96a4b525923c6109d5dd6928240dc3fea33 Mon Sep 17 00:00:00 2001
From: Zelin Deng
Date: Thu, 18 Jul 2024 10:37:27 +0800
Subject: [PATCH] anolis: mm: Use MAX_ORDER_NR_PAGES to check page alignment
 for deferred mem init

ANBZ: #9469

MAX_ORDER_NR_PAGES is a fixed value even though hugetlb page is enabled,
while pageblock_nr_pages is not, as it can be either 9 or 10 depending on
kernel config. In addition, when the 'unaccepted memory' feature is
backported, MAX_ORDER is assumed to be 10; however, on the current code
base MAX_ORDER is 11. Hence correct the relevant code which used
'MAX_ORDER' to 'MAX_ORDER - 1'.

Signed-off-by: Zelin Deng
---
 mm/page_alloc.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 307f83f585a5..cc8f8c8413a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1779,7 +1779,7 @@ void __free_pages_core(struct page *page, unsigned int order)
 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
 
 	if (page_contains_unaccepted(page, order)) {
-		if (order == MAX_ORDER && __free_unaccepted(page))
+		if (order == MAX_ORDER - 1 && __free_unaccepted(page))
 			return;
 
 		accept_page(page, order);
@@ -1930,10 +1930,11 @@ static void __init deferred_free_range(unsigned long pfn,
 	page = pfn_to_page(pfn);
 
 	/* Free a large naturally-aligned chunk if possible */
-	if (nr_pages == pageblock_nr_pages &&
-	    (pfn & (pageblock_nr_pages - 1)) == 0) {
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, pageblock_order);
+	if (nr_pages == MAX_ORDER_NR_PAGES &&
+	    (pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
+			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
+		__free_pages_core(page, MAX_ORDER - 1);
 		return;
 	}
 
@@ -1971,7 +1972,7 @@ static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
 	if (!pfn_valid_within(pfn))
 		return false;
-	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
+	if (!(pfn & (MAX_ORDER_NR_PAGES - 1)) && !pfn_valid(pfn))
 		return false;
 	return true;
 }
@@ -1983,7 +1984,7 @@
 static void __init deferred_free_pages(unsigned long pfn,
 				       unsigned long end_pfn)
 {
-	unsigned long nr_pgmask = pageblock_nr_pages - 1;
+	unsigned long nr_pgmask = MAX_ORDER_NR_PAGES - 1;
 	unsigned long nr_free = 0;
 
 	for (; pfn < end_pfn; pfn++) {
@@ -9634,9 +9635,9 @@ static bool try_to_accept_memory_one(struct zone *zone)
 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
 	spin_unlock_irqrestore(&zone->lock, flags);
 
-	accept_page(page, MAX_ORDER);
+	accept_page(page, MAX_ORDER - 1);
 
-	__free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
+	__free_pages_ok(page, MAX_ORDER - 1, FPI_TO_TAIL);
 
 	if (last)
 		static_branch_dec(&zones_with_unaccepted_pages);
-- 
Gitee