From 343837c53177c124c2019e08a4ada5cecf2e45a6 Mon Sep 17 00:00:00 2001 From: yangge Date: Wed, 12 Jul 2023 15:54:01 +0800 Subject: [PATCH] anolis: mm: release hugetlb pages to buddy allocator after migrating them ANBZ: #7040 alloc_contig_range() will fail if in-use HugeTLB pages are released to the static HugeTLB pool instead of the buddy allocator. Currently, in-use HugeTLB pages may be released to the static HugeTLB pool after migration, and test_pages_isolated() will fail if the pages are not in the buddy allocator. So, we add the function replace_free_huge_page() to release HugeTLB pages in the static HugeTLB pool to the buddy allocator after migration. Signed-off-by: yangge Signed-off-by: Xin Jiang --- include/linux/hugetlb.h | 5 +++++ mm/hugetlb.c | 28 ++++++++++++++++++++++++++++ mm/migrate.c | 6 ++++-- 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 97fc88c892ea..4bfc818f583b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -740,6 +740,7 @@ static inline int hstate_index(struct hstate *h) extern int dissolve_free_huge_page(struct page *page); extern int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); +extern void replace_free_huge_page(struct page *page); #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION #ifndef arch_hugetlb_migration_supported @@ -994,6 +995,10 @@ static inline int dissolve_free_huge_pages(unsigned long start_pfn, return 0; } +static inline void replace_free_huge_page(struct page *page) +{ +} + static inline bool hugepage_migration_supported(struct hstate *h) { return false; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4f97fbc334ed..89116d939c40 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -43,6 +43,7 @@ #include #include "internal.h" #include "hugetlb_vmemmap.h" +#include int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; @@ -2514,6 +2515,33 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) return ret; }
+void replace_free_huge_page(struct page *page) +{ + struct hstate *h; + struct page *head; + + LIST_HEAD(isolate_list); + + spin_lock_irq(&hugetlb_lock); + if (PageHuge(page)) { + head = compound_head(page); + h = page_hstate(head); + } else { + spin_unlock_irq(&hugetlb_lock); + return; + } + spin_unlock_irq(&hugetlb_lock); + + if (hstate_is_gigantic(h)) + return; + + if (!page_count(head)) { + alloc_and_dissolve_huge_page(h, head, &isolate_list); + if (!list_empty(&isolate_list)) + putback_movable_pages(&isolate_list); + } +} + struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { diff --git a/mm/migrate.c b/mm/migrate.c index b16ea665bbbd..246f063e13bb 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1470,9 +1470,11 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, out_unlock: unlock_page(hpage); out: - if (rc == MIGRATEPAGE_SUCCESS) + if (rc == MIGRATEPAGE_SUCCESS) { putback_active_hugepage(hpage); - else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS) + if (reason == MR_CONTIG_RANGE) + replace_free_huge_page(hpage); + } else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS) list_move_tail(&hpage->lru, ret); /* -- Gitee