From d14a814ba89c988562895300e75b75c99c7552c5 Mon Sep 17 00:00:00 2001
From: Ge Yang <yangge1116@126.com>
Date: Tue, 2 Sep 2025 09:00:42 +0800
Subject: [PATCH 1/6] anolis: x86/csv: Allocate memory cyclically from
 different csv_cma

ANBZ: #24405

Currently, when a csv3 virtual machine is started, the function
csv_alloc_from_contiguous() is called to allocate memory, starting
from csv_cma[0], then csv_cma[1], and so on, always in the same
sequence. When multiple csv3 virtual machines are started
concurrently, this easily causes contention on the same csv_cma,
which hurts performance.

Adjust the logic so that each call to csv_alloc_from_contiguous()
starts allocating from the csv_cma that follows the one used by the
previous allocation. This adjustment significantly improves
performance. The comparison data is as follows:

Average memory allocation time (when starting 100 4G csv3 virtual
machines concurrently):

  before: 30s
  after:  10s

Signed-off-by: Ge Yang <yangge1116@126.com>
---
 arch/x86/mm/csv.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c
index 288afb372f7d..3a03534dfd8f 100644
--- a/arch/x86/mm/csv.c
+++ b/arch/x86/mm/csv.c
@@ -95,6 +95,7 @@ struct csv_cma {
 
 struct cma_array {
 	unsigned long count;
+	unsigned int index;
 	atomic64_t csv_free_size;
 	struct csv_cma csv_cma[];
 };
@@ -167,6 +168,7 @@ void __init csv_cma_reserve_mem(void)
 		}
 
 		array->count = 0;
+		array->index = 0;
 		atomic64_set(&array->csv_free_size, 0);
 		csv_contiguous_pernuma_area[node] = array;
 
@@ -302,7 +304,8 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed,
 
 		count = array->count;
 		while (count) {
-			csv_cma = &array->csv_cma[count - 1];
+			array->index = (array->index + 1) % count;
+			csv_cma = &array->csv_cma[array->index];
 
 			/*
 			 * The value check of csv_cma->fast is lockless, but
--
Gitee
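
The round-robin idea in the patch above can be illustrated with a
small stand-alone C sketch. All names here (pick_area(), try_alloc(),
next_start) are inventions of this sketch, not kernel code, and the
atomic start counter is a variation: the patch itself advances
array->index without atomics, which is tolerable there because any
starting slot is acceptable. Rotating the scan origin makes
concurrent callers begin at different areas instead of all piling
onto the same one:

  #include <stdatomic.h>
  #include <stddef.h>

  #define NR_AREAS 8

  struct area {
          int free_pages;         /* stand-in for one csv_cma region */
  };

  static struct area areas[NR_AREAS];
  static atomic_uint next_start;  /* rotated scan origin */

  static int try_alloc(struct area *a)
  {
          /* Sketch of a successful CMA allocation; real code would
           * take a per-area lock around this check-and-decrement. */
          if (a->free_pages > 0) {
                  a->free_pages--;
                  return 1;
          }
          return 0;
  }

  static struct area *pick_area(void)
  {
          unsigned int start = atomic_fetch_add(&next_start, 1) % NR_AREAS;
          unsigned int i;

          /* Scan every area once, beginning at the rotated start slot. */
          for (i = 0; i < NR_AREAS; i++) {
                  struct area *a = &areas[(start + i) % NR_AREAS];

                  if (try_alloc(a))
                          return a;
          }
          return NULL;            /* all areas exhausted */
  }

Each caller still sees every area, so nothing is lost when the
preferred area is full; only the first place two concurrent callers
collide changes.
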
From 8b09426eb549bf78c59c75707e44f21bc4e95712 Mon Sep 17 00:00:00 2001
From: Ge Yang <yangge1116@126.com>
Date: Thu, 14 Aug 2025 20:34:50 +0800
Subject: [PATCH 2/6] anolis: revert: mm: hugetlb: wait hugetlb pages to be
 released to buddy allocator

ANBZ: #24405

The commit bf06164cafb1 ("anolis: mm: hugetlb: wait hugetlb pages to
be released to buddy allocator") has a corresponding upstream version.
Revert it here so that the upstream version can be backported to this
repo.

Signed-off-by: Ge Yang <yangge1116@126.com>
---
 include/linux/hugetlb.h | 4 ++--
 mm/hugetlb.c            | 5 +----
 mm/migrate.c            | 2 +-
 3 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f4e8e46179e0..354638dbe682 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -741,7 +741,7 @@ static inline int hstate_index(struct hstate *h)
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
-extern void replace_or_wait_free_huge_page(struct page *page);
+extern void replace_free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 #ifndef arch_hugetlb_migration_supported
@@ -996,7 +996,7 @@ static inline int dissolve_free_huge_pages(unsigned long start_pfn,
 	return 0;
 }
 
-static inline void replace_or_wait_free_huge_page(struct page *page)
+static inline void replace_free_huge_page(struct page *page)
 {
 }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 79a25d560e4e..0fc25cefb961 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2515,7 +2515,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	return ret;
 }
 
-void replace_or_wait_free_huge_page(struct page *page)
+void replace_free_huge_page(struct page *page)
 {
 	struct hstate *h;
 	struct page *head;
@@ -2528,9 +2528,6 @@ void replace_or_wait_free_huge_page(struct page *page)
 		h = page_hstate(head);
 	} else {
 		spin_unlock_irq(&hugetlb_lock);
-
-		/* Wait hugetlb pages to be released to buddy allocator */
-		flush_work(&free_hpage_work);
 		return;
 	}
 	spin_unlock_irq(&hugetlb_lock);
diff --git a/mm/migrate.c b/mm/migrate.c
index 29a25f6faf6d..019475f2941e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1520,7 +1520,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (rc == MIGRATEPAGE_SUCCESS) {
 		putback_active_hugepage(hpage);
 		if (reason == MR_CONTIG_RANGE)
-			replace_or_wait_free_huge_page(hpage);
+			replace_free_huge_page(hpage);
 	} else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS)
 		list_move_tail(&hpage->lru, ret);
 
--
Gitee
From d05d23bb76d01bdd0c155594d33f978102bbb133 Mon Sep 17 00:00:00 2001
From: Ge Yang <yangge1116@126.com>
Date: Thu, 14 Aug 2025 20:49:53 +0800
Subject: [PATCH 3/6] anolis: revert: mm: release hugetlb pages to buddy
 allocator after migrate them

ANBZ: #24405

The commit 1b4d4cce1b1a ("anolis: mm: release hugetlb pages to buddy
allocator after migrate them") has a corresponding upstream version.
Revert it here so that the upstream version can be backported to this
repo.

Signed-off-by: Ge Yang <yangge1116@126.com>
---
 include/linux/hugetlb.h |  5 -----
 mm/hugetlb.c            | 28 ----------------------------
 mm/migrate.c            |  6 ++----
 3 files changed, 2 insertions(+), 37 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 354638dbe682..60eee54e7126 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -741,7 +741,6 @@ static inline int hstate_index(struct hstate *h)
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
-extern void replace_free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 #ifndef arch_hugetlb_migration_supported
@@ -996,10 +995,6 @@ static inline int dissolve_free_huge_pages(unsigned long start_pfn,
 	return 0;
 }
 
-static inline void replace_free_huge_page(struct page *page)
-{
-}
-
 static inline bool hugepage_migration_supported(struct hstate *h)
 {
 	return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0fc25cefb961..36bcc7859a90 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -43,7 +43,6 @@
 #include <linux/page_owner.h>
 #include "internal.h"
 #include "hugetlb_vmemmap.h"
-#include <linux/migrate.h>
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
@@ -2515,33 +2514,6 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	return ret;
 }
 
-void replace_free_huge_page(struct page *page)
-{
-	struct hstate *h;
-	struct page *head;
-
-	LIST_HEAD(isolate_list);
-
-	spin_lock_irq(&hugetlb_lock);
-	if (PageHuge(page)) {
-		head = compound_head(page);
-		h = page_hstate(head);
-	} else {
-		spin_unlock_irq(&hugetlb_lock);
-		return;
-	}
-	spin_unlock_irq(&hugetlb_lock);
-
-	if (hstate_is_gigantic(h))
-		return;
-
-	if (!page_count(head)) {
-		alloc_and_dissolve_huge_page(h, head, &isolate_list);
-		if (!list_empty(&isolate_list))
-			putback_movable_pages(&isolate_list);
-	}
-}
-
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index 019475f2941e..0bf313356a34 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1517,11 +1517,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 out_unlock:
 	unlock_page(hpage);
 out:
-	if (rc == MIGRATEPAGE_SUCCESS) {
+	if (rc == MIGRATEPAGE_SUCCESS)
 		putback_active_hugepage(hpage);
-		if (reason == MR_CONTIG_RANGE)
-			replace_free_huge_page(hpage);
-	} else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS)
+	else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS)
 		list_move_tail(&hpage->lru, ret);
 
 	/*
--
Gitee
From bae76f7586ee092ddc90c3e9479924a2644872f7 Mon Sep 17 00:00:00 2001
From: yangge <yangge1116@126.com>
Date: Sat, 11 Jan 2025 15:58:20 +0800
Subject: [PATCH 4/6] mm: replace free hugepage folios after migration

ANBZ: #24405

commit 04f13d241b8b146b23038bffd907cb8278391d07 upstream.

My machine has 4 NUMA nodes, each equipped with 32GB of memory. I
have configured each NUMA node with 16GB of CMA and 16GB of in-use
hugetlb pages. The allocation of contiguous memory via cma_alloc()
can fail probabilistically.

When there are free hugetlb folios in the hugetlb pool, during the
migration of in-use hugetlb folios, new folios are allocated from the
free hugetlb pool. After the migration is completed, the old folios
are released back to the free hugetlb pool instead of being returned
to the buddy system. This can cause the test_pages_isolated() check
to fail, ultimately leading to the failure of cma_alloc().

Call trace:

cma_alloc()
    __alloc_contig_migrate_range() // migrate in-use hugepage
    test_pages_isolated()
        __test_page_isolated_in_pageblock()
            PageBuddy(page) // check if the page is in buddy

To address this issue, we introduce a function named
replace_free_hugepage_folios(). This function will replace the
hugepage in the free hugepage pool with a new one and release the old
one to the buddy system. After the migration of in-use hugetlb pages
is completed, we will invoke replace_free_hugepage_folios() to ensure
that these hugepages are properly released to the buddy system.
Following this step, when test_pages_isolated() is executed for
inspection, it will successfully pass.

Additionally, when alloc_contig_range() is used to migrate multiple
in-use hugetlb pages, it can result in some in-use hugetlb pages being
released back to the free hugetlb pool and subsequently being
reallocated and used again. For example:

[huge 0] [huge 1]

To migrate huge 0, we obtain huge x from the pool. After the migration
is completed, we return the now-freed huge 0 back to the pool. When
it's time to migrate huge 1, we can simply reuse the now-freed huge 0
from the pool. As a result, when replace_free_hugepage_folios() is
executed, it cannot release huge 0 back to the buddy system. To
address this issue, we should prevent the reuse of isolated free
hugepages during the migration process.

Link: https://lkml.kernel.org/r/1734503588-16254-1-git-send-email-yangge1116@126.com
Link: https://lkml.kernel.org/r/1736582300-11364-1-git-send-email-yangge1116@126.com
Signed-off-by: yangge <yangge1116@126.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/hugetlb.h |  7 +++++++
 mm/hugetlb.c            | 43 +++++++++++++++++++++++++++++++++++++++++
 mm/page_alloc.c         | 12 +++++++++++-
 3 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 60eee54e7126..3583466d2ccd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -601,6 +601,7 @@ struct huge_bootmem_page {
 };
 
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+int replace_free_hugepage_pages(unsigned long start_pfn, unsigned long end_pfn);
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
@@ -888,6 +889,12 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
 	return -ENOMEM;
 }
 
+static inline int replace_free_hugepage_pages(unsigned long start_pfn,
+					unsigned long end_pfn)
+{
+	return 0;
+}
+
 static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   int avoid_reserve)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 36bcc7859a90..b97f706dcc97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -43,6 +43,8 @@
 #include <linux/page_owner.h>
 #include "internal.h"
 #include "hugetlb_vmemmap.h"
+#include <linux/page-isolation.h>
+#include <linux/migrate.h>
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
@@ -1081,6 +1083,9 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 		if (PageHWPoison(page))
 			continue;
 
+		if (is_migrate_isolate_page(page))
+			continue;
+
 		list_move(&page->lru, &h->hugepage_activelist);
 		set_page_refcounted(page);
 		ClearHPageFreed(page);
@@ -2514,6 +2519,44 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	return ret;
 }
 
+/*
+ * replace_free_hugepage_pages - Replace free hugepage pages in a given pfn
+ * range with new pages.
+ * @start_pfn: start pfn of the given pfn range
+ * @end_pfn: end pfn of the given pfn range
+ * Returns 0 on success, otherwise negated error.
+ */
+int replace_free_hugepage_pages(unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct hstate *h;
+	struct page *page;
+	int ret = 0;
+
+	LIST_HEAD(isolate_list);
+
+	while (start_pfn < end_pfn) {
+		page = pfn_to_page(start_pfn);
+		if (PageHuge(page)) {
+			h = page_hstate(page);
+		} else {
+			start_pfn++;
+			continue;
+		}
+
+		if (!page_count(page)) {
+			ret = alloc_and_dissolve_huge_page(h, page,
+							   &isolate_list);
+			if (ret)
+				break;
+
+			putback_movable_pages(&isolate_list);
+		}
+		start_pfn++;
+	}
+
+	return ret;
+}
+
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 392c92e594a4..63b5c1bfd492 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9291,7 +9291,17 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
-	ret = 0;
+
+	/*
+	 * When in-use hugetlb pages are migrated, they may simply be released
+	 * back into the free hugepage pool instead of being returned to the
+	 * buddy system. After the migration of in-use huge pages is completed,
+	 * we will invoke replace_free_hugepage_pages() to ensure that these
+	 * hugepages are properly released to the buddy system.
+	 */
+	ret = replace_free_hugepage_pages(start, end);
+	if (ret)
+		goto done;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
--
Gitee
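
The reuse problem described in the last paragraph of the patch above
can be sketched stand-alone (all names here belong to the sketch, not
the kernel). A free-list pop that skips entries flagged as sitting in
an isolated range mirrors the is_migrate_isolate_page() check the
patch adds to dequeue_huge_page_node_exact(); without the skip, the
just-freed huge 0 would be handed right back out for the next
migration:

  #include <stdbool.h>
  #include <stddef.h>

  struct hpage {
          struct hpage *next;
          bool hwpoison;          /* stand-in for PageHWPoison()            */
          bool isolated;          /* stand-in for is_migrate_isolate_page() */
  };

  /*
   * Pop the first usable page from a free list. Pages inside a range
   * that is currently isolated are skipped: handing one out would put
   * it right back into the range alloc_contig_range() is trying to
   * drain.
   */
  static struct hpage *dequeue_free_hpage(struct hpage *head)
  {
          struct hpage *p;

          for (p = head; p; p = p->next) {
                  if (p->hwpoison)
                          continue;
                  if (p->isolated)        /* the check this patch adds */
                          continue;
                  return p;
          }
          return NULL;
  }
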
From 308b36ea15349dd8350e43f34fc155d4e517066a Mon Sep 17 00:00:00 2001
From: Ge Yang <yangge1116@126.com>
Date: Wed, 19 Feb 2025 11:46:44 +0800
Subject: [PATCH 5/6] mm/hugetlb: wait for hugetlb folios to be freed

ANBZ: #24405

commit 67bab13307c83fb742c2556b06cdc39dbad27f07 upstream.

Since the introduction of commit c77c0a8ac4c52 ("mm/hugetlb: defer
freeing of huge pages if in non-task context"), which supports
deferring the freeing of hugetlb pages, the allocation of contiguous
memory through cma_alloc() may fail probabilistically.

In the CMA allocation process, if it is found that the CMA area is
occupied by in-use hugetlb folios, these in-use hugetlb folios need
to be migrated to another location. When there are no available
hugetlb folios in the free hugetlb pool during the migration of
in-use hugetlb folios, new folios are allocated from the buddy
system. A temporary state is set on the newly allocated folio. Upon
completion of the hugetlb folio migration, the temporary state is
transferred from the new folios to the old folios. Normally, when the
old folios with the temporary state are freed, they are directly
released back to the buddy system. However, due to the deferred
freeing of hugetlb pages, the PageBuddy() check fails, ultimately
leading to the failure of cma_alloc().

Here is a simplified call trace illustrating the process:

cma_alloc()
    ->__alloc_contig_migrate_range() // Migrate in-use hugetlb folios
      ->unmap_and_move_huge_page()
        ->folio_putback_hugetlb() // Free old folios
    ->test_pages_isolated()
      ->__test_page_isolated_in_pageblock()
        ->PageBuddy(page) // Check if the page is in buddy

To resolve this issue, we have implemented a function named
wait_for_freed_hugetlb_folios(). This function ensures that the
hugetlb folios are properly released back to the buddy system after
their migration is completed. By invoking
wait_for_freed_hugetlb_folios() before calling PageBuddy(), we ensure
that PageBuddy() will succeed.

Link: https://lkml.kernel.org/r/1739936804-18199-1-git-send-email-yangge1116@126.com
Fixes: c77c0a8ac4c5 ("mm/hugetlb: defer freeing of huge pages if in non-task context")
Signed-off-by: Ge Yang <yangge1116@126.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/hugetlb.h |  5 +++++
 mm/hugetlb.c            |  8 ++++++++
 mm/page_isolation.c     | 10 ++++++++++
 3 files changed, 23 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3583466d2ccd..dfaf2d39d3b0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -602,6 +602,7 @@ struct huge_bootmem_page {
 
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 int replace_free_hugepage_pages(unsigned long start_pfn, unsigned long end_pfn);
+void wait_for_freed_hugetlb_pages(void);
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
@@ -895,6 +896,10 @@ static inline int replace_free_hugepage_pages(unsigned long start_pfn,
 	return 0;
 }
 
+static inline void wait_for_freed_hugetlb_pages(void)
+{
+}
+
 static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   int avoid_reserve)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b97f706dcc97..4d1fdf6fef52 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2519,6 +2519,14 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	return ret;
 }
 
+void wait_for_freed_hugetlb_pages(void)
+{
+	if (llist_empty(&hpage_freelist))
+		return;
+
+	flush_work(&free_hpage_work);
+}
+
 /*
  * replace_free_hugepage_pages - Replace free hugepage pages in a given pfn
  * range with new pages.
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 35f9e143fbdb..f4c660208f93 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -661,6 +661,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	struct page *page;
 	struct zone *zone;
 
+	/*
+	 * Due to the deferred freeing of hugetlb folios, the hugepage folios
+	 * may not be released to the buddy system immediately. This can
+	 * cause PageBuddy() to fail in __test_page_isolated_in_pageblock().
+	 * To ensure that the hugetlb folios are properly released back to
+	 * the buddy system, we invoke the wait_for_freed_hugetlb_pages()
+	 * function to wait for the release to complete.
+	 */
+	wait_for_freed_hugetlb_pages();
+
 	/*
 	 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
 	 * are not aligned to pageblock_nr_pages.
--
Gitee
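
The defer-and-flush mechanism that wait_for_freed_hugetlb_pages()
leans on can be sketched in user space with pthreads. This is a rough
analogue with made-up names: the kernel queues folios on
hpage_freelist and waits with flush_work(&free_hpage_work), while the
sketch counts pending deferred frees and lets a waiter block until the
worker has drained them, before going on to check PageBuddy():

  #include <pthread.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
  static int pending;                /* deferred frees not yet processed */

  static void defer_free(void)       /* producer: queue one deferred free */
  {
          pthread_mutex_lock(&lock);
          pending++;
          pthread_mutex_unlock(&lock);
  }

  static void worker_process_one(void)  /* worker: complete one free */
  {
          pthread_mutex_lock(&lock);
          if (pending && --pending == 0)
                  pthread_cond_broadcast(&drained);
          pthread_mutex_unlock(&lock);
  }

  static void wait_for_freed(void)   /* waiter: the flush_work() analogue */
  {
          pthread_mutex_lock(&lock);
          while (pending)
                  pthread_cond_wait(&drained, &lock);
          pthread_mutex_unlock(&lock);
  }

The llist_empty() test in the patch serves the same purpose as
checking pending before blocking: the common case, where nothing is
queued, returns immediately and pays almost nothing.
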
From 33336f6ba38e2eeecdd4bd0231355ec24d8a6fa1 Mon Sep 17 00:00:00 2001
From: Ge Yang <yangge1116@126.com>
Date: Thu, 22 May 2025 11:22:17 +0800
Subject: [PATCH 6/6] mm/hugetlb: fix kernel NULL pointer dereference when
 replacing free hugetlb folios

ANBZ: #24405

commit 113ed54ad276c352ee5ce109bdcf0df118a43bda upstream.

A kernel crash was observed when replacing free hugetlb folios:

BUG: kernel NULL pointer dereference, address: 0000000000000028
PGD 0 P4D 0
Oops: Oops: 0000 [#1] SMP NOPTI
CPU: 28 UID: 0 PID: 29639 Comm: test_cma.sh Tainted 6.15.0-rc6-zp #41 PREEMPT(voluntary)
RIP: 0010:alloc_and_dissolve_hugetlb_folio+0x1d/0x1f0
RSP: 0018:ffffc9000b30fa90 EFLAGS: 00010286
RAX: 0000000000000000 RBX: 0000000000342cca RCX: ffffea0043000000
RDX: ffffc9000b30fb08 RSI: ffffea0043000000 RDI: 0000000000000000
RBP: ffffc9000b30fb20 R08: 0000000000001000 R09: 0000000000000000
R10: ffff88886f92eb00 R11: 0000000000000000 R12: ffffea0043000000
R13: 0000000000000000 R14: 00000000010c0200 R15: 0000000000000004
FS:  00007fcda5f14740(0000) GS:ffff8888ec1d8000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000000028 CR3: 0000000391402000 CR4: 0000000000350ef0
Call Trace:
 replace_free_hugepage_folios+0xb6/0x100
 alloc_contig_range_noprof+0x18a/0x590
 ? srso_return_thunk+0x5/0x5f
 ? down_read+0x12/0xa0
 ? srso_return_thunk+0x5/0x5f
 cma_range_alloc.constprop.0+0x131/0x290
 __cma_alloc+0xcf/0x2c0
 cma_alloc_write+0x43/0xb0
 simple_attr_write_xsigned.constprop.0.isra.0+0xb2/0x110
 debugfs_attr_write+0x46/0x70
 full_proxy_write+0x62/0xa0
 vfs_write+0xf8/0x420
 ? srso_return_thunk+0x5/0x5f
 ? filp_flush+0x86/0xa0
 ? srso_return_thunk+0x5/0x5f
 ? filp_close+0x1f/0x30
 ? srso_return_thunk+0x5/0x5f
 ? do_dup2+0xaf/0x160
 ? srso_return_thunk+0x5/0x5f
 ksys_write+0x65/0xe0
 do_syscall_64+0x64/0x170
 entry_SYSCALL_64_after_hwframe+0x76/0x7e

There is a potential race between __update_and_free_hugetlb_folio()
and replace_free_hugepage_folios():

CPU1                              CPU2
__update_and_free_hugetlb_folio   replace_free_hugepage_folios
                                  folio_test_hugetlb(folio)
                                  -- It's still hugetlb folio.
__folio_clear_hugetlb(folio)
hugetlb_free_folio(folio)
                                  h = folio_hstate(folio)
                                  -- Here, h is NULL pointer

When the above race condition occurs, folio_hstate(folio) returns
NULL, and subsequent access to this NULL pointer will cause the system
to crash. To resolve this issue, execute folio_hstate(folio) under the
protection of hugetlb_lock, ensuring that folio_hstate(folio) does not
return NULL.

Link: https://lkml.kernel.org/r/1747884137-26685-1-git-send-email-yangge1116@126.com
Fixes: 04f13d241b8b ("mm: replace free hugepage folios after migration")
Signed-off-by: Ge Yang <yangge1116@126.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/hugetlb.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4d1fdf6fef52..279109dd5f97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2544,12 +2544,20 @@ int replace_free_hugepage_pages(unsigned long start_pfn, unsigned long end_pfn)
 
 	while (start_pfn < end_pfn) {
 		page = pfn_to_page(start_pfn);
+
+		/*
+		 * The page might have been dissolved from under our feet, so make sure
+		 * to carefully check the state under the lock.
+		 */
+		spin_lock_irq(&hugetlb_lock);
 		if (PageHuge(page)) {
 			h = page_hstate(page);
 		} else {
+			spin_unlock_irq(&hugetlb_lock);
 			start_pfn++;
 			continue;
 		}
+		spin_unlock_irq(&hugetlb_lock);
 
 		if (!page_count(page)) {
 			ret = alloc_and_dissolve_huge_page(h, page,
--
Gitee
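
To close the series, the locking rule patch 6 enforces can be shown
with a minimal user-space sketch (pthread-based, every name invented
here): the "is this still a hugetlb page?" test and the dependent
page_hstate() read have to happen inside one critical section,
otherwise a concurrent dissolve can invalidate the state between the
two steps:

  #include <pthread.h>
  #include <stddef.h>

  struct state {
          int valid;              /* stand-in for PageHuge()      */
          void *hstate;           /* stand-in for page_hstate()   */
  };

  static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

  /* Racy version (what the crash showed): valid may flip to 0 right
   * after the unlocked check, so the returned pointer may already be
   * stale or NULL when the caller dereferences it. */
  static void *lookup_racy(struct state *s)
  {
          if (!s->valid)
                  return NULL;
          return s->hstate;
  }

  /* Fixed version: check and read under the lock, as one unit, which
   * is exactly what taking hugetlb_lock around PageHuge() and
   * page_hstate() achieves in the patch. */
  static void *lookup_locked(struct state *s)
  {
          void *h = NULL;

          pthread_mutex_lock(&lk);
          if (s->valid)
                  h = s->hstate;  /* consistent with the check above */
          pthread_mutex_unlock(&lk);
          return h;
  }
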