diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 82a758bb8ac9e0c18e630979993a1024fc7085fc..191ed8a6b0d0a906df76a55ee415b6c9ace6a33e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3575,8 +3575,8 @@ static void add_new_huge_page_to_pool(struct dhugetlb_pool *hpool,
 		list_add_tail(&page->lru, &hpool->dhugetlb_1G_freelists);
 		hpool->free_unreserved_1G++;
 	} else {
-		prep_new_page(page, PMD_SHIFT - PAGE_SHIFT, __GFP_COMP, 0);
-		set_page_count(page, 0);
+		prep_new_frozen_free_page(page, PMD_SHIFT - PAGE_SHIFT,
+					  __GFP_COMP);
 		list_add_tail(&page->lru, &hpool->dhugetlb_2M_freelists);
 		hpool->free_unreserved_2M++;
 	}
diff --git a/mm/internal.h b/mm/internal.h
index 72f77379b58f330174b37b0846966374a0ce1e53..e50363ddb4167b2cbb54baa322bcede6cd9c9eca 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -185,6 +185,10 @@ extern void prep_compound_page(struct page *page, unsigned int order);
 extern int check_new_page(struct page *page);
 extern void post_alloc_hook(struct page *page, unsigned int order,
 					gfp_t gfp_flags);
+#ifdef CONFIG_DYNAMIC_HUGETLB
+extern void prep_new_frozen_free_page(struct page *page, unsigned int order,
+				      gfp_t gfp_flags);
+#endif
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e2360fe3616d63b892c054ce7d59b42244fea256..787bbabc29d0b4f9e5278bf25aa66a1371dec6bc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2100,6 +2100,32 @@ void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	clear_page_pfmemalloc(page);
 }
 
+#ifdef CONFIG_DYNAMIC_HUGETLB
+/* Similar to prep_new_page(), but does not set the page refcount to 1 */
+void prep_new_frozen_free_page(struct page *page, unsigned int order,
+			       gfp_t gfp_flags)
+{
+	int i;
+
+	set_page_private(page, 0);
+
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
+	kernel_poison_pages(page, 1 << order, 1);
+	set_page_owner(page, order, gfp_flags);
+
+	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
+		for (i = 0; i < (1 << order); i++)
+			clear_highpage(page + i);
+
+	if (order && (gfp_flags & __GFP_COMP))
+		prep_compound_page(page, order);
+
+	clear_page_pfmemalloc(page);
+}
+#endif
+
 /*
  * Go through the free lists for the given migratetype and remove
  * the smallest available page from the freelists
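
Note (not part of the patch): pages prepared by prep_new_frozen_free_page() sit on the dhugetlb free lists with a refcount of zero, so whatever path later hands one of them out is expected to raise the refcount before returning the page. Below is a minimal sketch of that hand-out step under that assumption; dhugetlb_take_frozen_2M_page() is a hypothetical name, while set_page_refcounted() is the existing mm/internal.h helper that checks the count is zero and sets it to 1.

/*
 * Illustrative sketch only -- not part of this patch.
 * dhugetlb_take_frozen_2M_page() is a hypothetical allocation-side
 * counterpart of add_new_huge_page_to_pool().
 */
static struct page *dhugetlb_take_frozen_2M_page(struct dhugetlb_pool *hpool)
{
	struct page *page;

	page = list_first_entry_or_null(&hpool->dhugetlb_2M_freelists,
					struct page, lru);
	if (!page)
		return NULL;

	list_del(&page->lru);
	hpool->free_unreserved_2M--;
	set_page_refcounted(page);	/* frozen page: refcount 0 -> 1 */
	return page;
}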