diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8b7086cfd1edf3473691c5c0c64455a3ad51483c..17409e5d54458c21cb244e8132a62da35b3fc212 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2907,13 +2907,20 @@ void free_transhuge_page(struct page *page)
 	struct deferred_split ds_queue;
 	unsigned long flags;
 
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
 	get_deferred_split_queue(page, &ds_queue);
-	spin_lock_irqsave(ds_queue.split_queue_lock, flags);
 	if (!list_empty(page_deferred_list(page))) {
-		(*ds_queue.split_queue_len)--;
-		list_del(page_deferred_list(page));
+		spin_lock_irqsave(ds_queue.split_queue_lock, flags);
+		if (!list_empty(page_deferred_list(page))) {
+			(*ds_queue.split_queue_len)--;
+			list_del(page_deferred_list(page));
+		}
+		spin_unlock_irqrestore(ds_queue.split_queue_lock, flags);
 	}
-	spin_unlock_irqrestore(ds_queue.split_queue_lock, flags);
 	free_compound_page(page);
 }
 
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index a44cf211ffeed997fb2df8f9d112954ad776760d..2b3f6967176fbd076e266f8a7c9da199c8c3ba91 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -71,8 +71,20 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb)
 	tlb_table_flush(tlb);
 #endif
 	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
+		struct page **pages = batch->pages;
+
+		do {
+			/*
+			 * limit free batch count when PAGE_SIZE > 4K
+			 */
+			unsigned int nr = min(512U, batch->nr);
+
+			free_pages_and_swap_cache(pages, nr);
+			pages += nr;
+			batch->nr -= nr;
+
+			cond_resched();
+		} while (batch->nr);
 	}
 	tlb->active = &tlb->local;
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index c336dacfac52db8c777d91d362c89b83adc12af0..bf26f9c8edacfd43308c4b38eee0f2835b93d871 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1281,12 +1281,20 @@ static void page_remove_anon_compound_rmap(struct page *page)
 	if (TestClearPageDoubleMap(page)) {
 		/*
 		 * Subpages can be mapped with PTEs too. Check how many of
-		 * themi are still mapped.
+		 * them are still mapped.
 		 */
 		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
 			if (atomic_add_negative(-1, &page[i]._mapcount))
 				nr++;
 		}
+
+		/*
+		 * Queue the page for deferred split if at least one small
+		 * page of the compound page is unmapped, but at least one
+		 * small page is still mapped.
+		 */
+		if (nr && nr < HPAGE_PMD_NR)
+			deferred_split_huge_page(page);
 	} else {
 		nr = HPAGE_PMD_NR;
 	}
@@ -1294,10 +1302,8 @@ static void page_remove_anon_compound_rmap(struct page *page)
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 
-	if (nr) {
+	if (nr)
 		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
-		deferred_split_huge_page(page);
-	}
 }
 
 /**
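
For readers skimming the free_transhuge_page() hunk above: it turns an unconditional lock/unlock into a double-checked pattern, relying on the fact that nothing can be adding the page to the deferred-split list at free time. The sketch below is a minimal userspace illustration of that pattern, not kernel code; the node/list helpers and queue_lock are made-up names.

/* dcheck.c - illustrative double-checked list removal (not kernel code) */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* points to itself when off-list */
};

static pthread_spinlock_t queue_lock;

static bool node_on_list(const struct node *n)
{
	return n->next != n;		/* cheap check, no lock taken */
}

static void node_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;
}

/* Caller guarantees nobody can concurrently *add* n to the queue. */
static void release_node(struct node *n)
{
	if (node_on_list(n)) {			/* unlocked fast path */
		pthread_spin_lock(&queue_lock);
		if (node_on_list(n))		/* re-check under the lock */
			node_del_init(n);
		pthread_spin_unlock(&queue_lock);
	}
	/* ...free the object itself here... */
}

int main(void)
{
	struct node n = { &n, &n };		/* starts off-list */

	pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);
	release_node(&n);			/* fast path: lock never taken */
	printf("on list after release: %d\n", node_on_list(&n));
	pthread_spin_destroy(&queue_lock);
	return 0;
}

The mmu_gather.c hunk is, in isolation, chunked array processing: free at most 512 pages per call and yield between chunks so a large batch does not hog the CPU. A userspace sketch of the same loop shape, again with stand-in names:

/* chunked.c - illustrative chunked release with a yield point */
#include <sched.h>
#include <stdio.h>

#define MAX_CHUNK 512U

/* Stand-in for free_pages_and_swap_cache(); just reports the chunk. */
static void release_chunk(int *items, unsigned int nr)
{
	printf("releasing %u items starting at %p\n", nr, (void *)items);
}

static void release_all(int *items, unsigned int nr)
{
	while (nr) {
		unsigned int chunk = nr < MAX_CHUNK ? nr : MAX_CHUNK;

		release_chunk(items, chunk);
		items += chunk;
		nr -= chunk;

		sched_yield();	/* userspace stand-in for cond_resched() */
	}
}

int main(void)
{
	static int items[1200];

	release_all(items, 1200);
	return 0;
}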