diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 7a67d997eecea5b97b482b8e0606420a09733a06..6878081cbbc1a58a7aa794e711deefa2cff76c80 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -312,7 +312,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
 {
 	struct page *page = &folio->page;
 
-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
 	return &page[n].flags;
 }
diff --git a/mm/debug.c b/mm/debug.c
index 0dd516a6640e03ef069d81ba1b87299622a54c07..df3949e49eb307615300c4eaebb50139ba130ecd 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -111,19 +111,22 @@ static void __dump_page(const struct page *page)
 {
 	struct folio *foliop, folio;
 	struct page precise;
+	unsigned long head;
 	unsigned long pfn = page_to_pfn(page);
 	unsigned long idx, nr_pages = 1;
 	int loops = 5;
 
 again:
 	memcpy(&precise, page, sizeof(*page));
-	foliop = page_folio(&precise);
-	if (foliop == (struct folio *)&precise) {
+	head = precise.compound_head;
+	if ((head & 1) == 0) {
+		foliop = (struct folio *)&precise;
 		idx = 0;
 		if (!folio_test_large(foliop))
 			goto dump;
 		foliop = (struct folio *)page;
 	} else {
+		foliop = (struct folio *)(head - 1);
 		idx = folio_page_idx(foliop, page);
 	}
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 77a8947b8e5e55c9b28fed1a9241dae7e9742e28..5dbd03d52bcae4b5a7f780977753bcd3767c2ad9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3547,10 +3547,10 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
 			continue;
 		if (xa_is_value(folio))
 			continue;
-		if (folio_test_locked(folio))
-			continue;
 		if (!folio_try_get(folio))
 			continue;
+		if (folio_test_locked(folio))
+			goto skip;
 		/* Has the page moved or been split? */
 		if (unlikely(folio != xas_reload(xas)))
 			goto skip;
diff --git a/mm/mmap.c b/mm/mmap.c
index dfa3d2bfe289101325e6e69ef46a054020ea15f3..9fbc02eec7bacc876047d36e57917e7ae6f3f849 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1900,7 +1900,8 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 */
 		get_area = shmem_get_unmapped_area;
-	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
+		   && !addr /* no hint */) {
 		/* Ensures that larger anonymous mappings are THP aligned. */
 		get_area = thp_get_unmapped_area;
 	}
diff --git a/mm/shmem.c b/mm/shmem.c
index 1440f17c5d02d94036b3f44c96b8c8f25ffa3f94..5b39db2f6cfeacd5421e058ca0206ec56b9340e7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -778,6 +778,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static void shmem_update_stats(struct folio *folio, int nr_pages)
+{
+	if (folio_test_pmd_mappable(folio))
+		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+}
+
 /*
  * Somewhat like filemap_add_folio, but error if expected item has gone.
  */
@@ -812,10 +820,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
 		xas_store(&xas, folio);
 		if (xas_error(&xas))
 			goto unlock;
-		if (folio_test_pmd_mappable(folio))
-			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
-		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
+		shmem_update_stats(folio, nr);
 		shmem_reliable_folio_add(folio, nr);
 		mapping->nrpages += nr;
 unlock:
@@ -844,8 +849,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
 	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
 	folio->mapping = NULL;
 	mapping->nrpages -= nr;
-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
-	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+	shmem_update_stats(folio, -nr);
 	shmem_reliable_folio_add(folio, -nr);
 	xa_unlock_irq(&mapping->i_pages);
 	folio_put_refs(folio, nr);
@@ -1977,11 +1981,9 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	}
 	if (!error) {
 		mem_cgroup_replace_folio(old, new);
-		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
-		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
+		shmem_update_stats(new, nr_pages);
 		shmem_reliable_folio_add(new, nr_pages);
-		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
-		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
+		shmem_update_stats(old, -nr_pages);
 		shmem_reliable_folio_add(old, -nr_pages);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);