diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index f9dc42f4451f5f90d7fdd1ad8a399cdf76bf48ad..e569df5c999d6dfd4d7173bee1426e7280ab5c4c 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -211,12 +211,10 @@ possible to enable/disable it by configurate the corresponding bit::
 	echo 0x2 >/sys/kernel/mm/transparent_hugepage/thp_exec_enabled
 	echo 0x3 >/sys/kernel/mm/transparent_hugepage/thp_exec_enabled
 
-The kernel could try to enable mappings for different sizes, eg, 64K on
-arm64, BIT0 for file mapping, BIT1 for anonymous mapping, and THP size
-page, BIT2 for anonymous mapping, where 2M anonymous mapping for arm64
-is dependent on BIT2 being turned on, the above feature are disabled by
-default, and could enable the above feature by writing the corresponding
-bit to 1::
+The kernel can try to align mappings to different hugepage sizes: BIT0
+for 64K file mappings, BIT1 for 64K anonymous mappings, and BIT2 for
+PMD-sized anonymous mappings. These features are disabled by default
+and can be enabled by writing the corresponding bit to 1::
 
 	echo 0x1 >/sys/kernel/mm/transparent_hugepage/thp_mapping_align
 	echo 0x4 >/sys/kernel/mm/transparent_hugepage/thp_mapping_align
@@ -232,6 +230,12 @@ it back by writing 0::
 
 	echo 0 >/sys/kernel/mm/transparent_hugepage/pcp_allow_high_order
 	echo 4 >/sys/kernel/mm/transparent_hugepage/pcp_allow_high_order
 
+The kernel can enable or disable file-backed hugepages; this setting
+has no effect on already existing page cache::
+
+	echo always >/sys/kernel/mm/transparent_hugepage/file_enabled
+	echo never >/sys/kernel/mm/transparent_hugepage/file_enabled
+
 khugepaged will be automatically started when PMD-sized THP is enabled
 (either of the per-size anon control or the top-level control are set to
 "always" or "madvise"), and it'll be automatically shutdown when
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1474fd9c63ad2cc58cf6cf5c41da4aa3cfbc679f..ae45391c693dcf1ea8c471f34d8a2686882e2bd3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -51,6 +51,7 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
 	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
+	TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG,
 	TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG,
 	TRANSPARENT_HUGEPAGE_FILE_EXEC_MTHP_FLAG,
 	TRANSPARENT_HUGEPAGE_FILE_MAPPING_ALIGN_FLAG,
@@ -308,9 +309,9 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
 	(transparent_hugepage_flags &					\
 	 (1<<[...]

[...]

 	if (mm_in_dynamic_pool(current->mm))
 		order = 0;
 	if (order > MAX_PAGECACHE_ORDER)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a62c4dc2b9da750a9acf4fee8b2c01681ab3d3ec..862e1c13596717d546253de72abf07d3e7c53111 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -65,7 +65,8 @@ unsigned long transparent_hugepage_flags __read_mostly =
 #endif
 	(1<<[...]

[...]

-	mapping = filp->f_mapping;
-	if (!mapping || !mapping_large_folio_support(mapping))
+	if (!mthp_file_mapping_align())
 		return false;
-	return true;
-}
-
-static bool anon_mapping_align_enabled(int order)
-{
-	unsigned long mask;
-
-	if (!thp_anon_mapping_align_enabled())
-		return 0;
-
-	mask = READ_ONCE(huge_anon_orders_always) |
-	       READ_ONCE(huge_anon_orders_madvise);
-
-	if (hugepage_global_enabled())
-		mask |= READ_ONCE(huge_anon_orders_inherit);
-
-	mask = BIT(order) & mask;
-	if (!mask)
-		return false;
-
-	return true;
-}
-
-static unsigned long folio_get_unmapped_area(struct file *filp, unsigned long addr,
-					     unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-	int order = arch_wants_exec_folio_order();
-
-	if (order < 0)
-		return 0;
+	mapping = filp->f_mapping;
 
-	if (file_mapping_align_enabled(filp) ||
-	    (!filp && anon_mapping_align_enabled(order)))
-		return __thp_get_unmapped_area(filp, addr, len, pgoff, flags,
-					       PAGE_SIZE << order);
-	return 0;
+	return mapping && mapping_large_folio_support(mapping);
 }
 
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -1118,13 +1120,23 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long ret;
 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 
-	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
-	if (ret)
-		return ret;
+	if (filp || thp_anon_mapping_align()) {
+		ret = __thp_get_unmapped_area(filp, addr, len, off, flags,
+					      PMD_SIZE);
+		if (ret)
+			return ret;
+	}
 
-	ret = folio_get_unmapped_area(filp, addr, len, off, flags);
-	if (ret)
-		return ret;
+	if (mthp_mapping_align_enabled(filp)) {
+		int order = arch_wants_exec_folio_order();
+
+		if (order >= 0) {
+			ret = __thp_get_unmapped_area(filp, addr, len, off,
+						      flags, PAGE_SIZE << order);
+			if (ret)
+				return ret;
+		}
+	}
 
 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 27ba0bb1acde01911457c71ffd2f99e4b676c519..230065e77fc382deb38aa397a24a559b7b4e26d1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1859,8 +1859,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		 * so use shmem's get_unmapped_area in case it can be huge.
 		 */
 		get_area = shmem_get_unmapped_area;
-	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-		   thp_anon_mapping_pmd_align()) {
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 		/* Ensures that larger anonymous mappings are THP aligned. */
 		get_area = thp_get_unmapped_area;
 	}
diff --git a/mm/readahead.c b/mm/readahead.c
index d0b3de43cf23b821b580c092f8a7c68bac5efb6b..ab1c61f0c0360911dceb5ac9ba2ff88d734f8cc3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -504,6 +504,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
 
 	if (!mapping_large_folio_support(mapping) || ra->size < 4)
 		goto fallback;
+	if (!file_mthp_enabled())
+		goto fallback;
 	if (mm_in_dynamic_pool(current->mm))
 		goto fallback;
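
Review note: the definition of file_mthp_enabled() falls inside the truncated
include/linux/huge_mm.h hunk above, so only its call sites survive in this
excerpt. Judging from the new TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG enum entry
and the (transparent_hugepage_flags & (1 << ...)) test pattern that header
already uses, the helper presumably reduces to a one-bit test along the
following lines; this is a sketch of the assumed definition, not a hunk from
the patch:

/*
 * Sketch only: the real definition sits in the truncated huge_mm.h hunk.
 * Assumed to follow the same pattern as the other test macros on
 * transparent_hugepage_flags, i.e. report whether the FILE_MTHP bit is set.
 */
#define file_mthp_enabled()					\
	(transparent_hugepage_flags &				\
	 (1 << TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG))

Under that reading, writing "always" or "never" to the new file_enabled sysfs
file sets or clears the bit, and page_cache_ra_order() falls back to order-0
readahead while the bit is clear, which is consistent with the documented
"no effect on already existing page cache" behaviour.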
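
The mm/mmap.c change makes thp_get_unmapped_area() the get_area for anonymous
mappings whenever CONFIG_TRANSPARENT_HUGEPAGE is on, with the gating moved
inside via thp_anon_mapping_align(). A quick way to observe the alignment
effect from userspace is the standalone program below; it is an illustration
only, not part of the patch, and assumes a 2M PMD (4K base pages on x86_64 or
arm64):

#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

/* Assumed PMD size: 2M with 4K base pages. */
#define PMD_SIZE_BYTES	(2UL << 20)

int main(void)
{
	/* Request well over one PMD so the kernel may round the start up. */
	size_t len = 4 * PMD_SIZE_BYTES;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("addr=%p PMD-aligned=%s\n", p,
	       ((uintptr_t)p % PMD_SIZE_BYTES) == 0 ? "yes" : "no");
	munmap(p, len);
	return 0;
}

With the patch applied, the printed answer for anonymous mappings should track
the thp_mapping_align BIT2 setting rather than being decided in
get_unmapped_area() by the removed thp_anon_mapping_pmd_align() check, since
__thp_get_unmapped_area(..., PMD_SIZE) is now only attempted when
thp_anon_mapping_align() reports the feature enabled.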