diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 70176fc3f94c3aeecf8a58067171aeaeea2f404f..0c80f5948f1036c46425f6f2ea310418e32cee22 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -203,6 +203,14 @@ page by writing 0 or enable it back by writing 1::
 	echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page
 	echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page
 
+Zero page full CoW is enabled by default. When it is enabled, a write
+fault on a PMD-mapped huge zero page makes the kernel try to allocate
+a PMD-sized page for the copy. It's possible to disable zero page
+full CoW by writing 0 or enable it back by writing 1::
+
+	echo 0 >/sys/kernel/mm/transparent_hugepage/zero_page_full_cow
+	echo 1 >/sys/kernel/mm/transparent_hugepage/zero_page_full_cow
+
 Some userspace (such as a test program, or an optimized memory allocation
 library) may want to know the size (in bytes) of a PMD-mappable transparent
 hugepage::
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfe42c43b55b679b02b35f362acee6f28d1d145c..47a6634e74c4837597343151644ac52e5f4e9c70 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -57,6 +57,7 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FILE_MAPPING_ALIGN_FLAG,
 	TRANSPARENT_HUGEPAGE_ANON_MAPPING_ALIGN_FLAG,
 	TRANSPARENT_HUGEPAGE_ANON_MAPPING_PMD_ALIGN_FLAG,
+	TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG,
 };
 
 struct kobject;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a28dda79997820f95a241fed211ce489cd80dc19..11162cf63f449a3e14b170b517e162dc9f7ca218 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -603,6 +603,22 @@ static ssize_t thp_mapping_align_store(struct kobject *kobj,
 static struct kobj_attribute thp_mapping_align_attr =
 	__ATTR_RW(thp_mapping_align);
 
+static ssize_t zero_page_full_cow_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	return single_hugepage_flag_show(kobj, attr, buf,
+					 TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG);
+}
+static ssize_t zero_page_full_cow_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	return single_hugepage_flag_store(kobj, attr, buf, count,
+					  TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG);
+}
+
+static struct kobj_attribute zero_page_full_cow_attr =
+	__ATTR_RW(zero_page_full_cow);
+
 static struct attribute *hugepage_attr[] = {
 	&enabled_attr.attr,
 	&defrag_attr.attr,
@@ -615,6 +631,7 @@ static struct attribute *hugepage_attr[] = {
 	&file_enabled_attr.attr,
 	&thp_exec_enabled_attr.attr,
 	&thp_mapping_align_attr.attr,
+	&zero_page_full_cow_attr.attr,
 	NULL,
 };
 
@@ -2014,6 +2031,10 @@ static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
 	struct folio *folio;
 	vm_fault_t ret = 0;
 
+	if (!test_bit(TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG,
+		      &transparent_hugepage_flags))
+		return VM_FAULT_FALLBACK;
+
 	folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
 	if (unlikely(!folio))
 		return VM_FAULT_FALLBACK;
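
For context, here is a minimal sketch of the caller side that the new
VM_FAULT_FALLBACK return relies on. It is not part of this patch: the
function name sketch_huge_pmd_wp is hypothetical, and it assumes an
upstream-style do_huge_pmd_wp_page() that dispatches huge-zero-page
write faults to do_huge_zero_wp_pmd() and splits the PMD when fallback
is requested, so disabling zero_page_full_cow degrades to per-page CoW
rather than failing the fault.

/*
 * Sketch only, not from this patch. Assumes the upstream-style
 * structure of do_huge_pmd_wp_page(); names and layout here are
 * illustrative assumptions.
 */
static vm_fault_t sketch_huge_pmd_wp(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	if (is_huge_zero_pmd(vmf->orig_pmd)) {
		vm_fault_t ret = do_huge_zero_wp_pmd(vmf);

		/*
		 * With zero_page_full_cow disabled (or when the
		 * PMD-sized allocation fails), ret carries
		 * VM_FAULT_FALLBACK and we fall through to split.
		 */
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	}

	/* Split to base pages; the write fault is then served per-page. */
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}

Under that assumption the knob is purely a policy switch: turning it
off never fails the fault, it only changes whether the huge zero page
is copied with one PMD-sized allocation or split and copied base page
by base page.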