From 88469a11aa1397cf6a65a501eabab3315e2cdef7 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 26 Aug 2025 19:12:18 +0800 Subject: [PATCH 1/2] fs/proc/task_mmu: convert smaps_hugetlb_range() to work on folios mainline inclusion from mainline-v6.10-rc1 commit 6401a2e6900843a77a27873c0529dea68f61193d category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICRU7W Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=6401a2e6900843a77a27873c0529dea68f61193d ------------------------------------------- Let's get rid of another page_mapcount() check and simply use folio_likely_mapped_shared(), which is precise for hugetlb folios. While at it, use huge_ptep_get() + pte_page() instead of ptep_get() + vm_normal_page(), just like we do in pagemap_hugetlb_range(). No functional change intended. Link: https://lkml.kernel.org/r/20240417092313.753919-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Muchun Song Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 46b4c39a12db..98e1a2ef25af 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -727,19 +727,20 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; - struct page *page = NULL; - pte_t ptent = ptep_get(pte); + pte_t ptent = huge_ptep_get(pte); + struct folio *folio = NULL; if (pte_present(ptent)) { - page = vm_normal_page(vma, addr, ptent); + folio = page_folio(pte_page(ptent)); } else if (is_swap_pte(ptent)) { swp_entry_t swpent = pte_to_swp_entry(ptent); if (is_pfn_swap_entry(swpent)) - page = pfn_swap_entry_to_page(swpent); + folio = pfn_swap_entry_folio(swpent); } - if (page) { - if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte)) + if (folio) { + if (folio_likely_mapped_shared(folio) || + 
hugetlb_pmd_shared(pte)) mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); else mss->private_hugetlb += huge_page_size(hstate_vma(vma)); -- Gitee From 537b86446b51c40ffe116fa28ad86a0d377f2ddc Mon Sep 17 00:00:00 2001 From: Jinjiang Tu Date: Tue, 26 Aug 2025 19:12:19 +0800 Subject: [PATCH 2/2] mm/smaps: fix race between smaps_hugetlb_range and migration mainline inclusion from mainline-v6.17-rc2 commit 45d19b4b6c2d422771c29b83462d84afcbb33f01 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICRU7W Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=45d19b4b6c2d422771c29b83462d84afcbb33f01 -------------------------------- smaps_hugetlb_range() handles the pte without holding ptl, and may be concurrent with migration, leading to BUG_ON in pfn_swap_entry_to_page(). The race is as follows. smaps_hugetlb_range migrate_pages huge_ptep_get remove_migration_ptes folio_unlock pfn_swap_entry_folio BUG_ON To fix it, hold ptl lock in smaps_hugetlb_range(). 
Link: https://lkml.kernel.org/r/20250724090958.455887-1-tujinjiang@huawei.com Link: https://lkml.kernel.org/r/20250724090958.455887-2-tujinjiang@huawei.com Fixes: 25ee01a2fca0 ("mm: hugetlb: proc: add hugetlb-related fields to /proc/PID/smaps") Signed-off-by: Jinjiang Tu Acked-by: David Hildenbrand Cc: Andrei Vagin Cc: Andrii Nakryiko Cc: Baolin Wang Cc: Brahmajit Das Cc: Catalin Marinas Cc: Christophe Leroy Cc: David Rientjes Cc: Dev Jain Cc: Hugh Dickins Cc: Joern Engel Cc: Kefeng Wang Cc: Lorenzo Stoakes Cc: Michal Hocko Cc: Ryan Roberts Cc: Thiago Jung Bauermann Signed-off-by: Andrew Morton Conflicts: fs/proc/task_mmu.c [Context conflicts] Signed-off-by: Jinjiang Tu --- fs/proc/task_mmu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 98e1a2ef25af..8a691365061c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -727,9 +727,12 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; - pte_t ptent = huge_ptep_get(pte); struct folio *folio = NULL; + spinlock_t *ptl; + pte_t ptent; + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte); + ptent = huge_ptep_get(pte); if (pte_present(ptent)) { folio = page_folio(pte_page(ptent)); } else if (is_swap_pte(ptent)) { @@ -745,6 +748,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, else mss->private_hugetlb += huge_page_size(hstate_vma(vma)); } + spin_unlock(ptl); return 0; } #else -- Gitee