diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index 2bc8b1f69c93cbff94520d98a3e15302bc87f367..7395db4f44b09319f061080cdb8d2cff38687055 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -3,6 +3,7 @@
 #define __LINUX_BOOTMEM_INFO_H
 
 #include <linux/mm.h>
+#include <linux/kmemleak.h>
 
 /*
  * Types for free bootmem stored in page->lru.next. These have to be in
@@ -59,6 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
 
 static inline void free_bootmem_page(struct page *page)
 {
+	kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
 	free_reserved_page(page);
 }
 #endif
diff --git a/mm/compaction.c b/mm/compaction.c
index 94ab4e9d1eb644766d4518feeb1fad7dce5bf963..6035aa46c8ac85d762fa815f0a9511505b70d342 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -599,10 +599,11 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (PageCompound(page)) {
 			const unsigned int order = compound_order(page);
 
-			if (likely(order < MAX_ORDER)) {
+			if (blockpfn + (1UL << order) <= end_pfn) {
 				blockpfn += (1UL << order) - 1;
 				cursor += (1UL << order) - 1;
 			}
+
 			goto isolate_fail;
 		}
 
@@ -657,8 +658,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		spin_unlock_irqrestore(&cc->zone->lock, flags);
 
 	/*
-	 * There is a tiny chance that we have read bogus compound_order(),
-	 * so be careful to not go outside of the pageblock.
+	 * Be careful to not go outside of the pageblock.
 	 */
 	if (unlikely(blockpfn > end_pfn))
 		blockpfn = end_pfn;
@@ -965,29 +965,29 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			goto isolate_fail;
 		}
 
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		if (unlikely(!get_page_unless_zero(page)))
+			goto isolate_fail;
+
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
 		 * so avoid taking lru_lock and isolating it unnecessarily in an
 		 * admittedly racy check.
 		 */
 		if (!page_mapping(page) &&
-		    page_count(page) > page_mapcount(page))
-			goto isolate_fail;
+		    (page_count(page) - 1) > total_mapcount(page))
+			goto isolate_fail_put;
 
 		/*
 		 * Only allow to migrate anonymous pages in GFP_NOFS context
 		 * because those do not depend on fs locks.
 		 */
 		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
-			goto isolate_fail;
-
-		/*
-		 * Be careful not to clear PageLRU until after we're
-		 * sure the page is not being freed elsewhere -- the
-		 * page release code relies on it.
-		 */
-		if (unlikely(!get_page_unless_zero(page)))
-			goto isolate_fail;
+			goto isolate_fail_put;
 
 		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
 			goto isolate_fail_put;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eb293d17a1049f37c7286e93e822aec5911a531a..e41be42456673f90d17c5d78a2609913ddca14eb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2882,11 +2882,8 @@ static int split_huge_pages_set(void *data, u64 val)
 	for_each_populated_zone(zone) {
 		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
-			if (!pfn_valid(pfn))
-				continue;
-
-			page = pfn_to_page(pfn);
-			if (!get_page_unless_zero(page))
+			page = pfn_to_online_page(pfn);
+			if (!page || !get_page_unless_zero(page))
 				continue;
 
 			if (zone != page_zone(page))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 54e2eefdf0b46b61a25fe32416106048f0813251..5f04adac38bb9a2972cbb88ff6fcf74599d48376 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3048,7 +3048,9 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	if (nid != NUMA_NO_NODE) {
 		unsigned long old_count = count;
 
-		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+		count += persistent_huge_pages(h) -
+			 (h->nr_huge_pages_node[nid] -
+			  h->surplus_huge_pages_node[nid]);
 		/*
 		 * User may have specified a large count value which caused the
 		 * above calculation to overflow. In this case, they wanted
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 7ec8560d267d7f205ea16f112c504b30da727233..460325ad729eab52c37840bb417bc3c1c62c5231 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -254,7 +254,7 @@ int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
 	 * discarded vmemmap pages must be allocated and remapping.
 	 */
 	ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
-				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
+				  GFP_KERNEL | __GFP_NORETRY);
 	if (!ret) {
 		ClearHPageVmemmapOptimized(head);
 		static_branch_dec(&hugetlb_optimize_vmemmap_key);