diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7da9e84c2f95e0431fe20e091c956989977713e2..b798dcc1464b2454307a13310f0c03fbadbae9a7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3012,6 +3012,21 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 	return 1;
 }
 
+static void hugetlb_drain_movable_pcp(struct hstate *h, int nid)
+{
+	pg_data_t *pgdat = NODE_DATA(nid);
+	struct zone *zone;
+
+	/*
+	 * Only ZONE_MOVABLE needs to be drained, as it is the only
+	 * zone that can be used exclusively by hugetlb.
+	 */
+	zone = &pgdat->node_zones[ZONE_MOVABLE];
+
+	if (atomic_long_read(&zone->managed_pages))
+		drain_all_pages(zone);
+}
+
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
@@ -3094,6 +3109,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 		break;
 	}
 
+	/*
+	 * Drain the pcp lists of the movable zone to increase the success
+	 * rate of the hugetlb page allocation below.
+	 */
+	if (count > persistent_huge_pages(h))
+		hugetlb_drain_movable_pcp(h, nid);
+
 	while (count > persistent_huge_pages(h)) {
 		/*
 		 * If this allocation races such that we no longer need the
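
A quick way to exercise the new path (a hedged userspace sketch, not part of the patch): growing the pool through /proc/sys/vm/nr_hugepages ends up in set_max_huge_pages(), which with this change drains the ZONE_MOVABLE per-CPU pages before attempting to allocate the additional huge pages. The helper below is illustrative only; it does nothing more than "echo N > /proc/sys/vm/nr_hugepages".

/*
 * Hypothetical userspace trigger: request N persistent huge pages so that
 * set_max_huge_pages() (and the new pcp drain) runs in the kernel.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = "/proc/sys/vm/nr_hugepages";
	char buf[32];
	int fd, len;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <nr_hugepages>\n", argv[0]);
		return 1;
	}

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	len = snprintf(buf, sizeof(buf), "%s\n", argv[1]);
	if (write(fd, buf, len) != len)
		perror("write");

	close(fd);
	return 0;
}

With the patch applied, a pool increase requested this way should be somewhat more likely to succeed on a system where free ZONE_MOVABLE pages are sitting on per-CPU lists, since those pages are returned to the buddy allocator before the huge page allocation loop starts.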