diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2e5d1e238af958e8dcdd07b498f990421b27cc02..bfb5065fb99e18bdda029dec91d27f20217a2d7a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -940,7 +940,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 						unsigned long vaddr)
 {
-	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA;
 
 	/*
 	 * If the page is mapped with PROT_MTE, initialise the tags at the
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 665f06675c834e45f9624b1a990d658f91eb99f3..ae6479d04887863244bc722df214a93bd603072d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,18 +15,24 @@ struct vm_area_struct;
 
 static inline int gfp_migratetype(const gfp_t gfp_flags)
 {
+	unsigned int ret_mt = 0;
+
 	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
 	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
-	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
-	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
-		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
 
 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	ret_mt = (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+
+#ifdef CONFIG_CMA_REUSE
+	if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
+		return MIGRATE_CMA;
+#endif
+
+	return ret_mt;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c571050ad410e0dcf0718b6477292b..8da03639eab18cc450b0ac886f9e07384394ffde 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -53,8 +53,9 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_SKIP_ZERO	0
 #define ___GFP_SKIP_KASAN	0
 #endif
+#define ___GFP_CMA		0x4000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x4000000u
+#define ___GFP_NOLOCKDEP	0x8000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
@@ -72,6 +73,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
 #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
 
 /**
  * DOC: Page mobility and placement hints
@@ -249,7 +251,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 75607d4ba26cb7b75802522445aba6cbb8d2b681..ccef475c0d444a481dd321d91e05bc87700c5d1d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -226,7 +226,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 {
 	struct folio *folio;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0, vma, vaddr, false);
 	if (folio)
 		clear_user_highpage(&folio->page, vaddr);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0f62786269d0c1e789d4266ecf6d7d866fd546db..1c6a3256c43ceb8db2f6df72262cd83d74a34644 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,9 +46,12 @@ enum migratetype {
 	MIGRATE_UNMOVABLE,
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
+#ifdef CONFIG_CMA_REUSE
+	MIGRATE_CMA,
+#endif
 	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
 	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
 	 * ZONE_MOVABLE works.  Only movable pages can be allocated
@@ -78,6 +81,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #  define is_migrate_cma_page(_page) false
 #endif
 
+#ifdef CONFIG_CMA_REUSE
+# define get_cma_migratetype() MIGRATE_CMA
+#else
+# define get_cma_migratetype() MIGRATE_MOVABLE
+#endif
+
 static inline bool is_migrate_movable(int mt)
 {
 	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
diff --git a/mm/Kconfig b/mm/Kconfig
index 264a2df5ecf5b91a2883e4594bd7707219ca309c..d4a44d608f868e2bb9141b2b827d95b38aedef91 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -35,6 +35,16 @@ config ZSWAP
 	  in the case where decompressing from RAM is faster than swap device
 	  reads, can also improve workload performance.
 
+config CMA_REUSE
+	bool "CMA reuse feature"
+	depends on CMA
+	help
+	  If enabled, MIGRATE_CMA is added to the pcp lists, and movable
+	  allocations that pass the __GFP_CMA flag are served from CMA
+	  areas before regular movable areas.
+
+	  This improves the utilization of CMA areas.
+
 config ZSWAP_DEFAULT_ON
 	bool "Enable the compressed cache for swap pages by default"
 	depends on ZSWAP
diff --git a/mm/compaction.c b/mm/compaction.c
index 38c8d216c6a3bffd9d75fd430981558c66614750..080aaf0f80a7bae9c5525a2aa3337d5127e2ec14 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2235,7 +2235,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-		if (migratetype == MIGRATE_MOVABLE &&
+		if (migratetype == get_cma_migratetype() &&
 		    !free_area_empty(area, MIGRATE_CMA))
 			return COMPACT_SUCCESS;
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index afed33fd876128ac3b53db3e8da303b353c4c991..667de7ed5c3fd71d5fa2e7c4227a09d02d84bee1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -275,8 +275,11 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Movable",
 	"Reclaimable",
+#ifdef CONFIG_CMA_REUSE
+	"CMA",
+#endif
 	"HighAtomic",
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
 	"CMA",
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -2078,6 +2081,27 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 }
 
+static __always_inline struct page *
+__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
+			 int migratetype, unsigned int alloc_flags)
+{
+	struct page *page = NULL;
+retry:
+	page = __rmqueue_smallest(zone, order, migratetype);
+
+	if (unlikely(!page) && is_migrate_cma(migratetype)) {
+		migratetype = MIGRATE_MOVABLE;
+		alloc_flags &= ~ALLOC_CMA;
+		page = __rmqueue_smallest(zone, order, migratetype);
+	}
+
+	if (unlikely(!page) &&
+	    __rmqueue_fallback(zone, order, migratetype, alloc_flags))
+		goto retry;
+
+	return page;
+}
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2088,6 +2112,12 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
+#ifdef CONFIG_CMA_REUSE
+	page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
+	if (page)
+		return page;
+#endif
+
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
@@ -3034,7 +3064,7 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
 						  unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
-	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+	if (gfp_migratetype(gfp_mask) == get_cma_migratetype())
 		alloc_flags |= ALLOC_CMA;
 #endif
 	return alloc_flags;
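
Usage note (illustrative sketch, not part of the patch): with CONFIG_CMA_REUSE
enabled, a movable allocation that also passes __GFP_CMA is classified as
MIGRATE_CMA by gfp_migratetype() and is served from CMA pageblocks first,
falling back to MIGRATE_MOVABLE inside __rmqueue_with_cma_reuse(). A caller
opts in the same way the highmem.h hunk above does; the helper below is
hypothetical and only shows the intended flag combination:

	/* Hypothetical example, mirroring the highmem.h change above. */
	static struct page *alloc_movable_page_cma_first(void)
	{
		/* Movable, zeroed user allocation that prefers CMA pageblocks. */
		gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA;

		return alloc_page(gfp);
	}

Allocations that do not pass __GFP_CMA keep the existing behaviour, since
gfp_migratetype() only returns MIGRATE_CMA when both CONFIG_CMA_REUSE and the
flag are present.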