From 7c67a84f1ca40577e6a5fde2b8a4fd9f811b3cca Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Tue, 16 Jul 2024 14:34:17 +0800
Subject: [PATCH 01/10] add cma memory area reuse

Signed-off-by: nan_tu
---
 include/linux/gfp.h       | 12 ++++++++++-
 include/linux/gfp_types.h |  8 ++++++--
 include/linux/mmzone.h    | 11 +++++++++-
 mm/Kconfig                | 10 ++++++++++
 mm/compaction.c           |  2 +-
 mm/page_alloc.c           | 42 ++++++++++++++++++++++++++++++++++++---
 6 files changed, 77 insertions(+), 8 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 665f06675c83..827a7b7e3d27 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,6 +15,8 @@ struct vm_area_struct;

 static inline int gfp_migratetype(const gfp_t gfp_flags)
 {
+    unsigned int ret_mt = 0;
+
     VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
     BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
     BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
@@ -26,7 +28,15 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
         return MIGRATE_UNMOVABLE;

     /* Group based on mobility */
-    return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+    /*return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;*/
+    ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+
+#ifdef CONFIG_CMA_REUSE
+    if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
+        return MIGRATE_CMA;
+#endif
+
+    return ret_mt;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c5..16de62fc891c 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -46,6 +46,7 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_THISNODE    0x200000u
 #define ___GFP_ACCOUNT     0x400000u
 #define ___GFP_ZEROTAGS    0x800000u
+#define ___GFP_CMA         0x800000u
 #ifdef CONFIG_KASAN_HW_TAGS
 #define ___GFP_SKIP_ZERO   0x1000000u
 #define ___GFP_SKIP_KASAN  0x2000000u
@@ -54,7 +55,8 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_SKIP_KASAN  0
 #endif
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP   0x4000000u
+/*#define ___GFP_NOLOCKDEP 0x4000000u*/
+#define ___GFP_NOLOCKDEP   0x1000000u
 #else
 #define ___GFP_NOLOCKDEP   0
 #endif
@@ -71,6 +73,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_HIGHMEM  ((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32    ((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE  ((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+#define __GFP_CMA      ((__force gfp_t)___GFP_CMA)
 #define GFP_ZONEMASK   (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

 /**
@@ -249,7 +252,8 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
+/*#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))*/
+#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0f62786269d0..58be39d6b951 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,8 +47,11 @@ enum migratetype {
     MIGRATE_MOVABLE,
     MIGRATE_RECLAIMABLE,
     MIGRATE_PCPTYPES,  /* the number of types on the pcp lists */
+#ifdef CONFIG_CMA_REUSE
+    MIGRATE_CMA,
+#endif
     MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
     /*
      * MIGRATE_CMA migration type is designed to mimic the way
      * ZONE_MOVABLE works. Only movable pages can be allocated
@@ -78,6 +81,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #  define is_migrate_cma_page(_page) false
 #endif

+#ifdef CONFIG_CMA_REUSE
+# define get_cma_migratetype() MIGRATE_CMA
+#else
+# define get_cma_migratetype() MIGRATE_MOVABLE
+#endif
+
 static inline bool is_migrate_movable(int mt)
 {
     return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
diff --git a/mm/Kconfig b/mm/Kconfig
index 264a2df5ecf5..414d96ee780c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -941,6 +941,16 @@ config CMA_AREAS

       If unsure, leave the default value "7" in UMA and "19" in NUMA.

+config CMA_REUSE
+    bool "CMA reuse feature"
+    depends on CMA
+    help
+      If enabled, MIGRATE_CMA is added to the pcp lists, and movable
+      allocations carrying the __GFP_CMA flag use CMA areas before
+      movable areas.
+
+      This improves the utilization ratio of CMA areas.
+
 config MEM_SOFT_DIRTY
     bool "Track memory changes"
     depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
diff --git a/mm/compaction.c b/mm/compaction.c
index 38c8d216c6a3..080aaf0f80a7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2235,7 +2235,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)

 #ifdef CONFIG_CMA
         /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-        if (migratetype == MIGRATE_MOVABLE &&
+        if (migratetype == get_cma_migratetype() &&
             !free_area_empty(area, MIGRATE_CMA))
             return COMPACT_SUCCESS;
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index afed33fd8761..923f296f9f3c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -275,8 +275,11 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
     "Unmovable",
     "Movable",
     "Reclaimable",
+#ifdef CONFIG_CMA_REUSE
+    "CMA",
+#endif
     "HighAtomic",
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
     "CMA",
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -2078,6 +2081,27 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,

 }

+static __always_inline struct page *
+__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
+                         int migratetype, unsigned int alloc_flags)
+{
+    struct page *page = NULL;
+retry:
+    page = __rmqueue_smallest(zone, order, migratetype);
+
+    if (unlikely(!page) && is_migrate_cma(migratetype)) {
+        migratetype = MIGRATE_MOVABLE;
+        alloc_flags &= ~ALLOC_CMA;
+        page = __rmqueue_smallest(zone, order, migratetype);
+    }
+
+    if (unlikely(!page) &&
+        __rmqueue_fallback(zone, order, migratetype, alloc_flags))
+        goto retry;
+
+    return page;
+}
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2088,6 +2112,11 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
     struct page *page;

+#ifdef CONFIG_CMA_REUSE
+    page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
+    goto out;
+#endif
+
     if (IS_ENABLED(CONFIG_CMA)) {
         /*
          * Balance movable allocations between regular and CMA areas by
@@ -2112,6 +2141,10 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
                        alloc_flags))
             goto retry;
     }
+    /*return page;*/
+out:
+    if (page)
+        trace_mm_page_alloc_zone_locked(page, order, migratetype, migratetype == MIGRATE_MOVABLE);
     return page;
 }

@@ -2773,7 +2806,8 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if (likely(pcp_allowed_order(order))) {
+    /*if (likely(pcp_allowed_order(order)))*/
+    if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || migratetype != MIGRATE_MOVABLE || IS_ENABLED(CONFIG_CMA_REUSE)) {
         page = rmqueue_pcplist(preferred_zone, zone, order,
                                migratetype, alloc_flags);
         if (likely(page))
@@ -3034,7 +3068,9 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
                                                   unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
-    if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+    unsigned int pflags = current->flags;
+    /*if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)*/
+    if (!(pflags & PF_MEMALLOC_PIN) && gfp_migratetype(gfp_mask) == get_cma_migratetype())
         alloc_flags |= ALLOC_CMA;
 #endif
     return alloc_flags;
-- 
Gitee
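A note on the mechanism patch 01 introduces, before the series continues: with CONFIG_CMA_REUSE enabled, gfp_migratetype() maps a movable request that also carries __GFP_CMA onto MIGRATE_CMA, the type the new Kconfig text says is kept on the pcp lists. Below is a minimal userspace sketch of that routing; the flag values mirror gfp_types.h, but the function is an illustrative model, not the kernel code, and the debug checks and early-outs are omitted.

/* Userspace model of the gfp_migratetype() change in patch 01. The flag
 * values mirror include/linux/gfp_types.h; the migratetype numbering is
 * simplified (the kernel enum has more entries). */
#include <stdio.h>

#define __GFP_MOVABLE     0x08u
#define __GFP_RECLAIMABLE 0x10u
#define __GFP_CMA         0x800000u  /* the bit this patch adds */
#define GFP_MOVABLE_MASK  (__GFP_MOVABLE | __GFP_RECLAIMABLE)
#define GFP_MOVABLE_SHIFT 3

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE,
                   MIGRATE_RECLAIMABLE, MIGRATE_CMA };

static int gfp_migratetype(unsigned int gfp_flags)
{
    unsigned int ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;

    /* CONFIG_CMA_REUSE behaviour: movable + __GFP_CMA is steered to CMA */
    if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
        return MIGRATE_CMA;
    return ret_mt;
}

int main(void)
{
    printf("%d\n", gfp_migratetype(__GFP_MOVABLE));             /* 1 = MIGRATE_MOVABLE */
    printf("%d\n", gfp_migratetype(__GFP_MOVABLE | __GFP_CMA)); /* 3 = MIGRATE_CMA */
    return 0;
}

Two bit-layout details in patch 01 get revisited later in the series: ___GFP_CMA is given 0x800000u, the same value as ___GFP_ZEROTAGS, while ___GFP_NOLOCKDEP and __GFP_BITS_SHIFT are squeezed down to make room. The context lines of patch 03 show ___GFP_CMA moved to a free 0x4000000u with ___GFP_NOLOCKDEP back at 0x8000000u, and patch 10 settles __GFP_BITS_SHIFT at 27.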
From 7b7b6f42eb792eae1b42352194df4435ad74b138 Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 10:43:47 +0800
Subject: [PATCH 03/10] add cma memory area reuse

---
 include/linux/gfp.h       |  7 +++----
 include/linux/gfp_types.h |  4 +---
 include/linux/mmzone.h    |  2 +-
 mm/page_alloc.c           | 27 +++++++++++++--------------
 4 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 827a7b7e3d27..7050f139cf24 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,7 +15,7 @@ struct vm_area_struct;

 static inline int gfp_migratetype(const gfp_t gfp_flags)
 {
-    unsigned int ret_mt = 0;
+    unsigned int ret_mt = 0;

     VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
     BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
@@ -28,12 +28,11 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
         return MIGRATE_UNMOVABLE;

     /* Group based on mobility */
-    /*return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;*/
-    ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+    ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;

 #ifdef CONFIG_CMA_REUSE
     if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
-        return MIGRATE_CMA;
+        return MIGRATE_CMA;
 #endif

     return ret_mt;
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index d2e210f2d650..336f1b9bef6c 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -56,7 +56,6 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_CMA         0x4000000u
 #ifdef CONFIG_LOCKDEP
 #define ___GFP_NOLOCKDEP   0x8000000u
-/*#define ___GFP_NOLOCKDEP 0x4000000u*/
 #else
 #define ___GFP_NOLOCKDEP   0
 #endif
@@ -252,8 +251,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

 /* Room for N __GFP_FOO bits */
-/*#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))*/
-#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58be39d6b951..00868f1ec926 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -48,7 +48,7 @@ enum migratetype {
     MIGRATE_RECLAIMABLE,
     MIGRATE_PCPTYPES,  /* the number of types on the pcp lists */
 #ifdef CONFIG_CMA_REUSE
-    MIGRATE_CMA,
+    MIGRATE_CMA,
 #endif
     MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 923f296f9f3c..50ea5fb902f1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2085,21 +2085,21 @@ static __always_inline struct page *
 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
                          int migratetype, unsigned int alloc_flags)
 {
-    struct page *page = NULL;
+    struct page *page = NULL;
 retry:
     page = __rmqueue_smallest(zone, order, migratetype);

-    if (unlikely(!page) && is_migrate_cma(migratetype)) {
+    if (unlikely(!page) && is_migrate_cma(migratetype)) {
         migratetype = MIGRATE_MOVABLE;
-        alloc_flags &= ~ALLOC_CMA;
-        page = __rmqueue_smallest(zone, order, migratetype);
+        alloc_flags &= ~ALLOC_CMA;
+        page = __rmqueue_smallest(zone, order, migratetype);
     }

-    if (unlikely(!page) &&
+    if (unlikely(!page) &&
         __rmqueue_fallback(zone, order, migratetype, alloc_flags))
-        goto retry;
+        goto retry;

-    return page;
+    return page;
 }

 /*
@@ -2113,8 +2113,8 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
     struct page *page;

 #ifdef CONFIG_CMA_REUSE
-    page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
-    goto out;
+    page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
+    goto out;
 #endif

     if (IS_ENABLED(CONFIG_CMA)) {
@@ -2141,10 +2141,9 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
                        alloc_flags))
             goto retry;
     }
-    /*return page;*/
 out:
     if (page)
-        trace_mm_page_alloc_zone_locked(page, order, migratetype, migratetype == MIGRATE_MOVABLE);
+        trace_mm_page_alloc_zone_locked(page, order, migratetype, migratetype == MIGRATE_MOVABLE);
     return page;
 }

@@ -2806,8 +2805,9 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    /*if (likely(pcp_allowed_order(order)))*/
-    if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || migratetype != MIGRATE_MOVABLE || IS_ENABLED(CONFIG_CMA_REUSE)) {
+    if ((!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
+        (migratetype != M_MOVABLE) ||
+        IS_ENABLED(CONFIG_CMA_REUSE)) {
         page = rmqueue_pcplist(preferred_zone, zone, order,
                                migratetype, alloc_flags);
         if (likely(page))
@@ -3069,7 +3069,6 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
 {
 #ifdef CONFIG_CMA
     unsigned int pflags = current->flags;
-    /*if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)*/
     if (!(pflags & PF_MEMALLOC_PIN) && gfp_migratetype(gfp_mask) == get_cma_migratetype())
         alloc_flags |= ALLOC_CMA;
 #endif
     return alloc_flags;
-- 
Gitee
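Patch 03 above is largely a whitespace cleanup of __rmqueue_with_cma_reuse(), so this is a good point to restate what that helper does: try the requested free list first; if a CMA request finds it empty, drop back to the movable list; only then take the stealing fallback and retry. A toy model with plain counters standing in for the buddy free lists follows; every name in it is invented for illustration.

/* Toy model of __rmqueue_with_cma_reuse(): CMA first, then movable, then
 * a stealing fallback with retry. Two counters stand in for the buddy
 * free lists; nothing here is a kernel API. */
#include <stdbool.h>
#include <stdio.h>

enum mt { MOVABLE, CMA, NR_MT };
struct toy_zone { int nr_free[NR_MT]; };

static bool toy_rmqueue_smallest(struct toy_zone *z, enum mt t)
{
    if (z->nr_free[t] > 0) {
        z->nr_free[t]--;
        return true;
    }
    return false;
}

static bool toy_rmqueue_fallback(struct toy_zone *z, enum mt t)
{
    /* stand-in for __rmqueue_fallback(): move a block from the other
     * list onto the requested one, then let the caller retry */
    enum mt other = (t == CMA) ? MOVABLE : CMA;
    if (z->nr_free[other] > 0) {
        z->nr_free[other]--;
        z->nr_free[t]++;
        return true;
    }
    return false;
}

static bool toy_rmqueue_with_cma_reuse(struct toy_zone *z, enum mt t)
{
retry:
    if (toy_rmqueue_smallest(z, t))
        return true;
    if (t == CMA) {
        /* CMA list exhausted: drop back to the movable list */
        t = MOVABLE;
        if (toy_rmqueue_smallest(z, t))
            return true;
    }
    if (toy_rmqueue_fallback(z, t))
        goto retry;
    return false;
}

int main(void)
{
    struct toy_zone z = { .nr_free = { [MOVABLE] = 1, [CMA] = 1 } };

    /* A CMA-tagged request drains the CMA list before the movable one. */
    printf("%d\n", toy_rmqueue_with_cma_reuse(&z, CMA)); /* 1, from CMA */
    printf("%d\n", toy_rmqueue_with_cma_reuse(&z, CMA)); /* 1, from MOVABLE */
    printf("%d\n", toy_rmqueue_with_cma_reuse(&z, CMA)); /* 0, both empty */
    return 0;
}

In the kernel function the switch to MIGRATE_MOVABLE also clears ALLOC_CMA, so the subsequent fallback cannot wander back into CMA pageblocks; the toy omits alloc_flags entirely.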
From 240b7cdd23ac650d4a21e4eff8c92fed24af0221 Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 11:12:49 +0800
Subject: [PATCH 04/10] add cma memory area reuse

---
 include/linux/mmzone.h | 2 +-
 mm/page_alloc.c        | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 00868f1ec926..890b01ea6fea 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -62,7 +62,7 @@ enum migratetype {
      * pageblocks to MIGRATE_CMA which can be done by
      * __free_pageblock_cma() function.
      */
-    MIGRATE_CMA,
+    MIGRATE_CMA,
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
     MIGRATE_ISOLATE,  /* can't allocate from here */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 50ea5fb902f1..1a8dfc7789de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2090,10 +2090,10 @@ __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
     page = __rmqueue_smallest(zone, order, migratetype);

     if (unlikely(!page) && is_migrate_cma(migratetype)) {
-        migratetype = MIGRATE_MOVABLE;
+        migratetype = MIGRATE_MOVABLE;
         alloc_flags &= ~ALLOC_CMA;
         page = __rmqueue_smallest(zone, order, migratetype);
-    }
+    }

     if (unlikely(!page) &&
         __rmqueue_fallback(zone, order, migratetype, alloc_flags))
@@ -2805,9 +2805,9 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if ((!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
+    if ( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
         (migratetype != M_MOVABLE) ||
-        IS_ENABLED(CONFIG_CMA_REUSE)) {
+        IS_ENABLED(CONFIG_C_REUSE) ) {
         page = rmqueue_pcplist(preferred_zone, zone, order,
                                migratetype, alloc_flags);
         if (likely(page))
-- 
Gitee

From 87a122e2ba0b4717bd0bfb11f996d7f787fdcdfc Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 11:30:53 +0800
Subject: [PATCH 05/10] add cma memory area reuse

---
 mm/page_alloc.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1a8dfc7789de..9de05859665c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2805,11 +2805,11 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if ( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
-        (migratetype != M_MOVABLE) ||
-        IS_ENABLED(CONFIG_C_REUSE) ) {
-        page = rmqueue_pcplist(preferred_zone, zone, order,
-                               migratetype, alloc_flags);
+    if ( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
+         (migratetype != M_MOVABLE) ||
+         IS_ENABLED(CONFIG_C_REUSE) ) {
+        page = rmqueue_pcplist(preferred_zone, zone, order,
+                               migratetype, alloc_flags);
         if (likely(page))
             goto out;
     }
-- 
Gitee
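The churn in patches 04 through 08 only reshuffles whitespace in this rmqueue() gate, and it carries the non-compiling M_MOVABLE and CONFIG_C_REUSE identifiers forward until patch 09 restores MIGRATE_MOVABLE and CONFIG_CMA_REUSE. The intended predicate is easier to check in isolation; below is a standalone sketch of the gate in the form patch 09 finally settles on, with all names invented for illustration.

/* The pcplist gate from patch 09, lifted into a standalone predicate.
 * Inputs model IS_ENABLED(CONFIG_CMA), ALLOC_CMA, the migratetype and
 * IS_ENABLED(CONFIG_CMA_REUSE). */
#include <assert.h>
#include <stdbool.h>

static bool use_pcplist(bool cma_enabled, bool alloc_cma,
                        bool movable, bool cma_reuse)
{
    /* Skip the pcp lists only for a movable allocation that must not
     * dip into CMA while CONFIG_CMA is on and CMA_REUSE is off. */
    return !cma_enabled || alloc_cma || !movable || cma_reuse;
}

int main(void)
{
    /* movable request without ALLOC_CMA, CMA on, no reuse: bypass the
     * pcp lists, which may hold CMA-backed pages */
    assert(!use_pcplist(true, false, true, false));
    /* with CMA_REUSE the pcp lists are taken again */
    assert(use_pcplist(true, false, true, true));
    return 0;
}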
From 82c270c6978e5640e80719bb34a47c6a9c8b33da Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 11:34:35 +0800
Subject: [PATCH 06/10] add cma memory area reuse

---
 mm/page_alloc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9de05859665c..4407aca85d8c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2805,9 +2805,9 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if ( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
-         (migratetype != M_MOVABLE) ||
-         IS_ENABLED(CONFIG_C_REUSE) ) {
+    if ((!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
+        (migratetype != M_MOVABLE) ||
+        IS_ENABLED(CONFIG_C_REUSE)) {
         page = rmqueue_pcplist(preferred_zone, zone, order,
                                migratetype, alloc_flags);
         if (likely(page))
-- 
Gitee

From b05e11be2b2345cf336d6084724df308a18b43d4 Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 13:57:47 +0800
Subject: [PATCH 07/10] add cma memory area reuse

---
 mm/page_alloc.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4407aca85d8c..b1d74734348b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2805,11 +2805,12 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if ((!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
-        (migratetype != M_MOVABLE) ||
-        IS_ENABLED(CONFIG_C_REUSE)) {
-        page = rmqueue_pcplist(preferred_zone, zone, order,
-                               migratetype, alloc_flags);
+    if( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
+        (migratetype != M_MOVABLE) ||
+        IS_ENABLED(CONFIG_C_REUSE) ) {
+        page = rmqueue_pcplist(preferred_zone, zone, order,
+                               migratetype, alloc_flags);
+
         if (likely(page))
             goto out;
     }
-- 
Gitee

From ac947d6f7d07200fe7b6d46392661238cb93816e Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 14:45:04 +0800
Subject: [PATCH 08/10] add cma memory area reuse

---
 mm/page_alloc.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b1d74734348b..7d6584744a7d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2808,9 +2808,8 @@ struct page *rmqueue(struct zone *preferred_zone,
     if( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
         (migratetype != M_MOVABLE) ||
         IS_ENABLED(CONFIG_C_REUSE) ) {
-        page = rmqueue_pcplist(preferred_zone, zone, order,
-                               migratetype, alloc_flags);
-
+        page = rmqueue_pcplist(preferred_zone, zone, order,
+                               migratetype, alloc_flags);
         if (likely(page))
             goto out;
     }
-- 
Gitee
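The other half of the CMA-reuse plumbing is the ALLOC_CMA decision from patches 01 and 03: gfp_to_alloc_flags_cma() withholds ALLOC_CMA from tasks running under PF_MEMALLOC_PIN, so pinned allocations are never satisfied from a CMA area. A standalone model of that decision follows; the flag values are illustrative stand-ins, not the kernel's.

/* Model of gfp_to_alloc_flags_cma() after this series: a movable/CMA
 * request gets ALLOC_CMA unless the task runs under PF_MEMALLOC_PIN.
 * movable_cma_request stands in for
 * gfp_migratetype(gfp_mask) == get_cma_migratetype(). */
#include <assert.h>
#include <stdbool.h>

#define ALLOC_CMA       0x80u
#define PF_MEMALLOC_PIN 0x10000000u

static unsigned int alloc_flags_cma(bool movable_cma_request,
                                    unsigned int task_flags,
                                    unsigned int alloc_flags)
{
    if (!(task_flags & PF_MEMALLOC_PIN) && movable_cma_request)
        alloc_flags |= ALLOC_CMA;
    return alloc_flags;
}

int main(void)
{
    assert(alloc_flags_cma(true, 0, 0) == ALLOC_CMA);
    assert(alloc_flags_cma(true, PF_MEMALLOC_PIN, 0) == 0); /* pinned: no CMA */
    return 0;
}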
From cefb9121f2e4406cc33c1b72a0eaf56964fd161c Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Thu, 18 Jul 2024 15:38:55 +0800
Subject: [PATCH 09/10] add cma memory area reuse

---
 mm/page_alloc.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d6584744a7d..f74e575f1a12 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2805,11 +2805,11 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if( (!IS_ENABLED(CONFIG_CMA) || (alloc_flags & ALLOC_CMA)) ||
-        (migratetype != M_MOVABLE) ||
-        IS_ENABLED(CONFIG_C_REUSE) ) {
-        page = rmqueue_pcplist(preferred_zone, zone, order,
-                               migratetype, alloc_flags);
+    if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+        migratetype != MIGRATE_MOVABLE ||
+        IS_ENABLED(CONFIG_CMA_REUSE)) {
+        page = rmqueue_pcplist(preferred_zone, zone, order,
+                               migratetype, alloc_flags);
         if (likely(page))
             goto out;
     }
-- 
Gitee

From fe34939d1dd15461f4378dcb0529dd6fac162427 Mon Sep 17 00:00:00 2001
From: nan_tu
Date: Fri, 19 Jul 2024 09:42:06 +0800
Subject: [PATCH 10/10] add cma memory area reuse

---
 include/linux/gfp_types.h | 2 +-
 include/linux/highmem.h   | 2 +-
 mm/page_alloc.c           | 4 +---
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 336f1b9bef6c..4bc68a46b97b 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -251,7 +251,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /**
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 75607d4ba26c..ccef475c0d44 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -226,7 +226,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 {
     struct folio *folio;

-    folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+    folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0, vma, vaddr, false);
     if (folio)
         clear_user_highpage(&folio->page, vaddr);

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f74e575f1a12..d5f881f72308 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2805,9 +2805,7 @@ struct page *rmqueue(struct zone *preferred_zone,
      */
     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

-    if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-        migratetype != MIGRATE_MOVABLE ||
-        IS_ENABLED(CONFIG_CMA_REUSE)) {
+    if (likely(pcp_allowed_order(order))) {
         page = rmqueue_pcplist(preferred_zone, zone, order,
                                migratetype, alloc_flags);
         if (likely(page))
-- 
Gitee
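With patch 10 the series settles: rmqueue() returns to the upstream pcp_allowed_order() gate, __GFP_BITS_SHIFT moves to 27 to make room for the new bit, and the user-visible surface is the __GFP_CMA flag, which vma_alloc_zeroed_movable_folio() now passes for zeroed user pages. A hedged usage sketch against this end state (kernel context and CONFIG_CMA_REUSE=y assumed; the helper name is invented):

/* Usage sketch against the series' end state: __GFP_CMA opts a movable
 * allocation into the CMA-first path; without CONFIG_CMA_REUSE the flag
 * has no effect on migratetype selection. */
#include <linux/gfp.h>

static struct page *grab_movable_page_prefer_cma(void)
{
    return alloc_pages(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0);
}

Callers that must not receive CMA-backed pages simply omit the flag, or run under PF_MEMALLOC_PIN, in which case gfp_to_alloc_flags_cma() keeps ALLOC_CMA clear.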