From b9227e4c53d5ef0e46eda6e19d8612937931b11b Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 25 Apr 2025 17:42:33 +0800
Subject: [PATCH 1/3] anolis: Revert "anolis: x86/mm: CSV allows CMA
 allocation concurrently"

ANBZ: #23276

The commit dbfcef4d5a1e ("anolis: x86/mm: CSV allows CMA allocation
concurrently") has its corresponding upstream version. We'll backport the
upstream version to this repo.

Fixes: dbfcef4d5a1e ("anolis: x86/mm: CSV allows CMA allocation concurrently")
Signed-off-by: hanliyang
---
 arch/x86/mm/csv.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c
index 7b735ea58622..7615d5b9ed08 100644
--- a/arch/x86/mm/csv.c
+++ b/arch/x86/mm/csv.c
@@ -178,7 +178,6 @@ void __init csv_cma_reserve_mem(void)
 					    1 << CSV_CMA_SHIFT, node);
 			break;
 		}
-		cma_enable_concurrency(csv_cma->cma);
 
 		if (start > cma_get_base(csv_cma->cma) || !start)
 			start = cma_get_base(csv_cma->cma);
-- 
Gitee

From d6003e269c3a548d6349b99b978350084b1cf77c Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 25 Apr 2025 17:49:13 +0800
Subject: [PATCH 2/3] anolis: Revert "anolis: mm/cma: add API to enable
 concurrent allocation from the CMA"

ANBZ: #23276

The commit 9fecbc883055 ("anolis: mm/cma: add API to enable concurrent
allocation from the CMA") has its corresponding upstream version. We'll
backport the upstream version to this repo.

Fixes: 9fecbc883055 ("anolis: mm/cma: add API to enable concurrent allocation from the CMA")
Signed-off-by: hanliyang
---
 include/linux/cma.h |  1 -
 mm/cma.c            | 14 ++------------
 mm/cma.h            |  1 -
 3 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 5a8a8ea0f70c..2af4ade5e0ce 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -50,5 +50,4 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
 
 extern int __init cma_alloc_areas(unsigned int max_cma_size);
-extern void cma_enable_concurrency(struct cma *cma);
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index 4956bf727ab8..36ea6e059f92 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -476,12 +476,10 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		if (!cma->no_mutex)
-			mutex_lock(&cma_mutex);
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-		if (!cma->no_mutex)
-			mutex_unlock(&cma_mutex);
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
@@ -565,11 +563,3 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 
 	return 0;
 }
-
-void cma_enable_concurrency(struct cma *cma)
-{
-	if (!cma)
-		return;
-
-	cma->no_mutex = true;
-}
diff --git a/mm/cma.h b/mm/cma.h
index 2b1a52d97164..1ad9afe5c5e8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -10,7 +10,6 @@ struct cma {
 	unsigned long   *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	spinlock_t	lock;
-	bool no_mutex;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
-- 
Gitee

From 56c2b5b120196affc41ec3e914acad721dedf00d Mon Sep 17 00:00:00 2001
From: Ge Yang
Date: Mon, 10 Feb 2025 09:56:06 +0800
Subject: [PATCH 3/3] mm/cma: using per-CMA locks to improve concurrent
 allocation performance

ANBZ: #23276

commit 24ac6fb6e3647fff3646b3ea1811095441380560 upstream.

For different CMAs, concurrent allocation of CMA memory ideally should not
require synchronization using locks. Currently, a global cma_mutex lock is
employed to synchronize all CMA allocations, which can impact the
performance of concurrent allocations across different CMAs.

To test the performance impact, follow these steps:
1. Boot the kernel with the command line argument hugetlb_cma=30G to
   allocate a 30GB CMA area specifically for huge page allocations. (Note:
   on my machine, which has 3 nodes, each node is initialized with 10G of
   CMA.)
2. Use the dd command with parameters if=/dev/zero of=/dev/shm/file bs=1G
   count=30 to fully utilize the CMA area by writing zeroes to a file in
   /dev/shm.
3. Open three terminals and execute the following commands simultaneously:
   (Note: each of these commands attempts to allocate 10GB [2621440 * 4KB
   pages] of CMA memory.)
   On Terminal 1: time echo 2621440 > /sys/kernel/debug/cma/hugetlb1/alloc
   On Terminal 2: time echo 2621440 > /sys/kernel/debug/cma/hugetlb2/alloc
   On Terminal 3: time echo 2621440 > /sys/kernel/debug/cma/hugetlb3/alloc

We attempt to allocate pages through the CMA debug interface and use the
time command to measure the duration of each allocation.

Performance comparison:
              Without this patch    With this patch
   Terminal1        ~7s                  ~7s
   Terminal2       ~14s                  ~8s
   Terminal3       ~21s                  ~7s

To solve the problem above, we could use per-CMA locks to improve
concurrent allocation performance. This would allow each CMA to be managed
independently, reducing the need for a global lock and thus improving
scalability and performance.

Link: https://lkml.kernel.org/r/1739152566-744-1-git-send-email-yangge1116@126.com
Signed-off-by: Ge Yang
Reviewed-by: Barry Song
Acked-by: David Hildenbrand
Reviewed-by: Oscar Salvador
Cc: Aisheng Dong
Cc: Baolin Wang
Signed-off-by: Andrew Morton
---
 mm/cma.c | 7 ++++---
 mm/cma.h | 1 +
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index 36ea6e059f92..a5b4db2b69aa 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -40,7 +40,6 @@ static unsigned int cma_areas_size = MAX_CMA_AREAS;
 struct cma *cma_areas = cma_areas_data;
 
 unsigned cma_area_count;
-static DEFINE_MUTEX(cma_mutex);
 
 phys_addr_t cma_get_base(const struct cma *cma)
 {
@@ -129,6 +128,8 @@ static void __init cma_activate_area(struct cma *cma)
 
 	spin_lock_init(&cma->lock);
 
+	mutex_init(&cma->alloc_mutex);
+
 #ifdef CONFIG_CMA_DEBUGFS
 	INIT_HLIST_HEAD(&cma->mem_head);
 	spin_lock_init(&cma->mem_head_lock);
@@ -476,10 +477,10 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		mutex_lock(&cma_mutex);
+		mutex_lock(&cma->alloc_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-		mutex_unlock(&cma_mutex);
+		mutex_unlock(&cma->alloc_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
diff --git a/mm/cma.h b/mm/cma.h
index 1ad9afe5c5e8..0e0f8daa4792 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -10,6 +10,7 @@ struct cma {
 	unsigned long   *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	spinlock_t	lock;
+	struct mutex alloc_mutex;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
-- 
Gitee
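
For readers outside the kernel tree: the series above boils down to giving
every CMA area its own allocation mutex instead of sharing one global
cma_mutex, so only allocations from the same area serialize. Below is a
minimal userspace sketch of that per-instance locking pattern, assuming
POSIX threads; struct area, demo_alloc(), and the hugetlb1/hugetlb2 names
are illustrative stand-ins, not kernel code, and the sleep() merely models
the slow alloc_contig_range() step.

/* Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct area {
	const char *name;
	pthread_mutex_t alloc_mutex;	/* per-area lock, like cma->alloc_mutex */
};

/*
 * Stand-in for cma_alloc(): only the owning area's lock is taken, so
 * threads working on different areas proceed in parallel.  Before the
 * patch, one global mutex was shared by every area.
 */
static void *demo_alloc(void *arg)
{
	struct area *a = arg;

	pthread_mutex_lock(&a->alloc_mutex);
	printf("allocating from %s\n", a->name);
	sleep(1);			/* models alloc_contig_range() */
	pthread_mutex_unlock(&a->alloc_mutex);
	return NULL;
}

int main(void)
{
	struct area areas[2] = { { .name = "hugetlb1" }, { .name = "hugetlb2" } };
	pthread_t tid[2];

	for (int i = 0; i < 2; i++) {
		/* per-area init, like mutex_init() in cma_activate_area() */
		pthread_mutex_init(&areas[i].alloc_mutex, NULL);
		pthread_create(&tid[i], NULL, demo_alloc, &areas[i]);
	}
	for (int i = 0; i < 2; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Because each area owns its lock, the two threads in the sketch overlap
instead of queuing, which mirrors the Terminal1/2/3 timings reported in the
commit message of patch 3.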