From e60d6d49f47172431c857db8e454555ff4523664 Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Tue, 1 Mar 2022 23:55:47 -0800 Subject: [PATCH 1/8] page_pool: Add allocation stats mainline inclusion from mainline-v5.18-rc1 commit 8610037e8106b48c79cfe0afb92b2b2466e51c3d category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8610037e8106b48c79cfe0afb92b2b2466e51c3d ---------------------------------------------------------------------- Add per-pool statistics counters for the allocation path of a page pool. These stats are incremented in softirq context, so no locking or per-cpu variables are needed. This code is disabled by default and a kernel config option is provided for users who wish to enable them. The statistics added are: - fast: successful fast path allocations - slow: slow path order-0 allocations - slow_high_order: slow path high order allocations - empty: ptr ring is empty, so a slow path allocation was forced. - refill: an allocation which triggered a refill of the cache - waive: pages obtained from the ptr ring that cannot be added to the cache due to a NUMA mismatch. Signed-off-by: Joe Damato Acked-by: Jesper Dangaard Brouer Reviewed-by: Ilias Apalodimas Signed-off-by: David S. 
Miller --- include/net/page_pool.h | 18 ++++++++++++++++++ net/Kconfig | 13 +++++++++++++ net/core/page_pool.c | 24 ++++++++++++++++++++---- 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index b9ecabff2032..c5a9b0b5d417 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -83,6 +83,19 @@ struct page_pool_params { unsigned int offset; /* DMA addr offset */ }; +#ifdef CONFIG_PAGE_POOL_STATS +struct page_pool_alloc_stats { + u64 fast; /* fast path allocations */ + u64 slow; /* slow-path order 0 allocations */ + u64 slow_high_order; /* slow-path high order allocations */ + u64 empty; /* failed refills due to empty ptr ring, forcing + * slow path allocation + */ + u64 refill; /* allocations via successful refill */ + u64 waive; /* failed refills due to numa zone mismatch */ +}; +#endif + struct page_pool { struct page_pool_params p; @@ -96,6 +109,11 @@ struct page_pool { struct page *frag_page; long frag_users; +#ifdef CONFIG_PAGE_POOL_STATS + /* these stats are incremented while in softirq context */ + struct page_pool_alloc_stats alloc_stats; +#endif + /* * Data structure for allocation side * diff --git a/net/Kconfig b/net/Kconfig index a22c3fb88564..232075ae15e2 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -436,6 +436,19 @@ config NET_DEVLINK config PAGE_POOL bool +config PAGE_POOL_STATS + default n + bool "Page pool stats" + depends on PAGE_POOL + help + Enable page pool statistics to track page allocation and recycling + in page pools. This option incurs additional CPU cost in allocation + and recycle paths and additional memory cost to store the statistics. + These statistics are only available if this option is enabled and if + the driver using the page pool supports exporting this data. + + If unsure, say N. 
+ config FAILOVER tristate "Generic failover module" help diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 9b60e4301a44..20f3dff81428 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -26,6 +26,13 @@ #define BIAS_MAX LONG_MAX +#ifdef CONFIG_PAGE_POOL_STATS +/* alloc_stat_inc is intended to be used in softirq context */ +#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) +#else +#define alloc_stat_inc(pool, __stat) +#endif + static int page_pool_init(struct page_pool *pool, const struct page_pool_params *params) { @@ -119,8 +126,10 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) int pref_nid; /* preferred NUMA node */ /* Quicker fallback, avoid locks when ring is empty */ - if (__ptr_ring_empty(r)) + if (__ptr_ring_empty(r)) { + alloc_stat_inc(pool, empty); return NULL; + } /* Softirq guarantee CPU and thus NUMA node is stable. This, * assumes CPU refilling driver RX-ring will also run RX-NAPI. @@ -150,14 +159,17 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) * This limit stress on page buddy alloactor. 
*/ page_pool_return_page(pool, page); + alloc_stat_inc(pool, waive); page = NULL; break; } } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); /* Return last page */ - if (likely(pool->alloc.count > 0)) + if (likely(pool->alloc.count > 0)) { page = pool->alloc.cache[--pool->alloc.count]; + alloc_stat_inc(pool, refill); + } spin_unlock(&r->consumer_lock); return page; @@ -172,6 +184,7 @@ static struct page *__page_pool_get_cached(struct page_pool *pool) if (likely(pool->alloc.count)) { /* Fast-path */ page = pool->alloc.cache[--pool->alloc.count]; + alloc_stat_inc(pool, fast); } else { page = page_pool_refill_alloc_cache(pool); } @@ -243,6 +256,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool, return NULL; } + alloc_stat_inc(pool, slow_high_order); page_pool_set_pp_info(pool, page); /* Track how many pages are held 'in-flight' */ @@ -297,10 +311,12 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, } /* Return last page */ - if (likely(pool->alloc.count > 0)) + if (likely(pool->alloc.count > 0)) { page = pool->alloc.cache[--pool->alloc.count]; - else + alloc_stat_inc(pool, slow); + } else { page = NULL; + } /* When page just alloc'ed is should/must have refcnt 1. 
*/ return page; -- Gitee From 3b1d33f8491a369b041a10ebbbb6f86249ce540b Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Tue, 1 Mar 2022 23:55:48 -0800 Subject: [PATCH 2/8] page_pool: Add recycle stats mainline inclusion from mainline-v5.18-rc1 commit ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad ---------------------------------------------------------------------- Add per-cpu stats tracking page pool recycling events: - cached: recycling placed page in the page pool cache - cache_full: page pool cache was full - ring: page placed into the ptr ring - ring_full: page released from page pool because the ptr ring was full - released_refcnt: page released (and not recycled) because refcnt > 1 Signed-off-by: Joe Damato Acked-by: Jesper Dangaard Brouer Reviewed-by: Ilias Apalodimas Signed-off-by: David S. Miller --- include/net/page_pool.h | 16 ++++++++++++++++ net/core/page_pool.c | 30 ++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index c5a9b0b5d417..a35bb97d8da7 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -94,6 +94,18 @@ struct page_pool_alloc_stats { u64 refill; /* allocations via successful refill */ u64 waive; /* failed refills due to numa zone mismatch */ }; + +struct page_pool_recycle_stats { + u64 cached; /* recycling placed page in the cache. */ + u64 cache_full; /* cache was full */ + u64 ring; /* recycling placed page back into ptr ring */ + u64 ring_full; /* page was released from page-pool because + * PTR ring was full. 
+ */ + u64 released_refcnt; /* page released because of elevated + * refcnt + */ +}; #endif struct page_pool { @@ -142,6 +154,10 @@ struct page_pool { */ struct ptr_ring ring; +#ifdef CONFIG_PAGE_POOL_STATS + /* recycle stats are per-cpu to avoid locking */ + struct page_pool_recycle_stats __percpu *recycle_stats; +#endif atomic_t pages_state_release_cnt; /* A page_pool is strictly tied to a single RX-queue being diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 20f3dff81428..abb6e16d46cf 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -29,8 +29,15 @@ #ifdef CONFIG_PAGE_POOL_STATS /* alloc_stat_inc is intended to be used in softirq context */ #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) +/* recycle_stat_inc is safe to use when preemption is possible. */ +#define recycle_stat_inc(pool, __stat) \ + do { \ + struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ + this_cpu_inc(s->__stat); \ + } while (0) #else #define alloc_stat_inc(pool, __stat) +#define recycle_stat_inc(pool, __stat) #endif static int page_pool_init(struct page_pool *pool, @@ -82,6 +89,12 @@ static int page_pool_init(struct page_pool *pool, */ } +#ifdef CONFIG_PAGE_POOL_STATS + pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); + if (!pool->recycle_stats) + return -ENOMEM; +#endif + if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) return -ENOMEM; @@ -414,7 +427,12 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) else ret = ptr_ring_produce_bh(&pool->ring, page); - return (ret == 0) ? 
true : false; + if (!ret) { + recycle_stat_inc(pool, ring); + return true; + } + + return false; } /* Only allow direct recycling in special circumstances, into the @@ -425,11 +443,14 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) static bool page_pool_recycle_in_cache(struct page *page, struct page_pool *pool) { - if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) + if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { + recycle_stat_inc(pool, cache_full); return false; + } /* Caller MUST have verified/know (page_ref_count(page) == 1) */ pool->alloc.cache[pool->alloc.count++] = page; + recycle_stat_inc(pool, cached); return true; } @@ -484,6 +505,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page, * doing refcnt based recycle tricks, meaning another process * will be invoking put_page. */ + recycle_stat_inc(pool, released_refcnt); /* Do not replace this with page_pool_return_page() */ page_pool_release_page(pool, page); put_page(page); @@ -497,6 +519,7 @@ void page_pool_put_page(struct page_pool *pool, struct page *page, page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); if (page && !page_pool_recycle_in_ring(pool, page)) { /* Cache full, fallback to free pages */ + recycle_stat_inc(pool, ring_full); page_pool_return_page(pool, page); } } @@ -643,6 +666,9 @@ static void page_pool_free(struct page_pool *pool) if (pool->p.flags & PP_FLAG_DMA_MAP) put_device(pool->p.dev); +#ifdef CONFIG_PAGE_POOL_STATS + free_percpu(pool->recycle_stats); +#endif kfree(pool); } -- Gitee From 5b7026e68c0f59b29093f153a7e281ece0deee36 Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Tue, 1 Mar 2022 23:55:49 -0800 Subject: [PATCH 3/8] page_pool: Add function to batch and return stats mainline inclusion from mainline-v5.18-rc1 commit 6b95e3388b1ea0ca63500c5a6e39162dbf828433 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=6b95e3388b1ea0ca63500c5a6e39162dbf828433 ---------------------------------------------------------------------- Adds a function page_pool_get_stats which can be used by drivers to obtain stats for a specified page_pool. Signed-off-by: Joe Damato Acked-by: Jesper Dangaard Brouer Reviewed-by: Ilias Apalodimas Signed-off-by: David S. Miller --- include/net/page_pool.h | 17 +++++++++++++++++ net/core/page_pool.c | 25 +++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index a35bb97d8da7..7b5fd51030d5 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -106,6 +106,23 @@ struct page_pool_recycle_stats { * refcnt */ }; + +/* This struct wraps the above stats structs so users of the + * page_pool_get_stats API can pass a single argument when requesting the + * stats for the page pool. + */ +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +/* + * Drivers that wish to harvest page pool stats and report them to users + * (perhaps via ethtool, debugfs, or another mechanism) can allocate a + * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool. 
+ */ +bool page_pool_get_stats(struct page_pool *pool, + struct page_pool_stats *stats); #endif struct page_pool { diff --git a/net/core/page_pool.c b/net/core/page_pool.c index abb6e16d46cf..0cfc03c6441d 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -35,6 +35,31 @@ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ this_cpu_inc(s->__stat); \ } while (0) + +bool page_pool_get_stats(struct page_pool *pool, + struct page_pool_stats *stats) +{ + int cpu = 0; + + if (!stats) + return false; + + memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats)); + + for_each_possible_cpu(cpu) { + const struct page_pool_recycle_stats *pcpu = + per_cpu_ptr(pool->recycle_stats, cpu); + + stats->recycle_stats.cached += pcpu->cached; + stats->recycle_stats.cache_full += pcpu->cache_full; + stats->recycle_stats.ring += pcpu->ring; + stats->recycle_stats.ring_full += pcpu->ring_full; + stats->recycle_stats.released_refcnt += pcpu->released_refcnt; + } + + return true; +} +EXPORT_SYMBOL(page_pool_get_stats); #else #define alloc_stat_inc(pool, __stat) #define recycle_stat_inc(pool, __stat) -- Gitee From a5b48ff9f9420e4beedf262b4f525fde20b8b997 Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Tue, 1 Mar 2022 23:55:50 -0800 Subject: [PATCH 4/8] Documentation: update networking/page_pool.rst mainline inclusion from mainline-v5.18-rc1 commit a3dd98281b9f265c7b89cb0c7a91739bff2e6506 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=a3dd98281b9f265c7b89cb0c7a91739bff2e6506 ---------------------------------------------------------------------- Add the new stats API, kernel config parameter, and stats structure information to the page_pool documentation. Signed-off-by: Joe Damato Signed-off-by: David S. 
Miller --- Documentation/networking/page_pool.rst | 56 ++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst index 43088ddf95e4..14cb1b727f95 100644 --- a/Documentation/networking/page_pool.rst +++ b/Documentation/networking/page_pool.rst @@ -97,6 +97,47 @@ a page will cause no race conditions is enough. * page_pool_get_dma_dir(): Retrieve the stored DMA direction. +* page_pool_get_stats(): Retrieve statistics about the page_pool. This API + is only available if the kernel has been configured with + ``CONFIG_PAGE_POOL_STATS=y``. A pointer to a caller allocated ``struct + page_pool_stats`` structure is passed to this API which is filled in. The + caller can then report those stats to the user (perhaps via ethtool, + debugfs, etc.). See below for an example usage of this API. + +Stats API and structures +------------------------ +If the kernel is configured with ``CONFIG_PAGE_POOL_STATS=y``, the API +``page_pool_get_stats()`` and structures described below are available. It +takes a pointer to a ``struct page_pool`` and a pointer to a ``struct +page_pool_stats`` allocated by the caller. + +The API will fill in the provided ``struct page_pool_stats`` with +statistics about the page_pool. + +The stats structure has the following fields:: + + struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; + }; + + +The ``struct page_pool_alloc_stats`` has the following fields: + * ``fast``: successful fast path allocations + * ``slow``: slow path order-0 allocations + * ``slow_high_order``: slow path high order allocations + * ``empty``: ptr ring is empty, so a slow path allocation was forced. + * ``refill``: an allocation which triggered a refill of the cache + * ``waive``: pages obtained from the ptr ring that cannot be added to + the cache due to a NUMA mismatch. 
+ +The ``struct page_pool_recycle_stats`` has the following fields: + * ``cached``: recycling placed page in the page pool cache + * ``cache_full``: page pool cache was full + * ``ring``: page placed into the ptr ring + * ``ring_full``: page released from page pool because the ptr ring was full + * ``released_refcnt``: page released (and not recycled) because refcnt > 1 + Coding examples =============== @@ -149,6 +190,21 @@ NAPI poller } } +Stats +----- + +.. code-block:: c + + #ifdef CONFIG_PAGE_POOL_STATS + /* retrieve stats */ + struct page_pool_stats stats = { 0 }; + if (page_pool_get_stats(page_pool, &stats)) { + /* perhaps the driver reports statistics with ethool */ + ethtool_print_allocation_stats(&stats.alloc_stats); + ethtool_print_recycle_stats(&stats.recycle_stats); + } + #endif + Driver unload ------------- -- Gitee From d04f2a85beb60fca476f7dd683753bebff1a687c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 11 Apr 2022 16:05:26 +0200 Subject: [PATCH 5/8] page_pool: Add recycle stats to page_pool_put_page_bulk mainline inclusion from mainline-v5.19-rc1 commit 590032a4d2133ecc10d3078a8db1d85a4842f12c category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=590032a4d2133ecc10d3078a8db1d85a4842f12c ---------------------------------------------------------------------- Add missing recycle stats to page_pool_put_page_bulk routine. 
Reviewed-by: Joe Damato Signed-off-by: Lorenzo Bianconi Reviewed-by: Ilias Apalodimas Link: https://lore.kernel.org/r/3712178b51c007cfaed910ea80e68f00c916b1fa.1649685634.git.lorenzo@kernel.org Signed-off-by: Paolo Abeni --- net/core/page_pool.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 0cfc03c6441d..fd25636d6e33 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -36,6 +36,12 @@ this_cpu_inc(s->__stat); \ } while (0) +#define recycle_stat_add(pool, __stat, val) \ + do { \ + struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ + this_cpu_add(s->__stat, val); \ + } while (0) + bool page_pool_get_stats(struct page_pool *pool, struct page_pool_stats *stats) { @@ -63,6 +69,7 @@ EXPORT_SYMBOL(page_pool_get_stats); #else #define alloc_stat_inc(pool, __stat) #define recycle_stat_inc(pool, __stat) +#define recycle_stat_add(pool, __stat, val) #endif static int page_pool_init(struct page_pool *pool, @@ -571,9 +578,13 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, /* Bulk producer into ptr_ring page_pool cache */ page_pool_ring_lock(pool); for (i = 0; i < bulk_len; i++) { - if (__ptr_ring_produce(&pool->ring, data[i])) - break; /* ring full */ + if (__ptr_ring_produce(&pool->ring, data[i])) { + /* ring full */ + recycle_stat_inc(pool, ring_full); + break; + } } + recycle_stat_add(pool, ring, i); page_pool_ring_unlock(pool); /* Hopefully all pages was return into ptr_ring */ -- Gitee From 6eb24bf53c8f54c242d18220ff1f69e1b60fcfb4 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 12 Apr 2022 18:31:58 +0200 Subject: [PATCH 6/8] net: page_pool: introduce ethtool stats mainline inclusion from mainline-v5.19-rc1 commit f3c5264f452a5b0ac1de1f2f657efbabdea3c76a category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f3c5264f452a5b0ac1de1f2f657efbabdea3c76a ---------------------------------------------------------------------- Introduce page_pool APIs to report stats through ethtool and reduce duplicated code in each driver. Signed-off-by: Lorenzo Bianconi Reviewed-by: Jakub Kicinski Reviewed-by: Ilias Apalodimas Signed-off-by: David S. Miller --- include/net/page_pool.h | 21 ++++++++++++++ net/core/page_pool.c | 63 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 7b5fd51030d5..060c47974c45 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -116,6 +116,10 @@ struct page_pool_stats { struct page_pool_recycle_stats recycle_stats; }; +int page_pool_ethtool_stats_get_count(void); +u8 *page_pool_ethtool_stats_get_strings(u8 *data); +u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); + /* * Drivers that wish to harvest page pool stats and report them to users * (perhaps via ethtool, debugfs, or another mechanism) can allocate a @@ -123,6 +127,23 @@ struct page_pool_stats { */ bool page_pool_get_stats(struct page_pool *pool, struct page_pool_stats *stats); +#else + +static inline int page_pool_ethtool_stats_get_count(void) +{ + return 0; +} + +static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) +{ + return data; +} + +static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) +{ + return data; +} + #endif struct page_pool { diff --git a/net/core/page_pool.c b/net/core/page_pool.c index fd25636d6e33..51f852eed0de 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -18,6 +18,7 @@ #include #include /* for __put_page() */ #include +#include #include @@ -42,6 +43,20 @@ this_cpu_add(s->__stat, val); \ } while (0) +static const char pp_stats[][ETH_GSTRING_LEN] = { + "rx_pp_alloc_fast", + "rx_pp_alloc_slow", + "rx_pp_alloc_slow_ho", + 
"rx_pp_alloc_empty", + "rx_pp_alloc_refill", + "rx_pp_alloc_waive", + "rx_pp_recycle_cached", + "rx_pp_recycle_cache_full", + "rx_pp_recycle_ring", + "rx_pp_recycle_ring_full", + "rx_pp_recycle_released_ref", +}; + bool page_pool_get_stats(struct page_pool *pool, struct page_pool_stats *stats) { @@ -50,7 +65,13 @@ bool page_pool_get_stats(struct page_pool *pool, if (!stats) return false; - memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats)); + /* The caller is responsible to initialize stats. */ + stats->alloc_stats.fast += pool->alloc_stats.fast; + stats->alloc_stats.slow += pool->alloc_stats.slow; + stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; + stats->alloc_stats.empty += pool->alloc_stats.empty; + stats->alloc_stats.refill += pool->alloc_stats.refill; + stats->alloc_stats.waive += pool->alloc_stats.waive; for_each_possible_cpu(cpu) { const struct page_pool_recycle_stats *pcpu = @@ -66,6 +87,46 @@ bool page_pool_get_stats(struct page_pool *pool, return true; } EXPORT_SYMBOL(page_pool_get_stats); + +u8 *page_pool_ethtool_stats_get_strings(u8 *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pp_stats); i++) { + memcpy(data, pp_stats[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + return data; +} +EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings); + +int page_pool_ethtool_stats_get_count(void) +{ + return ARRAY_SIZE(pp_stats); +} +EXPORT_SYMBOL(page_pool_ethtool_stats_get_count); + +u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) +{ + struct page_pool_stats *pool_stats = stats; + + *data++ = pool_stats->alloc_stats.fast; + *data++ = pool_stats->alloc_stats.slow; + *data++ = pool_stats->alloc_stats.slow_high_order; + *data++ = pool_stats->alloc_stats.empty; + *data++ = pool_stats->alloc_stats.refill; + *data++ = pool_stats->alloc_stats.waive; + *data++ = pool_stats->recycle_stats.cached; + *data++ = pool_stats->recycle_stats.cache_full; + *data++ = pool_stats->recycle_stats.ring; + *data++ = 
pool_stats->recycle_stats.ring_full; + *data++ = pool_stats->recycle_stats.released_refcnt; + + return data; +} +EXPORT_SYMBOL(page_pool_ethtool_stats_get); + #else #define alloc_stat_inc(pool, __stat) #define recycle_stat_inc(pool, __stat) -- Gitee From 243babb8ef34ccc8874216bbe650e41522ac1726 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Thu, 12 May 2022 14:56:31 +0800 Subject: [PATCH 7/8] net: page_pool: add page allocation stats for two fast page allocate path mainline inclusion from mainline-v5.19-rc1 commit 0f6deac3a07958195173119627502350925dce78 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0f6deac3a07958195173119627502350925dce78 ---------------------------------------------------------------------- Currently If use page pool allocation stats to analysis a RX performance degradation problem. These stats only count for pages allocate from page_pool_alloc_pages. But nic drivers such as hns3 use page_pool_dev_alloc_frag to allocate pages, so page stats in this API should also be counted. Signed-off-by: Jie Wang Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- net/core/page_pool.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 51f852eed0de..b0ebdbbd361f 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -711,8 +711,10 @@ struct page *page_pool_alloc_frag(struct page_pool *pool, if (page && *offset + size > max_size) { page = page_pool_drain_frag(pool, page); - if (page) + if (page) { + alloc_stat_inc(pool, fast); goto frag_reset; + } } if (!page) { @@ -734,6 +736,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool, pool->frag_users++; pool->frag_offset = *offset + size; + alloc_stat_inc(pool, fast); return page; } EXPORT_SYMBOL(page_pool_alloc_frag); -- Gitee From ced5cc6a4197eb91bc7b3b1eceaeaad01a312910 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 27 Oct 2023 20:37:42 +0800 Subject: [PATCH 8/8] net: page_pool: fix kabi issue for page pool statistics driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE CVE: NA ---------------------------------------------------------------------- Currently the page pool statistics member is placed at the middle part of struct page_pool, which may break the kabi. To fix it, replace the KABI_RESERVE(1) field with a pointer referring to the page pool statistics. Signed-off-by: Jian Shen --- include/net/page_pool.h | 23 +++++++++++-------- net/core/page_pool.c | 50 ++++++++++++++++++++++++++--------------- 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 060c47974c45..15298098302e 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -116,6 +116,16 @@ struct page_pool_stats { struct page_pool_recycle_stats recycle_stats; }; +/* To solve the KABI issue, introduce the new statistics structure + * to store the member alloc_stats and recycle_stats. 
+ */ +struct page_pool_raw_stats { + /* these stats are incremented while in softirq context */ + struct page_pool_alloc_stats alloc_stats; + /* recycle stats are per-cpu to avoid locking */ + struct page_pool_recycle_stats __percpu *recycle_stats; +}; + int page_pool_ethtool_stats_get_count(void); u8 *page_pool_ethtool_stats_get_strings(u8 *data); u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); @@ -159,11 +169,6 @@ struct page_pool { struct page *frag_page; long frag_users; -#ifdef CONFIG_PAGE_POOL_STATS - /* these stats are incremented while in softirq context */ - struct page_pool_alloc_stats alloc_stats; -#endif - /* * Data structure for allocation side * @@ -192,10 +197,6 @@ struct page_pool { */ struct ptr_ring ring; -#ifdef CONFIG_PAGE_POOL_STATS - /* recycle stats are per-cpu to avoid locking */ - struct page_pool_recycle_stats __percpu *recycle_stats; -#endif atomic_t pages_state_release_cnt; /* A page_pool is strictly tied to a single RX-queue being @@ -206,7 +207,11 @@ struct page_pool { u64 destroy_cnt; +#ifdef CONFIG_PAGE_POOL_STATS + KABI_USE(1, struct page_pool_raw_stats *stats) +#else KABI_RESERVE(1) +#endif }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index b0ebdbbd361f..3cb689989475 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -18,7 +18,6 @@ #include #include /* for __put_page() */ #include -#include #include @@ -29,21 +28,25 @@ #ifdef CONFIG_PAGE_POOL_STATS /* alloc_stat_inc is intended to be used in softirq context */ -#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) +#define alloc_stat_inc(pool, __stat) (pool->stats->alloc_stats.__stat++) /* recycle_stat_inc is safe to use when preemption is possible. 
*/ #define recycle_stat_inc(pool, __stat) \ do { \ - struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ + struct page_pool_recycle_stats __percpu *s = pool->stats->recycle_stats;\ this_cpu_inc(s->__stat); \ } while (0) #define recycle_stat_add(pool, __stat, val) \ do { \ - struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ + struct page_pool_recycle_stats __percpu *s = pool->stats->recycle_stats;\ this_cpu_add(s->__stat, val); \ } while (0) -static const char pp_stats[][ETH_GSTRING_LEN] = { +/* workaround for macro ETH_GSTRING_LEN, for include the header file ethtool.h + * will cause KABI issue, so define a new one to replace it. + */ +#define PP_ETH_GSTRING_LEN 32 +static const char pp_stats[][PP_ETH_GSTRING_LEN] = { "rx_pp_alloc_fast", "rx_pp_alloc_slow", "rx_pp_alloc_slow_ho", @@ -66,16 +69,16 @@ bool page_pool_get_stats(struct page_pool *pool, return false; /* The caller is responsible to initialize stats. */ - stats->alloc_stats.fast += pool->alloc_stats.fast; - stats->alloc_stats.slow += pool->alloc_stats.slow; - stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; - stats->alloc_stats.empty += pool->alloc_stats.empty; - stats->alloc_stats.refill += pool->alloc_stats.refill; - stats->alloc_stats.waive += pool->alloc_stats.waive; + stats->alloc_stats.fast += pool->stats->alloc_stats.fast; + stats->alloc_stats.slow += pool->stats->alloc_stats.slow; + stats->alloc_stats.slow_high_order += pool->stats->alloc_stats.slow_high_order; + stats->alloc_stats.empty += pool->stats->alloc_stats.empty; + stats->alloc_stats.refill += pool->stats->alloc_stats.refill; + stats->alloc_stats.waive += pool->stats->alloc_stats.waive; for_each_possible_cpu(cpu) { const struct page_pool_recycle_stats *pcpu = - per_cpu_ptr(pool->recycle_stats, cpu); + per_cpu_ptr(pool->stats->recycle_stats, cpu); stats->recycle_stats.cached += pcpu->cached; stats->recycle_stats.cache_full += pcpu->cache_full; @@ -93,8 +96,8 @@ u8 
*page_pool_ethtool_stats_get_strings(u8 *data) int i; for (i = 0; i < ARRAY_SIZE(pp_stats); i++) { - memcpy(data, pp_stats[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + memcpy(data, pp_stats[i], PP_ETH_GSTRING_LEN); + data += PP_ETH_GSTRING_LEN; } return data; @@ -183,13 +186,16 @@ static int page_pool_init(struct page_pool *pool, } #ifdef CONFIG_PAGE_POOL_STATS - pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); - if (!pool->recycle_stats) + pool->stats = kzalloc_node(sizeof(*pool->stats), GFP_KERNEL, params->nid); + if (!pool->stats) return -ENOMEM; + pool->stats->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); + if (!pool->stats->recycle_stats) + goto out; #endif if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) - return -ENOMEM; + goto out; atomic_set(&pool->pages_state_release_cnt, 0); @@ -200,6 +206,13 @@ static int page_pool_init(struct page_pool *pool, get_device(pool->p.dev); return 0; +out: +#ifdef CONFIG_PAGE_POOL_STATS + free_percpu(pool->stats->recycle_stats); + kfree(pool->stats); + pool->stats = NULL; +#endif + return -ENOMEM; } struct page_pool *page_pool_create(const struct page_pool_params *params) @@ -767,7 +780,8 @@ static void page_pool_free(struct page_pool *pool) put_device(pool->p.dev); #ifdef CONFIG_PAGE_POOL_STATS - free_percpu(pool->recycle_stats); + free_percpu(pool->stats->recycle_stats); + kfree(pool->stats); #endif kfree(pool); } -- Gitee