From 5cc5fbbb1f8d33a4243cfbd3d02ad67806d1259d Mon Sep 17 00:00:00 2001
From: Toke Hoiland-Jorgensen
Date: Wed, 3 Sep 2025 03:47:22 +0000
Subject: [PATCH 1/7] bpf: generalise tail call map compatibility check

mainline inclusion
from mainline-v5.18-rc1
commit f45d5b6ce2e835834c94b8b700787984f02cd662
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f45d5b6ce2e835834c94b8b700787984f02cd662

--------------------------------

The check for tail call map compatibility ensures that tail calls only
happen between maps of the same type. To ensure backwards compatibility
for XDP frags we need a similar type of check for cpumap and devmap
programs, so move the state from bpf_array_aux into bpf_map, add
xdp_has_frags to the check, and apply the same check to cpumap and
devmap.

Acked-by: John Fastabend
Co-developed-by: Lorenzo Bianconi
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Toke Hoiland-Jorgensen
Link: https://lore.kernel.org/r/f19fd97c0328a39927f3ad03e1ca6b43fd53cdfd.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov

Conflicts:
	include/linux/bpf.h
	kernel/bpf/core.c
	kernel/bpf/syscall.c
	kernel/bpf/cpumap.c
	kernel/bpf/arraymap.c
[The conflicts were due to commit c2f2cdbeffda7 not being merged.]

Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf.h   | 31 +++++++++++++++++++------------
 kernel/bpf/arraymap.c |  4 +---
 kernel/bpf/core.c     | 28 ++++++++++++----------------
 kernel/bpf/cpumap.c   | 16 +++++++++-------
 kernel/bpf/devmap.c   |  3 ++-
 kernel/bpf/syscall.c  | 15 +++++++--------
 6 files changed, 50 insertions(+), 47 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f0db30991f68..39d22f62c59b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -186,6 +186,17 @@ struct bpf_map {
 	})
 	struct mutex freeze_mutex;
 	atomic64_t writecnt;
+	/* 'Ownership' of prog array is claimed by the first program that
+	 * is going to use this map or by the first program which FD is
+	 * stored in the map to make sure that all callers and callees have
+	 * the same prog type and JITed flag.
+	 */
+	struct {
+		const struct btf_type *attach_func_proto;
+		spinlock_t lock;
+		enum bpf_prog_type type;
+		bool jited;
+	} owner;
 };

 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -959,17 +970,6 @@ struct bpf_prog_aux {
 };

 struct bpf_array_aux {
-	/* 'Ownership' of prog array is claimed by the first program that
-	 * is going to use this map or by the first program which FD is
-	 * stored in the map to make sure that all callers and callees have
-	 * the same prog type and JITed flag.
-	 */
-	struct {
-		const struct btf_type *attach_func_proto;
-		spinlock_t lock;
-		enum bpf_prog_type type;
-		bool jited;
-	} owner;
 	/* Programs with direct jumps into programs part of this array. */
 	struct list_head poke_progs;
 	struct bpf_map *map;
@@ -1125,7 +1125,14 @@ struct bpf_event_entry {
 	struct rcu_head rcu;
 };

-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
+	       map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
+
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
 int bpf_prog_calc_tag(struct bpf_prog *fp);

 const char *kernel_type_name(u32 btf_type_id);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 779297cad45b..935aeb3b06ee 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -807,13 +807,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 static void *prog_fd_array_get_ptr(struct bpf_map *map,
 				   struct file *map_file, int fd)
 {
-	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_prog *prog = bpf_prog_get(fd);

 	if (IS_ERR(prog))
 		return prog;

-	if (!bpf_prog_array_compatible(array, prog)) {
+	if (!bpf_prog_map_compatible(map, prog)) {
 		bpf_prog_put(prog);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1004,7 +1003,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
 	INIT_LIST_HEAD(&aux->poke_progs);
 	mutex_init(&aux->poke_mutex);
-	spin_lock_init(&aux->owner.lock);

 	map = array_map_alloc(attr);
 	if (IS_ERR(map)) {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4fabe11c84d5..19456012b99a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1775,7 +1775,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 }
 #endif

-bool bpf_prog_array_compatible(struct bpf_array *array,
+bool bpf_prog_map_compatible(struct bpf_map *map,
 			       const struct bpf_prog *fp)
 {
 	bool ret;
@@ -1783,22 +1783,20 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
 	if (fp->kprobe_override)
 		return false;
-
-	spin_lock(&array->aux->owner.lock);
-
-	if (!array->aux->owner.type) {
+	spin_lock(&map->owner.lock);
+	if (!map->owner.type) {
 		/* There's no owner yet where we could check for
 		 * compatibility.
 		 */
-		array->aux->owner.type = fp->type;
-		array->aux->owner.jited = fp->jited;
-		array->aux->owner.attach_func_proto = aux->attach_func_proto;
+		map->owner.type = fp->type;
+		map->owner.jited = fp->jited;
+		map->owner.attach_func_proto = aux->attach_func_proto;
 		ret = true;
 	} else {
-		ret = array->aux->owner.type == fp->type &&
-		      array->aux->owner.jited == fp->jited;
+		ret = map->owner.type == fp->type &&
+		      map->owner.jited == fp->jited;
 		if (ret &&
-		    array->aux->owner.attach_func_proto != aux->attach_func_proto) {
+		    map->owner.attach_func_proto != aux->attach_func_proto) {
 			switch (fp->type) {
 			case BPF_PROG_TYPE_TRACING:
 			case BPF_PROG_TYPE_LSM:
@@ -1811,7 +1809,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
 			}
 		}
 	}
-	spin_unlock(&array->aux->owner.lock);
+	spin_unlock(&map->owner.lock);
 	return ret;
 }
@@ -1823,13 +1821,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 	mutex_lock(&aux->used_maps_mutex);
 	for (i = 0; i < aux->used_map_cnt; i++) {
 		struct bpf_map *map = aux->used_maps[i];
-		struct bpf_array *array;

-		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+		if (!map_type_contains_progs(map))
 			continue;

-		array = container_of(map, struct bpf_array, map);
-		if (!bpf_prog_array_compatible(array, fp)) {
+		if (!bpf_prog_map_compatible(map, fp)) {
 			ret = -EINVAL;
 			goto out;
 		}
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 6e4ce0be8be8..918202cdff16 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -400,7 +400,8 @@ bool cpu_map_prog_allowed(struct bpf_map *map)
 	       map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
 }

-static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
+static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
+				      struct bpf_map *map, int fd)
 {
 	struct bpf_prog *prog;

@@ -408,7 +409,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);

-	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
+	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
+	    !bpf_prog_map_compatible(map, prog)) {
 		bpf_prog_put(prog);
 		return -EINVAL;
 	}
@@ -420,7 +422,7 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
 }

 static struct bpf_cpu_map_entry *
-__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
+__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, u32 cpu)
 {
 	int numa, err, i, fd = value->bpf_prog.fd;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
@@ -455,15 +457,15 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
 		goto free_queue;

 	rcpu->cpu = cpu;
-	rcpu->map_id = map_id;
+	rcpu->map_id = map->id;
 	rcpu->value.qsize = value->qsize;

-	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
+	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
 		goto free_ptr_ring;

 	/* Setup kthread */
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
-					       "cpumap/%d/map:%d", cpu, map_id);
+					       "cpumap/%d/map:%d", cpu, map->id);
 	if (IS_ERR(rcpu->kthread))
 		goto free_prog;

@@ -579,7 +581,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 		rcpu = NULL; /* Same as deleting */
 	} else {
 		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-		rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
+		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
 		if (!rcpu)
 			return -ENOMEM;
 		rcpu->cmap = cmap;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index e805811ac2c9..04d6174a98c8 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -619,7 +619,8 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
 					     BPF_PROG_TYPE_XDP, false);
 		if (IS_ERR(prog))
 			goto err_put_dev;
-		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
+		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
+		    !bpf_prog_map_compatible(&dtab->map, prog))
 			goto err_put_prog;
 	}

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3b414e36df6b..9e2d4b101369 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -565,16 +565,14 @@ static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
 #ifdef CONFIG_PROC_FS
 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 {
-	const struct bpf_map *map = filp->private_data;
-	const struct bpf_array *array;
+	struct bpf_map *map = filp->private_data;
 	u32 type = 0, jited = 0;

-	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
-		array = container_of(map, struct bpf_array, map);
-		spin_lock(&array->aux->owner.lock);
-		type = array->aux->owner.type;
-		jited = array->aux->owner.jited;
-		spin_unlock(&array->aux->owner.lock);
+	if (map_type_contains_progs(map)) {
+		spin_lock(&map->owner.lock);
+		type = map->owner.type;
+		jited = map->owner.jited;
+		spin_unlock(&map->owner.lock);
 	}

 	seq_printf(m,
@@ -866,6 +864,7 @@ static int map_create(union bpf_attr *attr)
 	atomic64_set(&map->refcnt, 1);
 	atomic64_set(&map->usercnt, 1);
 	mutex_init(&map->freeze_mutex);
+	spin_lock_init(&map->owner.lock);

 	map->spin_lock_off = -EINVAL;
 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
-- 
Gitee

From 9b9b51ce86593e630c2f049504bb9fb063f1cb52 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 3 Sep 2025 03:47:23 +0000
Subject: [PATCH 2/7] bpf: Add cookie object to bpf maps

mainline inclusion
from mainline-v6.17-rc1
commit 12df58ad294253ac1d8df0c9bb9cf726397a671d
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=12df58ad294253ac1d8df0c9bb9cf726397a671d

--------------------------------

Add a cookie to BPF maps to uniquely identify BPF maps for the timespan
when the node is up. This is different to comparing a pointer or BPF
map id which could get rolled over and reused.

Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/r/20250730234733.530041-1-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov

Conflicts:
	include/linux/bpf.h
	kernel/bpf/syscall.c
[The conflicts were due to some minor issue.]

Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf.h  | 1 +
 kernel/bpf/syscall.c | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 39d22f62c59b..786edb76c412 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -197,6 +197,7 @@ struct bpf_map {
 		enum bpf_prog_type type;
 		bool jited;
 	} owner;
+	u64 cookie; /* write-once */
 };

 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 9e2d4b101369..0adc7aa356a8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 

 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -44,6 +45,7 @@
 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

 DEFINE_PER_CPU(int, bpf_prog_active);
+DEFINE_COOKIE(bpf_map_cookie);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
 static DEFINE_IDR(map_idr);
@@ -861,6 +863,10 @@ static int map_create(union bpf_attr *attr)
 	if (err < 0)
 		goto free_map;

+	preempt_disable();
+	map->cookie = gen_cookie_next(&bpf_map_cookie);
+	preempt_enable();
+
 	atomic64_set(&map->refcnt, 1);
 	atomic64_set(&map->usercnt, 1);
 	mutex_init(&map->freeze_mutex);
-- 
Gitee

From 6778ec635c7235cb2173b824fabb3ac3a2161133 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 3 Sep 2025 03:47:24 +0000
Subject: [PATCH 3/7] bpf: Move bpf map owner out of common struct

mainline inclusion
from mainline-v6.17-rc1
commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=fd1c98f0ef5cbcec842209776505d9e70d8fcd53

--------------------------------

Given this is only relevant for BPF tail call maps, it is adding up
space and penalizing other map types. We also need to extend this with
further objects to track / compare to. Therefore, let's move this out
into a separate structure and dynamically allocate it only for BPF tail
call maps.

Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov

Conflicts:
	include/linux/bpf.h
	kernel/bpf/core.c
	kernel/bpf/syscall.c
[The conflicts were due to some minor issue.]

Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf.h  | 35 ++++++++++++++++++++++++-----------
 kernel/bpf/core.c    | 32 ++++++++++++++++----------------
 kernel/bpf/syscall.c | 13 +++++++------
 3 files changed, 47 insertions(+), 33 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 786edb76c412..c20811b6e834 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include 

 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -144,6 +145,17 @@ struct bpf_map_memory {
 	struct user_struct *user;
 };

+/* 'Ownership' of prog array is claimed by the first program that
+ * is going to use this map or by the first program which FD is
+ * stored in the map to make sure that all callers and callees have
+ * the same prog type and JITed flag.
+ */
+struct bpf_map_owner {
+	enum bpf_prog_type type;
+	bool jited;
+	const struct btf_type *attach_func_proto;
+};
+
 struct bpf_map {
 	/* The first two cachelines with read-mostly members of which some
 	 * are also accessed in fast-path (e.g. ops, max_entries).
@@ -186,17 +198,8 @@ struct bpf_map {
 	})
 	struct mutex freeze_mutex;
 	atomic64_t writecnt;
-	/* 'Ownership' of prog array is claimed by the first program that
-	 * is going to use this map or by the first program which FD is
-	 * stored in the map to make sure that all callers and callees have
-	 * the same prog type and JITed flag.
-	 */
-	struct {
-		const struct btf_type *attach_func_proto;
-		spinlock_t lock;
-		enum bpf_prog_type type;
-		bool jited;
-	} owner;
+	spinlock_t owner_lock;
+	struct bpf_map_owner *owner;
 	u64 cookie; /* write-once */
 };

@@ -1119,6 +1122,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
 	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
 }

+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+	return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+	kfree(map->owner);
+}
+
 struct bpf_event_entry {
 	struct perf_event *event;
 	struct file *perf_file;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 19456012b99a..dcc136e0e94d 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1778,25 +1778,24 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 bool bpf_prog_map_compatible(struct bpf_map *map,
 			     const struct bpf_prog *fp)
 {
-	bool ret;
 	struct bpf_prog_aux *aux = fp->aux;
-
-	if (fp->kprobe_override)
-		return false;
-	spin_lock(&map->owner.lock);
-	if (!map->owner.type) {
-		/* There's no owner yet where we could check for
-		 * compatibility.
-		 */
-		map->owner.type = fp->type;
-		map->owner.jited = fp->jited;
-		map->owner.attach_func_proto = aux->attach_func_proto;
+	bool ret = false;
+
+	spin_lock(&map->owner_lock);
+	/* There's no owner yet where we could check for compatibility. */
+	if (!map->owner) {
+		map->owner = bpf_map_owner_alloc(map);
+		if (!map->owner)
+			goto err;
+		map->owner->type = fp->type;
+		map->owner->jited = fp->jited;
+		map->owner->attach_func_proto = aux->attach_func_proto;
 		ret = true;
 	} else {
-		ret = map->owner.type == fp->type &&
-		      map->owner.jited == fp->jited;
+		ret = map->owner->type == fp->type &&
+		      map->owner->jited == fp->jited;
 		if (ret &&
-		    map->owner.attach_func_proto != aux->attach_func_proto) {
+		    map->owner->attach_func_proto != aux->attach_func_proto) {
 			switch (fp->type) {
 			case BPF_PROG_TYPE_TRACING:
 			case BPF_PROG_TYPE_LSM:
@@ -1809,7 +1808,8 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 			}
 		}
 	}
-	spin_unlock(&map->owner.lock);
+err:
+	spin_unlock(&map->owner_lock);
 	return ret;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0adc7aa356a8..72ba7f768280 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -483,6 +483,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
 	bpf_map_charge_move(&mem, &map->memory);
 	security_bpf_map_free(map);
+	bpf_map_owner_free(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 	bpf_map_charge_finish(&mem);
@@ -570,12 +571,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 	struct bpf_map *map = filp->private_data;
 	u32 type = 0, jited = 0;

-	if (map_type_contains_progs(map)) {
-		spin_lock(&map->owner.lock);
-		type = map->owner.type;
-		jited = map->owner.jited;
-		spin_unlock(&map->owner.lock);
+	spin_lock(&map->owner_lock);
+	if (map->owner) {
+		type = map->owner->type;
+		jited = map->owner->jited;
 	}
+	spin_unlock(&map->owner_lock);

 	seq_printf(m,
 		   "map_type:\t%u\n"
@@ -870,7 +871,7 @@ static int map_create(union bpf_attr *attr)
 	atomic64_set(&map->refcnt, 1);
 	atomic64_set(&map->usercnt, 1);
 	mutex_init(&map->freeze_mutex);
-	spin_lock_init(&map->owner.lock);
+	spin_lock_init(&map->owner_lock);

 	map->spin_lock_off = -EINVAL;
 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
-- 
Gitee

From 1e44490f1c450dcc071af83fbf6f46434e130a26 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 3 Sep 2025 03:47:25 +0000
Subject: [PATCH 4/7] bpf: Move cgroup iterator helpers to bpf.h

mainline inclusion
from mainline-v6.17-rc1
commit 9621e60f59eae87eb9ffe88d90f24f391a1ef0f0
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=9621e60f59eae87eb9ffe88d90f24f391a1ef0f0

--------------------------------

Move them into bpf.h given we also need them in core code.

Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/r/20250730234733.530041-3-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov

Conflicts:
	include/linux/bpf.h
	include/linux/bpf-cgroup.h
[The conflicts were due to some minor issue.]

Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf-cgroup.h |  5 -----
 include/linux/bpf.h        | 42 ++++++++++++++++++++++----------------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 948ab36ae29e..f7301d3a5612 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -114,9 +114,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
 extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
 #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

-#define for_each_cgroup_storage_type(stype) \
-	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
-
 struct bpf_cgroup_storage_map;

 struct bpf_storage_buffer {
@@ -562,8 +559,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_HISOCK_EGRESS(sk, skb) ({ HISOCK_PASS; })
 #endif

-#define for_each_cgroup_storage_type(stype) for (; false; )
-
 #endif /* CONFIG_CGROUP_BPF */

 #endif /* _BPF_CGROUP_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c20811b6e834..0a05639adc51 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -52,6 +52,30 @@ struct bpf_iter_seq_info {
 	u32 seq_priv_size;
 };

+enum bpf_cgroup_storage_type {
+	BPF_CGROUP_STORAGE_SHARED,
+	BPF_CGROUP_STORAGE_PERCPU,
+#ifdef CONFIG_KABI_RESERVE
+	BPF_CGROUP_STORAGE_KABI_RESERVE_1,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_2,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_3,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_4,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_5,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_6,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_7,
+	BPF_CGROUP_STORAGE_KABI_RESERVE_8,
+#endif
+	__BPF_CGROUP_STORAGE_MAX
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+};
+
+#ifdef CONFIG_CGROUP_BPF
+#define for_each_cgroup_storage_type(stype) \
+	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+#else
+#define for_each_cgroup_storage_type(stype) for (; false; )
+#endif /* CONFIG_CGROUP_BPF */
+
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
@@ -594,24 +618,6 @@ struct bpf_prog_offload {
 	u32 jited_len;
 };

-enum bpf_cgroup_storage_type {
-	BPF_CGROUP_STORAGE_SHARED,
-	BPF_CGROUP_STORAGE_PERCPU,
-#ifdef CONFIG_KABI_RESERVE
-	BPF_CGROUP_STORAGE_KABI_RESERVE_1,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_2,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_3,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_4,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_5,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_6,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_7,
-	BPF_CGROUP_STORAGE_KABI_RESERVE_8,
-#endif
-	__BPF_CGROUP_STORAGE_MAX
-};
-
-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
-
 /* The longest tracepoint has 12 args.
  * See include/trace/bpf_probe.h
  */
-- 
Gitee

From 9dd8cf0ce0adb99b2462d44703abd6d5645e4050 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 3 Sep 2025 03:47:26 +0000
Subject: [PATCH 5/7] bpf: Fix oob access in cgroup local storage

mainline inclusion
from mainline-v6.17-rc1
commit abad3d0bad72a52137e0c350c59542d75ae4f513
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=abad3d0bad72a52137e0c350c59542d75ae4f513

--------------------------------

Lonial reported that an out-of-bounds access in cgroup local storage
can be crafted via tail calls. Given two programs each utilizing a
cgroup local storage with a different value size, and one program
doing a tail call into the other. The verifier will validate each of
the individual programs just fine. However, in the runtime context
the bpf_cg_run_ctx holds an bpf_prog_array_item which contains the
BPF program as well as any cgroup local storage flavor the program
uses. Helpers such as bpf_get_local_storage() pick this up from the
runtime context:

  ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
  storage = ctx->prog_item->cgroup_storage[stype];

  if (stype == BPF_CGROUP_STORAGE_SHARED)
          ptr = &READ_ONCE(storage->buf)->data[0];
  else
          ptr = this_cpu_ptr(storage->percpu_buf);

For the second program which was called from the originally attached
one, this means bpf_get_local_storage() will pick up the former
program's map, not its own. With mismatching sizes, this can result
in an unintended out-of-bounds access.

To fix this issue, we need to extend bpf_map_owner with an array of
storage_cookie[] to match on i) the exact maps from the original
program if the second program was using bpf_get_local_storage(), or
ii) allow the tail call combination if the second program was not
using any of the cgroup local storage maps.

Fixes: 7d9c3427894f ("bpf: Make cgroup storages shared between programs on the same cgroup")
Reported-by: Lonial Con
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/r/20250730234733.530041-4-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov

Conflicts:
	include/linux/bpf.h
	kernel/bpf/core.c
[The conflicts were due to some minor issue.]

Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf.h |  1 +
 kernel/bpf/core.c   | 15 +++++++++++++++
 2 files changed, 16 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0a05639adc51..5c3524de92a6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -177,6 +177,7 @@ struct bpf_map_owner {
 	enum bpf_prog_type type;
 	bool jited;
+	u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
 	const struct btf_type *attach_func_proto;
 };
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index dcc136e0e94d..5a31bce4cf5a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1779,7 +1779,9 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 			     const struct bpf_prog *fp)
 {
 	struct bpf_prog_aux *aux = fp->aux;
+	enum bpf_cgroup_storage_type i;
 	bool ret = false;
+	u64 cookie;

 	spin_lock(&map->owner_lock);
 	/* There's no owner yet where we could check for compatibility. */
@@ -1790,10 +1792,23 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 		map->owner->type = fp->type;
 		map->owner->jited = fp->jited;
 		map->owner->attach_func_proto = aux->attach_func_proto;
+		for_each_cgroup_storage_type(i) {
+			map->owner->storage_cookie[i] =
+				aux->cgroup_storage[i] ?
+				aux->cgroup_storage[i]->cookie : 0;
+		}
 		ret = true;
 	} else {
 		ret = map->owner->type == fp->type &&
 		      map->owner->jited == fp->jited;
+		for_each_cgroup_storage_type(i) {
+			if (!ret)
+				break;
+			cookie = aux->cgroup_storage[i] ?
+				 aux->cgroup_storage[i]->cookie : 0;
+			ret = map->owner->storage_cookie[i] == cookie ||
+			      !cookie;
+		}
 		if (ret &&
 		    map->owner->attach_func_proto != aux->attach_func_proto) {
 			switch (fp->type) {
-- 
Gitee

From da09791268f9f4f6dce0192c875125634e435fda Mon Sep 17 00:00:00 2001
From: Xiaomeng Zhang
Date: Wed, 3 Sep 2025 03:47:27 +0000
Subject: [PATCH 6/7] Fix kabi breakage for bpf_map by using KABI_FILL_HOLE
 and KABI_EXTEND

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

--------------------------------

Fix kabi breakage for bpf_map by using KABI_FILL_HOLE and KABI_EXTEND.

Fixes: fd1c98f0ef5c ("[Backport] bpf: Move bpf map owner out of common struct")
Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5c3524de92a6..bd435b20bca1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -207,7 +207,9 @@ struct bpf_map {
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
 	KABI_EXTEND(bool free_after_mult_rcu_gp)
-	/* 17 bytes hole */
+	KABI_FILL_HOLE(u64 cookie)
+	KABI_FILL_HOLE(spinlock_t owner_lock)
+	/* 4 bytes hole */

 	/* The 3rd and 4th cacheline with misc members to avoid false sharing
 	 * particularly with refcounting.
@@ -223,9 +225,7 @@ struct bpf_map {
 	})
 	struct mutex freeze_mutex;
 	atomic64_t writecnt;
-	spinlock_t owner_lock;
-	struct bpf_map_owner *owner;
-	u64 cookie; /* write-once */
+	KABI_EXTEND(struct bpf_map_owner *owner)
 };

 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
-- 
Gitee

From 7f14bf5aab6a09f37836e3c056764442550d2cb5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?=
Date: Wed, 3 Sep 2025 03:47:28 +0000
Subject: [PATCH 7/7] bpf: Resolve fext program type when checking map
 compatibility
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

mainline inclusion
from mainline-v6.2-rc1
commit 1c123c567fb138ebd187480b7fc0610fcb0851f5
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICTB0G
CVE: CVE-2025-38502

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=1c123c567fb138ebd187480b7fc0610fcb0851f5

--------------------------------

The bpf_prog_map_compatible() check makes sure that BPF program types
are not mixed inside BPF map types that can contain programs (tail call
maps, cpumaps and devmaps). It does this by setting the fields of the
map->owner struct to the values of the first program being checked
against, and rejecting any subsequent programs if the values don't
match.

One of the values being set in the map owner struct is the program
type, and since the code did not resolve the prog type for fext
programs, the map owner type would be set to PROG_TYPE_EXT and
subsequent loading of programs of the target type into the map would
fail.

This bug is seen in particular for XDP programs that are loaded as
PROG_TYPE_EXT using libxdp; these cannot insert programs into devmaps
and cpumaps because the check fails as described above.

Fix the bug by resolving the fext program type to its target program
type as elsewhere in the verifier.

v3:
- Add Yonghong's ACK

Fixes: f45d5b6ce2e8 ("bpf: generalise tail call map compatibility check")
Acked-by: Yonghong Song
Signed-off-by: Toke Høiland-Jørgensen
Link: https://lore.kernel.org/r/20221214230254.790066-1-toke@redhat.com
Signed-off-by: Martin KaFai Lau

Conflicts:
	include/linux/bpf_verifier.h
	kernel/bpf/core.c
	kernel/bpf/verifier.c
[The conflicts were due to some minor issue.]

Signed-off-by: Xiaomeng Zhang
---
 include/linux/bpf_verifier.h | 5 +++++
 kernel/bpf/core.c            | 6 ++++--
 kernel/bpf/verifier.c        | 5 -----
 3 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c300b2816e07..32b304844949 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -569,4 +569,9 @@ static inline u32 type_flag(u32 type)
 	return type & ~BPF_BASE_TYPE_MASK;
 }

+static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
+{
+	return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
+}
+
 #endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5a31bce4cf5a..4d72a8651de5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 

@@ -1778,6 +1779,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 bool bpf_prog_map_compatible(struct bpf_map *map,
 			     const struct bpf_prog *fp)
 {
+	enum bpf_prog_type prog_type = resolve_prog_type(fp);
 	struct bpf_prog_aux *aux = fp->aux;
 	enum bpf_cgroup_storage_type i;
 	bool ret = false;
@@ -1789,7 +1791,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 		map->owner = bpf_map_owner_alloc(map);
 		if (!map->owner)
 			goto err;
-		map->owner->type = fp->type;
+		map->owner->type = prog_type;
 		map->owner->jited = fp->jited;
 		map->owner->attach_func_proto = aux->attach_func_proto;
 		for_each_cgroup_storage_type(i) {
@@ -1799,7 +1801,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 		}
 		ret = true;
 	} else {
-		ret = map->owner->type == fp->type &&
+		ret = map->owner->type == prog_type &&
 		      map->owner->jited == fp->jited;
 		for_each_cgroup_storage_type(i) {
 			if (!ret)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f8c6ea69ea41..13849004ae38 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3322,11 +3322,6 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,

 #define MAX_PACKET_OFF 0xffff

-static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
-{
-	return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
-}
-
 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
 				       const struct bpf_call_arg_meta *meta,
 				       enum bpf_access_type t)
-- 
Gitee
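Illustrative note (not part of the patch series): the program pair described in
patch 5's commit message can be sketched roughly as below. This is a hypothetical
skeleton, assuming a libbpf-style build; the map and program names are made up.
Each program's storage access is verified against its own map, but before this
series a tail call lets the callee run with the attached caller's run context and
therefore the caller's smaller storage, which is what gives the out-of-bounds
access; with the series applied, placing the callee's fd into the prog array
should instead be rejected by bpf_prog_map_compatible() because the two programs
reference cgroup local storage maps with different cookies.

  /* cgroup_storage_tailcall_sketch.bpf.c -- hypothetical example, not from the series */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct big_val {
  	__u32 data[64];			/* 256-byte storage value */
  };

  struct {
  	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
  	__type(key, struct bpf_cgroup_storage_key);
  	__type(value, __u32);		/* 4-byte storage value */
  } storage_small SEC(".maps");

  struct {
  	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
  	__type(key, struct bpf_cgroup_storage_key);
  	__type(value, struct big_val);
  } storage_large SEC(".maps");

  struct {
  	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
  	__uint(max_entries, 1);
  	__uint(key_size, sizeof(__u32));
  	__uint(value_size, sizeof(__u32));
  } jmp_table SEC(".maps");

  SEC("cgroup_skb/egress")
  int callee(struct __sk_buff *skb)
  {
  	/* Verified against storage_large (256 bytes), but at run time the
  	 * run context still belongs to the attached caller, so the helper
  	 * hands back the caller's 4-byte storage_small value instead.
  	 */
  	struct big_val *val = bpf_get_local_storage(&storage_large, 0);

  	val->data[63] = 1;		/* out of bounds before the fix */
  	return 1;
  }

  SEC("cgroup_skb/egress")
  int caller(struct __sk_buff *skb)
  {
  	__u32 *cnt = bpf_get_local_storage(&storage_small, 0);

  	(*cnt)++;
  	/* Userspace places callee's prog fd at index 0 of jmp_table; with
  	 * the series applied that map update fails with -EINVAL because
  	 * the two programs use different cgroup local storage maps.
  	 */
  	bpf_tail_call(skb, &jmp_table, 0);
  	return 1;
  }

  char _license[] SEC("license") = "GPL";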