diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 28be17ce53c1869976b3eefcfa3ff8acfafb5a5f..ee657003c1a1efbe135187c49f3e202643f72dd5 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -59,6 +59,28 @@ static const struct bpf_func_proto rc_keydown_proto = { .arg4_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_rc_pointer_rel, u32*, sample, s32, rel_x, s32, rel_y) +{ + struct ir_raw_event_ctrl *ctrl; + + ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample); + + input_report_rel(ctrl->dev->input_dev, REL_X, rel_x); + input_report_rel(ctrl->dev->input_dev, REL_Y, rel_y); + input_sync(ctrl->dev->input_dev); + + return 0; +} + +static const struct bpf_func_proto rc_pointer_rel_proto = { + .func = bpf_rc_pointer_rel, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto * lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -67,6 +89,8 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &rc_repeat_proto; case BPF_FUNC_rc_keydown: return &rc_keydown_proto; + case BPF_FUNC_rc_pointer_rel: + return &rc_pointer_rel_proto; case BPF_FUNC_map_lookup_elem: return &bpf_map_lookup_elem_proto; case BPF_FUNC_map_update_elem: diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f6619887af05b5e78f861c8bae1c6ed38c15f094..db8234387803f6b5ac6a10de2a6a26cac52ceb9b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -234,6 +234,20 @@ enum bpf_attach_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) +/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the + * verifier will allow any alignment whatsoever. On platforms + * with strict alignment requirements for loads and stores (such + * as sparc and mips) the verifier validates that all loads and + * stores provably follow this requirement. This flag turns that + * checking and enforcement off. + * + * It is mostly used for testing when we want to validate the + * context and memory access aspects of the verifier, but because + * of an unaligned access the alignment check would trigger before + * the one we are interested in. + */ +#define BPF_F_ANY_ALIGNMENT (1U << 1) + /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ #define BPF_PSEUDO_MAP_FD 1 @@ -362,8 +376,11 @@ union bpf_attr { struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; - __u32 data_size_in; - __u32 data_size_out; + __u32 data_size_in; /* input: len of data_in */ + __u32 data_size_out; /* input/output: len of data_out + * returns ENOSPC if data_out + * is too small. + */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; @@ -2290,6 +2307,20 @@ union bpf_attr { * payload and/or *pop* value being to large. * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * Description + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded pointer movement. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available if the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". 
+ * Return + * 0 */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2383,7 +2414,8 @@ union bpf_attr { FN(map_pop_elem), \ FN(map_peek_elem), \ FN(msg_push_data), \ - FN(msg_pop_data), + FN(msg_pop_data), \ + FN(rc_pointer_rel), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2501,6 +2533,7 @@ struct __sk_buff { __u32 data_meta; __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; + __u32 wire_len; }; struct bpf_tunnel_key { @@ -2993,7 +3026,7 @@ struct bpf_flow_keys { }; struct bpf_func_info { - __u32 insn_offset; + __u32 insn_off; __u32 type_id; }; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index c31843f888080a6d9396a50544d288f530ef0f52..c155c8e7c488f182909a142ef5a003008824bfc7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -926,32 +926,34 @@ EXPORT_SYMBOL_GPL(__bpf_call_base); #define BPF_INSN_MAP(INSN_2, INSN_3) \ /* 32 bit ALU operations. */ \ /* Register based. */ \ - INSN_3(ALU, ADD, X), \ - INSN_3(ALU, SUB, X), \ - INSN_3(ALU, AND, X), \ - INSN_3(ALU, OR, X), \ - INSN_3(ALU, LSH, X), \ - INSN_3(ALU, RSH, X), \ - INSN_3(ALU, XOR, X), \ - INSN_3(ALU, MUL, X), \ - INSN_3(ALU, MOV, X), \ - INSN_3(ALU, DIV, X), \ - INSN_3(ALU, MOD, X), \ + INSN_3(ALU, ADD, X), \ + INSN_3(ALU, SUB, X), \ + INSN_3(ALU, AND, X), \ + INSN_3(ALU, OR, X), \ + INSN_3(ALU, LSH, X), \ + INSN_3(ALU, RSH, X), \ + INSN_3(ALU, XOR, X), \ + INSN_3(ALU, MUL, X), \ + INSN_3(ALU, MOV, X), \ + INSN_3(ALU, ARSH, X), \ + INSN_3(ALU, DIV, X), \ + INSN_3(ALU, MOD, X), \ INSN_2(ALU, NEG), \ INSN_3(ALU, END, TO_BE), \ INSN_3(ALU, END, TO_LE), \ /* Immediate based. */ \ - INSN_3(ALU, ADD, K), \ - INSN_3(ALU, SUB, K), \ - INSN_3(ALU, AND, K), \ - INSN_3(ALU, OR, K), \ - INSN_3(ALU, LSH, K), \ - INSN_3(ALU, RSH, K), \ - INSN_3(ALU, XOR, K), \ - INSN_3(ALU, MUL, K), \ - INSN_3(ALU, MOV, K), \ - INSN_3(ALU, DIV, K), \ - INSN_3(ALU, MOD, K), \ + INSN_3(ALU, ADD, K), \ + INSN_3(ALU, SUB, K), \ + INSN_3(ALU, AND, K), \ + INSN_3(ALU, OR, K), \ + INSN_3(ALU, LSH, K), \ + INSN_3(ALU, RSH, K), \ + INSN_3(ALU, XOR, K), \ + INSN_3(ALU, MUL, K), \ + INSN_3(ALU, MOV, K), \ + INSN_3(ALU, ARSH, K), \ + INSN_3(ALU, DIV, K), \ + INSN_3(ALU, MOD, K), \ /* 64 bit ALU operations. */ \ /* Register based. 
*/ \ INSN_3(ALU64, ADD, X), \ @@ -1130,6 +1132,12 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; insn++; CONT; + ALU_ARSH_X: + DST = (u64) (u32) ((*(s32 *) &DST) >> SRC); + CONT; + ALU_ARSH_K: + DST = (u64) (u32) ((*(s32 *) &DST) >> IMM); + CONT; ALU64_ARSH_X: (*(s64 *) &DST) >>= SRC; CONT; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 092d3293521b0ddff0cecb95f11141352c474f69..5b8ad334b70fdda766f26c0c447c919b178a1f6c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1469,9 +1469,14 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) if (CHECK_ATTR(BPF_PROG_LOAD)) return -EINVAL; - if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT) + if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT)) return -EINVAL; + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + /* copy eBPF program license from user space */ if (strncpy_from_user(license, u64_to_user_ptr(attr->license), sizeof(license) - 1) < 0) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d9bea2d4ced31927dbb1f1b169a747e8d6b460e7..0f333908f14fa6762054d9216f4561872a3aa6da 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4117,11 +4117,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } - if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { - verbose(env, "BPF_ARSH not supported for 32 bit ALU\n"); - return -EINVAL; - } - if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; @@ -5265,24 +5260,24 @@ static int check_btf_func(struct bpf_prog *prog, struct bpf_verifier_env *env, goto free_btf; } - /* check insn_offset */ + /* check insn_off */ if (i == 0) { - if (krecord[i].insn_offset) { + if (krecord[i].insn_off) { verbose(env, - "nonzero insn_offset %u for the first func info record", - krecord[i].insn_offset); + "nonzero insn_off %u for the first func info record", + krecord[i].insn_off); ret = -EINVAL; goto free_btf; } - } else if (krecord[i].insn_offset <= prev_offset) { + } else if (krecord[i].insn_off <= prev_offset) { verbose(env, "same or smaller insn offset (%u) than previous func info record (%u)", - krecord[i].insn_offset, prev_offset); + krecord[i].insn_off, prev_offset); ret = -EINVAL; goto free_btf; } - if (env->subprog_info[i].start != krecord[i].insn_offset) { + if (env->subprog_info[i].start != krecord[i].insn_off) { verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); ret = -EINVAL; goto free_btf; @@ -5297,7 +5292,7 @@ static int check_btf_func(struct bpf_prog *prog, struct bpf_verifier_env *env, goto free_btf; } - prev_offset = krecord[i].insn_offset; + prev_offset = krecord[i].insn_off; urecord += urec_size; } @@ -5320,7 +5315,7 @@ static void adjust_btf_func(struct bpf_verifier_env *env) return; for (i = 0; i < env->subprog_cnt; i++) - env->prog->aux->func_info[i].insn_offset = env->subprog_info[i].start; + env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; } /* check %cur's range satisfies %old's */ @@ -7130,6 +7125,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; + if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) + env->strict_alignment = false; ret = replace_map_fd_with_map_ptr(env); if (ret < 0) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index bb7651b64b69ef4ecbad3eb26c6b7ead04f3654b..8014a80bb42ded203c019b6ab5ed02e679359091 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, return ret; } -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, + u32 *time) { struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL }; enum bpf_cgroup_storage_type stype; u64 time_start, time_spent = 0; - u32 ret = 0, i; + u32 i; for_each_cgroup_storage_type(stype) { storage[stype] = bpf_cgroup_storage_alloc(prog, stype); @@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) repeat = 1; time_start = ktime_get_ns(); for (i = 0; i < repeat; i++) { - ret = bpf_test_run_one(prog, ctx, storage); + *ret = bpf_test_run_one(prog, ctx, storage); if (need_resched()) { if (signal_pending(current)) break; @@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) for_each_cgroup_storage_type(stype) bpf_cgroup_storage_free(storage[stype]); - return ret; + return 0; } static int bpf_test_finish(const union bpf_attr *kattr, @@ -74,8 +75,18 @@ static int bpf_test_finish(const union bpf_attr *kattr, { void __user *data_out = u64_to_user_ptr(kattr->test.data_out); int err = -EFAULT; + u32 copy_size = size; + + /* Clamp copy if the user has provided a size hint, but copy the full + * buffer if not to retain old behaviour. 
+ */ + if (kattr->test.data_size_out && + copy_size > kattr->test.data_size_out) { + copy_size = kattr->test.data_size_out; + err = -ENOSPC; + } - if (data_out && copy_to_user(data_out, data, size)) + if (data_out && copy_to_user(data_out, data, copy_size)) goto out; if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size))) goto out; @@ -83,7 +94,8 @@ static int bpf_test_finish(const union bpf_attr *kattr, goto out; if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration))) goto out; - err = 0; + if (err != -ENOSPC) + err = 0; out: return err; } @@ -165,7 +177,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, __skb_push(skb, hh_len); if (is_direct_pkt_access) bpf_compute_data_pointers(skb); - retval = bpf_test_run(prog, skb, repeat, &duration); + ret = bpf_test_run(prog, skb, repeat, &retval, &duration); + if (ret) { + kfree_skb(skb); + kfree(sk); + return ret; + } if (!is_l2) { if (skb_headroom(skb) < hh_len) { int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); @@ -212,11 +229,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); xdp.rxq = &rxqueue->xdp_rxq; - retval = bpf_test_run(prog, &xdp, repeat, &duration); + ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration); + if (ret) + goto out; if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || xdp.data_end != xdp.data + size) size = xdp.data_end - xdp.data; ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); +out: kfree(data); return ret; } diff --git a/net/core/filter.c b/net/core/filter.c index b214e71f171728515bf08ca53a0e0b6cfb9bee06..8ba8038d98fcd9f9d557c9e70603fd2abb666b56 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5765,6 +5765,7 @@ static bool sk_filter_is_valid_access(int off, int size, case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, tstamp): + case bpf_ctx_range(struct __sk_buff, wire_len): return false; } @@ -5789,6 +5790,7 @@ static bool cg_skb_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): + case bpf_ctx_range(struct __sk_buff, wire_len): return false; case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): @@ -5835,6 +5837,7 @@ static bool lwt_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): case bpf_ctx_range(struct __sk_buff, tstamp): + case bpf_ctx_range(struct __sk_buff, wire_len): return false; } @@ -6267,6 +6270,7 @@ static bool sk_skb_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): case bpf_ctx_range(struct __sk_buff, tstamp): + case bpf_ctx_range(struct __sk_buff, wire_len): return false; } @@ -6354,6 +6358,7 @@ static bool flow_dissector_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, tstamp): + case bpf_ctx_range(struct __sk_buff, wire_len): return false; } @@ -6679,6 +6684,17 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, bpf_target_off(struct sk_buff, tstamp, 8, target_size)); + break; + + case 
offsetof(struct __sk_buff, wire_len): + BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); + + off = si->off; + off -= offsetof(struct __sk_buff, wire_len); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct qdisc_skb_cb, pkt_len); + *target_size = 4; + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); } return insn - insn_buf; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index effa72f2b85691602648d14f5154bdfbb214b7cf..5113f76f9954c6c989e68f410513c0e4222c2ac4 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -784,6 +784,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, /* Pass parameters to the BPF program */ cb->qdisc_cb.flow_keys = &flow_keys; flow_keys.nhoff = nhoff; + flow_keys.thoff = nhoff; bpf_compute_data_pointers((struct sk_buff *)skb); result = BPF_PROG_RUN(attached, skb); diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index ab36e920e552165e08b2e09eb75e4b7eb362dd6c..5524b6dccd856468668a6234a903ac0bb07a318f 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -26,8 +26,9 @@ MAP COMMANDS | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog pin** *PROG* *FILE* | **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] -| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*] -| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*] +| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*] +| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*] +| **bpftool** **prog tracelog** | **bpftool** **prog help** | | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } @@ -117,6 +118,14 @@ DESCRIPTION parameter, with the exception of *flow_dissector* which is detached from the current networking name space. + **bpftool prog tracelog** + Dump the trace pipe of the system to the console (stdout). + Hit <Ctrl+C> to stop printing. BPF programs can write to this + trace pipe at runtime with the **bpf_trace_printk()** helper. + This should be used only for debugging purposes. For + streaming data from BPF programs to user space, one can use + perf events (see also **bpftool-map**\ (8)). + **bpftool prog help** Print short help message. 
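As a minimal sketch of the workflow the tracelog documentation above describes — assuming the SEC() macro and the bpf_trace_printk() wrapper from the selftests' bpf_helpers.h, with the tracepoint section name chosen purely for illustration — a program like the following would produce output that "bpftool prog tracelog" streams:

	/* SPDX-License-Identifier: GPL-2.0 */
	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("tracepoint/syscalls/sys_enter_execve")
	int log_execve(void *ctx)
	{
		/* bpf_trace_printk() writes to the tracefs trace_pipe,
		 * which is exactly what "bpftool prog tracelog" reads.
		 * The format string must live on the BPF stack.
		 */
		char fmt[] = "execve seen\n";

		return bpf_trace_printk(fmt, sizeof(fmt));
	}

	/* bpf_trace_printk() is a GPL-only helper. */
	char _license[] SEC("license") = "GPL";
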
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 9a60080f085f1977d99761ee999cae29a5b6aadb..44c189ba072a3bf3e1cfee218ac6fc89c0200d05 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -398,10 +398,13 @@ _bpftool() ;; esac ;; + tracelog) + return 0 + ;; *) [[ $prev == $object ]] && \ COMPREPLY=( $( compgen -W 'dump help pin attach detach load \ - show list' -- "$cur" ) ) + show list tracelog' -- "$cur" ) ) ;; esac ;; diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 5c4c1cd5a7ba24166a84c7badd929c82552e1deb..a705eb0b590ed79412f0d76540221f62276dbc87 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -347,6 +347,8 @@ int main(int argc, char **argv) }; int opt, ret; + setlinebuf(stdout); + last_do_help = do_help; pretty_output = false; json_output = false; diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 2761981669c8284062ef7c7b429c143d0a9ae3d6..0be0dd8f467ff5c053bc26cdd85c7e3600bbb147 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -167,6 +167,7 @@ int do_event_pipe(int argc, char **argv); int do_cgroup(int argc, char **arg); int do_perf(int argc, char **arg); int do_net(int argc, char **arg); +int do_tracelog(int argc, char **arg); int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what); int prog_parse_fd(int *argc, char ***argv); diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 69fa898f1b91099d5b14e01d0590b34408c52dba..f642f3683f30d1bad87b2f62fe1b681cc3996736 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -592,6 +592,13 @@ static int do_dump(int argc, char **argv) goto err_free; } + if (func_info && !info.func_info) { + /* kernel.kptr_restrict is set. No func_info available. */ + free(func_info); + func_info = NULL; + finfo_cnt = 0; + } + if ((member_len == &info.jited_prog_len && info.jited_prog_insns == 0) || (member_len == &info.xlated_prog_len && @@ -1143,6 +1150,7 @@ static int do_help(int argc, char **argv) " [pinmaps MAP_DIR]\n" " %s %s attach PROG ATTACH_TYPE [MAP]\n" " %s %s detach PROG ATTACH_TYPE [MAP]\n" + " %s %s tracelog\n" " %s %s help\n" "\n" " " HELP_SPEC_MAP "\n" @@ -1161,7 +1169,7 @@ static int do_help(int argc, char **argv) "", bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2]); + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); return 0; } @@ -1176,6 +1184,7 @@ static const struct cmd cmds[] = { { "loadall", do_loadall }, { "attach", do_attach }, { "detach", do_detach }, + { "tracelog", do_tracelog }, { 0 } }; diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c new file mode 100644 index 0000000000000000000000000000000000000000..1fa8e513f5906315986874655c8f267da90cafda --- /dev/null +++ b/tools/bpf/bpftool/tracelog.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (c) 2015-2017 Daniel Borkmann */ +/* Copyright (c) 2018 Netronome Systems, Inc. 
*/ + +#include <errno.h> +#include <limits.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <linux/magic.h> +#include <sys/vfs.h> + +#include "main.h" + +#ifndef TRACEFS_MAGIC +# define TRACEFS_MAGIC 0x74726163 +#endif + +#define _textify(x) #x +#define textify(x) _textify(x) + +FILE *trace_pipe_fd; +char *buff; + +static int validate_tracefs_mnt(const char *mnt, unsigned long magic) +{ + struct statfs st_fs; + + if (statfs(mnt, &st_fs) < 0) + return -ENOENT; + if ((unsigned long)st_fs.f_type != magic) + return -ENOENT; + + return 0; +} + +static bool +find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt) +{ + size_t src_len; + + if (validate_tracefs_mnt(mntpt, magic)) + return false; + + src_len = strlen(mntpt); + if (src_len + 1 >= PATH_MAX) { + p_err("tracefs mount point name too long"); + return false; + } + + strcpy(mnt, mntpt); + return true; +} + +static bool find_tracefs_pipe(char *mnt) +{ + static const char * const known_mnts[] = { + "/sys/kernel/debug/tracing", + "/sys/kernel/tracing", + "/tracing", + "/trace", + }; + const char *pipe_name = "/trace_pipe"; + const char *fstype = "tracefs"; + char type[100], format[32]; + const char * const *ptr; + bool found = false; + FILE *fp; + + for (ptr = known_mnts; ptr < known_mnts + ARRAY_SIZE(known_mnts); ptr++) + if (find_tracefs_mnt_single(TRACEFS_MAGIC, mnt, *ptr)) + goto exit_found; + + fp = fopen("/proc/mounts", "r"); + if (!fp) + return false; + + /* Allow room for NULL terminating byte and pipe file name */ + snprintf(format, sizeof(format), "%%*s %%%zds %%99s %%*s %%*d %%*d\\n", + PATH_MAX - strlen(pipe_name) - 1); + while (fscanf(fp, format, mnt, type) == 2) + if (strcmp(type, fstype) == 0) { + found = true; + break; + } + fclose(fp); + + /* The string from fscanf() might be truncated, check mnt is valid */ + if (!found || validate_tracefs_mnt(mnt, TRACEFS_MAGIC)) + return false; + +exit_found: + strcat(mnt, pipe_name); + return true; +} + +static void exit_tracelog(int signum) +{ + fclose(trace_pipe_fd); + free(buff); + + if (json_output) { + jsonw_end_array(json_wtr); + jsonw_destroy(&json_wtr); + } + + exit(0); +} + +int do_tracelog(int argc, char **argv) +{ + const struct sigaction act = { + .sa_handler = exit_tracelog + }; + char trace_pipe[PATH_MAX]; + bool found_trace_pipe; + size_t buff_len = 0; + + if (json_output) + jsonw_start_array(json_wtr); + + found_trace_pipe = find_tracefs_pipe(trace_pipe); + if (!found_trace_pipe) { + p_err("could not find trace pipe, tracefs not mounted?"); + return -1; + } + + trace_pipe_fd = fopen(trace_pipe, "r"); + if (!trace_pipe_fd) { + p_err("could not open trace pipe: %s", strerror(errno)); + return -1; + } + + sigaction(SIGHUP, &act, NULL); + sigaction(SIGINT, &act, NULL); + sigaction(SIGTERM, &act, NULL); + while (1) { + ssize_t ret; + + ret = getline(&buff, &buff_len, trace_pipe_fd); + if (ret <= 0) { + p_err("failed to read content from trace pipe: %s", + strerror(errno)); + break; + } + if (json_output) + jsonw_string(json_wtr, buff); + else + printf("%s", buff); + } + + fclose(trace_pipe_fd); + free(buff); + return -1; +} diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index e06ac0286a750164adb2323f725bdfa8f49ec21d..131ecd175533046f3ae026ff54b59dadd1f8137f 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -261,7 +261,7 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len, jsonw_start_object(json_wtr); if (btf && record) { - if (record->insn_offset == i) { + if (record->insn_off == i) { 
btf_dumper_type_only(btf, record->type_id, func_sig, sizeof(func_sig)); @@ -330,7 +330,7 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len, } if (btf && record) { - if (record->insn_offset == i) { + if (record->insn_off == i) { btf_dumper_type_only(btf, record->type_id, func_sig, sizeof(func_sig)); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 32a7e6eb18253775d81a144bf301f5879aeef626..0b5abe8f50f340efdd48edd6447e7e8ec6e5f38a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -232,6 +232,20 @@ enum bpf_attach_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) +/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the + * verifier will allow any alignment whatsoever. On platforms + * with strict alignment requirements for loads and stores (such + * as sparc and mips) the verifier validates that all loads and + * stores provably follow this requirement. This flag turns that + * checking and enforcement off. + * + * It is mostly used for testing when we want to validate the + * context and memory access aspects of the verifier, but because + * of an unaligned access the alignment check would trigger before + * the one we are interested in. + */ +#define BPF_F_ANY_ALIGNMENT (1U << 1) + /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ #define BPF_PSEUDO_MAP_FD 1 @@ -360,8 +374,11 @@ union bpf_attr { struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; - __u32 data_size_in; - __u32 data_size_out; + __u32 data_size_in; /* input: len of data_in */ + __u32 data_size_out; /* input/output: len of data_out + * returns ENOSPC if data_out + * is too small. + */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; @@ -2282,9 +2299,22 @@ union bpf_attr { * if possible. Other errors can occur if input parameters are * invalid either due to *start* byte not being valid part of msg * payload and/or *pop* value being to large. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * Description + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded pointer movement. * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available if the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". * Return - * 0 on success, or a negative erro in case of failure. 
+ * 0 */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2378,7 +2408,8 @@ union bpf_attr { FN(map_pop_elem), \ FN(map_peek_elem), \ FN(msg_push_data), \ - FN(msg_pop_data), + FN(msg_pop_data), \ + FN(rc_pointer_rel), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2496,6 +2527,7 @@ struct __sk_buff { __u32 data_meta; __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; + __u32 wire_len; }; struct bpf_tunnel_key { @@ -2988,7 +3020,7 @@ struct bpf_flow_keys { }; struct bpf_func_info { - __u32 insn_offset; + __u32 insn_off; __u32 type_id; }; diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 97e3968139e4612b0cfd2a5a8f7346ca3b0d4e5d..8af766ae0d509596e082d00697b6c78b43649cfd 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -293,9 +293,9 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, } int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, - size_t insns_cnt, int strict_alignment, - const char *license, __u32 kern_version, - char *log_buf, size_t log_buf_sz, int log_level) + size_t insns_cnt, __u32 prog_flags, const char *license, + __u32 kern_version, char *log_buf, size_t log_buf_sz, + int log_level) { union bpf_attr attr; @@ -309,7 +309,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, attr.log_level = log_level; log_buf[0] = 0; attr.kern_version = kern_version; - attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0; + attr.prog_flags = prog_flags; return sys_bpf_prog_load(&attr, sizeof(attr)); } @@ -477,6 +477,29 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, return ret; } +int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr) +{ + union bpf_attr attr; + int ret; + + if (!test_attr->data_out && test_attr->data_size_out > 0) + return -EINVAL; + + bzero(&attr, sizeof(attr)); + attr.test.prog_fd = test_attr->prog_fd; + attr.test.data_in = ptr_to_u64(test_attr->data_in); + attr.test.data_out = ptr_to_u64(test_attr->data_out); + attr.test.data_size_in = test_attr->data_size_in; + attr.test.data_size_out = test_attr->data_size_out; + attr.test.repeat = test_attr->repeat; + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + test_attr->data_size_out = attr.test.data_size_out; + test_attr->retval = attr.test.retval; + test_attr->duration = attr.test.duration; + return ret; +} + int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 0d1793dc1628297df1949eaf8f925d7324b15f3f..3a7a4f23fdcc47229877c88407d9b4a59189c717 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -99,7 +99,7 @@ LIBBPF_API int bpf_load_program(enum bpf_prog_type type, char *log_buf, size_t log_buf_sz); LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, - size_t insns_cnt, int strict_alignment, + size_t insns_cnt, __u32 prog_flags, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz, int log_level); @@ -119,6 +119,25 @@ LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type); + +struct bpf_prog_test_run_attr { + int prog_fd; + int repeat; + const void *data_in; + __u32 data_size_in; + void *data_out; /* optional */ + __u32 data_size_out; /* in: max length 
of data_out + * out: length of data_out */ + __u32 retval; /* out: return code of the BPF program */ + __u32 duration; /* out: average per repetition in ns */ +}; + +LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr); + +/* + * bpf_prog_test_run does not check that data_out is large enough. Consider + * using bpf_prog_test_run_xattr instead. + */ LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration); diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index c2d641f3e16eee87b8d01c5c5e95e13ae6ff511d..85d6446cf83251a27f1cc9d04d2209819a885983 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -45,7 +45,7 @@ struct btf_ext { /* The minimum bpf_func_info checked by the loader */ struct bpf_func_info_min { - __u32 insn_offset; + __u32 insn_off; __u32 type_id; }; @@ -670,7 +670,7 @@ int btf_ext__reloc_init(struct btf *btf, struct btf_ext *btf_ext, memcpy(data, sinfo->data, records_len); - /* adjust the insn_offset, the data in .BTF.ext is + /* adjust the insn_off, the data in .BTF.ext is * the actual byte offset, and the kernel expects * the offset in term of bpf_insn. * @@ -681,7 +681,7 @@ int btf_ext__reloc_init(struct btf *btf, struct btf_ext *btf_ext, struct bpf_func_info_min *record; record = data + i * record_size; - record->insn_offset /= sizeof(struct bpf_insn); + record->insn_off /= sizeof(struct bpf_insn); } *func_info = data; @@ -722,15 +722,15 @@ int btf_ext__reloc(struct btf *btf, struct btf_ext *btf_ext, return -ENOMEM; memcpy(data + existing_flen, sinfo->data, records_len); - /* adjust insn_offset only, the rest data will be passed + /* adjust insn_off only, the rest data will be passed * to the kernel. */ for (i = 0; i < sinfo->num_func_info; i++) { struct bpf_func_info_min *record; record = data + existing_flen + i * record_size; - record->insn_offset = - record->insn_offset / sizeof(struct bpf_insn) + + record->insn_off = + record->insn_off / sizeof(struct bpf_insn) + insns_cnt; } *func_info = data; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 4fb29f6d7a80b4a6b0dc1037ab1cf65727baf64d..8deff22d61bba2a52989ec8845bf4846b210b258 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -65,6 +65,7 @@ LIBBPF_0.0.1 { bpf_prog_load_xattr; bpf_prog_query; bpf_prog_test_run; + bpf_prog_test_run_xattr; bpf_program__fd; bpf_program__is_kprobe; bpf_program__is_perf_event; diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c index b9798f558ca7f47619e3e6c5ebcfeeb9d85b6581..284660f5aa9533aaee034e13242f5ea098729676 100644 --- a/tools/testing/selftests/bpf/bpf_flow.c +++ b/tools/testing/selftests/bpf/bpf_flow.c @@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb, { void *data_end = (void *)(long)skb->data_end; void *data = (void *)(long)skb->data; - __u16 nhoff = skb->flow_keys->nhoff; + __u16 thoff = skb->flow_keys->thoff; __u8 *hdr; /* Verifies this variable offset does not overflow */ - if (nhoff > (USHRT_MAX - hdr_size)) + if (thoff > (USHRT_MAX - hdr_size)) return NULL; - hdr = data + nhoff; + hdr = data + thoff; if (hdr + hdr_size <= data_end) return hdr; - if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size)) + if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size)) return NULL; return buffer; @@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto) /* Only inspect standard GRE packets with 
version 0 */ return BPF_OK; - keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */ + keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */ if (GRE_IS_CSUM(gre->flags)) - keys->nhoff += 4; /* Step over chksum and Padding */ + keys->thoff += 4; /* Step over chksum and Padding */ if (GRE_IS_KEY(gre->flags)) - keys->nhoff += 4; /* Step over key */ + keys->thoff += 4; /* Step over key */ if (GRE_IS_SEQ(gre->flags)) - keys->nhoff += 4; /* Step over sequence number */ + keys->thoff += 4; /* Step over sequence number */ keys->is_encap = true; @@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto) if (!eth) return BPF_DROP; - keys->nhoff += sizeof(*eth); + keys->thoff += sizeof(*eth); return parse_eth_proto(skb, eth->h_proto); } else { @@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto) if ((__u8 *)tcp + (tcp->doff << 2) > data_end) return BPF_DROP; - keys->thoff = keys->nhoff; keys->sport = tcp->source; keys->dport = tcp->dest; return BPF_OK; @@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto) if (!udp) return BPF_DROP; - keys->thoff = keys->nhoff; keys->sport = udp->source; keys->dport = udp->dest; return BPF_OK; @@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb) keys->ipv4_src = iph->saddr; keys->ipv4_dst = iph->daddr; - keys->nhoff += iph->ihl << 2; - if (data + keys->nhoff > data_end) + keys->thoff += iph->ihl << 2; + if (data + keys->thoff > data_end) return BPF_DROP; if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) { @@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb) keys->addr_proto = ETH_P_IPV6; memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr)); - keys->nhoff += sizeof(struct ipv6hdr); + keys->thoff += sizeof(struct ipv6hdr); return parse_ipv6_proto(skb, ip6h->nexthdr); } @@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb) /* hlen is in 8-octets and does not include the first 8 bytes * of the header */ - skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3; + skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3; return parse_ipv6_proto(skb, ip6h->nexthdr); } @@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb) if (!fragh) return BPF_DROP; - keys->nhoff += sizeof(*fragh); + keys->thoff += sizeof(*fragh); keys->is_frag = true; if (!(fragh->frag_off & bpf_htons(IP6_OFFSET))) keys->is_first_frag = true; @@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb) __be16 proto; /* Peek back to see if single or double-tagging */ - if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto, + if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto, sizeof(proto))) return BPF_DROP; @@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb) if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) return BPF_DROP; - keys->nhoff += sizeof(*vlan); + keys->thoff += sizeof(*vlan); } vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); if (!vlan) return BPF_DROP; - keys->nhoff += sizeof(*vlan); + keys->thoff += sizeof(*vlan); /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index c5ec5fc74c5d95f500f4f27d00bce5080a1062b7..59716f8920c03a510e0e570250c6685dcaedcbc1 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -166,6 +166,8 
@@ static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, (void *) BPF_FUNC_sk_lookup_udp; static int (*bpf_sk_release)(struct bpf_sock *sk) = (void *) BPF_FUNC_sk_release; +static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) = + (void *) BPF_FUNC_rc_pointer_rel; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index bab5667d0b2207c97bf114e8c0638c02ea1f4349..0ae7a741541464390d7e4a43aa941028d0f2a430 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -621,8 +621,8 @@ static int do_test_single(struct bpf_align_test *test) prog_len = probe_filter_length(prog); fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, - prog, prog_len, 1, "GPL", 0, - bpf_vlog, sizeof(bpf_vlog), 2); + prog, prog_len, BPF_F_STRICT_ALIGNMENT, + "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2); if (fd_prog < 0 && test->result != REJECT) { printf("Failed to load program.\n"); printf("%s", bpf_vlog); diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 57f3bbc924860f1d76dd5b403922bf54472edeaa..6532ac5593f6ed9a1f8713d12dfe559227c4f45b 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -3405,7 +3405,7 @@ static struct btf_func_type_test { }, { - .descr = "func_type (Incorrect bpf_func_info.insn_offset)", + .descr = "func_type (Incorrect bpf_func_info.insn_off)", .raw_types = { BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */ @@ -3552,6 +3552,12 @@ static int do_test_func_type(int test_num) goto done; } + if (CHECK(!info.func_info, + "info.func_info == 0. kernel.kptr_restrict is set?")) { + err = -1; + goto done; + } + finfo = func_info; for (i = 0; i < 2; i++) { if (CHECK(finfo->type_id != test->func_info[i][1], diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index 677686198df34d799e67c0eb15ab25f4b68eba4c..ec4e15948e40631cdb063a5d700310796e3386de 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -21,13 +21,14 @@ do if grep -q DRV_NAME=rc-loopback $i/uevent then LIRCDEV=$(grep DEVNAME= $i/lirc*/uevent | sed sQDEVNAME=Q/dev/Q) + INPUTDEV=$(grep DEVNAME= $i/input*/event*/uevent | sed sQDEVNAME=Q/dev/Q) fi done if [ -n $LIRCDEV ]; then TYPE=lirc_mode2 - ./test_lirc_mode2_user $LIRCDEV + ./test_lirc_mode2_user $LIRCDEV $INPUTDEV ret=$? 
if [ $ret -ne 0 ]; then echo -e ${RED}"FAIL: $TYPE"${NC} diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/test_lirc_mode2_kern.c index ba26855563a5f848a7788b2ee1c6df649933338c..4147130cc3b7bc3d845838baa764f7a595cabbd4 100644 --- a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c +++ b/tools/testing/selftests/bpf/test_lirc_mode2_kern.c @@ -15,6 +15,9 @@ int bpf_decoder(unsigned int *sample) if (duration & 0x10000) bpf_rc_keydown(sample, 0x40, duration & 0xffff, 0); + if (duration & 0x20000) + bpf_rc_pointer_rel(sample, (duration >> 8) & 0xff, + duration & 0xff); } return 0; diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c index d470d63c33dbee631c49b48cd7d2dcd61a2651ad..fb5fd6841ef3954fba88646d38ed5e7a90a35547 100644 --- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c +++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c @@ -29,6 +29,7 @@ #include <linux/bpf.h> #include <linux/lirc.h> +#include <linux/input.h> #include <errno.h> #include <stdio.h> #include <stdlib.h> @@ -47,12 +48,13 @@ int main(int argc, char **argv) { struct bpf_object *obj; - int ret, lircfd, progfd, mode; - int testir = 0x1dead; + int ret, lircfd, progfd, inputfd; + int testir1 = 0x1dead; + int testir2 = 0x20101; u32 prog_ids[10], prog_flags[10], prog_cnt; - if (argc != 2) { - printf("Usage: %s /dev/lircN\n", argv[0]); + if (argc != 3) { + printf("Usage: %s /dev/lircN /dev/input/eventM\n", argv[0]); return 2; } @@ -76,9 +78,9 @@ int main(int argc, char **argv) return 1; } - mode = LIRC_MODE_SCANCODE; - if (ioctl(lircfd, LIRC_SET_REC_MODE, &mode)) { - printf("failed to set rec mode: %m\n"); + inputfd = open(argv[2], O_RDONLY | O_NONBLOCK); + if (inputfd == -1) { + printf("failed to open input device %s: %m\n", argv[2]); return 1; } @@ -102,29 +104,54 @@ } /* Write raw IR */ - ret = write(lircfd, &testir, sizeof(testir)); - if (ret != sizeof(testir)) { + ret = write(lircfd, &testir1, sizeof(testir1)); + if (ret != sizeof(testir1)) { printf("Failed to send test IR message: %m\n"); return 1; } - struct pollfd pfd = { .fd = lircfd, .events = POLLIN }; - struct lirc_scancode lsc; + struct pollfd pfd = { .fd = inputfd, .events = POLLIN }; + struct input_event event; - poll(&pfd, 1, 100); + for (;;) { + poll(&pfd, 1, 100); - /* Read decoded IR */ - ret = read(lircfd, &lsc, sizeof(lsc)); - if (ret != sizeof(lsc)) { - printf("Failed to read decoded IR: %m\n"); - return 1; + /* Read decoded IR */ + ret = read(inputfd, &event, sizeof(event)); + if (ret != sizeof(event)) { + printf("Failed to read decoded IR: %m\n"); + return 1; + } + + if (event.type == EV_MSC && event.code == MSC_SCAN && + event.value == 0xdead) { + break; + } } - if (lsc.scancode != 0xdead || lsc.rc_proto != 64) { - printf("Incorrect scancode decoded\n"); + /* Write raw IR */ + ret = write(lircfd, &testir2, sizeof(testir2)); + if (ret != sizeof(testir2)) { + printf("Failed to send test IR message: %m\n"); return 1; } + for (;;) { + poll(&pfd, 1, 100); + + /* Read decoded IR */ + ret = read(inputfd, &event, sizeof(event)); + if (ret != sizeof(event)) { + printf("Failed to read decoded IR: %m\n"); + return 1; + } + + if (event.type == EV_REL && event.code == REL_Y && + event.value == 1) { + break; + } + } + prog_cnt = 10; ret = bpf_prog_query(lircfd, BPF_LIRC_MODE2, 0, prog_flags, prog_ids, &prog_cnt); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 563f54a6f8b01799825ff38df7a63af67a368fef..25f0083a9b2e7c87d1efbadae3cc00c97e0c5560 
100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -70,7 +70,7 @@ static struct { .tcp.urg_ptr = 123, }; -#define CHECK(condition, tag, format...) ({ \ +#define _CHECK(condition, tag, duration, format...) ({ \ int __ret = !!(condition); \ if (__ret) { \ error_cnt++; \ @@ -83,6 +83,11 @@ static struct { __ret; \ }) +#define CHECK(condition, tag, format...) \ + _CHECK(condition, tag, duration, format) +#define CHECK_ATTR(condition, tag, format...) \ + _CHECK(condition, tag, tattr.duration, format) + static int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { @@ -124,6 +129,53 @@ static void test_pkt_access(void) bpf_object__close(obj); } +static void test_prog_run_xattr(void) +{ + const char *file = "./test_pkt_access.o"; + struct bpf_object *obj; + char buf[10]; + int err; + struct bpf_prog_test_run_attr tattr = { + .repeat = 1, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = 5, + }; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, + &tattr.prog_fd); + if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) + return; + + memset(buf, 0, sizeof(buf)); + + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run", + "err %d errno %d retval %d\n", err, errno, tattr.retval); + + CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", + "incorrect output size, want %lu have %u\n", + sizeof(pkt_v4), tattr.data_size_out); + + CHECK_ATTR(buf[5] != 0, "overflow", + "BPF_PROG_TEST_RUN ignored size hint\n"); + + tattr.data_out = NULL; + tattr.data_size_out = 0; + errno = 0; + + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err || errno || tattr.retval, "run_no_output", + "err %d errno %d retval %d\n", err, errno, tattr.retval); + + tattr.data_size_out = 1; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); + + bpf_object__close(obj); +} + static void test_xdp(void) { struct vip key4 = {.protocol = 6, .family = AF_INET}; @@ -1867,6 +1919,7 @@ int main(void) jit_enabled = is_jit_enabled(); test_pkt_access(); + test_prog_run_xattr(); test_xdp(); test_xdp_adjust_tail(); test_l4lb_all(); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 593adc7ebb26f9e5c20abd1cedaf2c008e98da63..2965f3d6b659cbef64c2a393d6856313bc0885ed 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -721,8 +721,18 @@ static struct bpf_test tests[] = { BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5), BPF_EXIT_INSN(), }, - .result = REJECT, - .errstr = "unknown opcode c4", + .result = ACCEPT, + .retval = 0, + }, + { + "arsh32 on imm 2", + .insns = { + BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788), + BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = -16069393, }, { "arsh32 on reg", @@ -732,8 +742,19 @@ static struct bpf_test tests[] = { BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, - .errstr = "unknown opcode cc", + .result = ACCEPT, + .retval = 0, + }, + { + "arsh32 on reg 2", + .insns = { + BPF_LD_IMM64(BPF_REG_0, 0xffff55667788), + BPF_MOV64_IMM(BPF_REG_1, 15), + BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 43724, }, { "arsh64 on imm", @@ -1823,6 +1844,7 @@ static struct bpf_test tests[] = { .errstr = "invalid bpf_context access", 
.result = REJECT, .prog_type = BPF_PROG_TYPE_SK_MSG, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet read for SK_MSG", @@ -2215,6 +2237,7 @@ static struct bpf_test tests[] = { }, .errstr = "invalid bpf_context access", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "check cb access: half, wrong type", @@ -3312,6 +3336,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "R0 invalid mem access 'inv'", .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "raw_stack: skb_load_bytes, spilled regs corruption 2", @@ -3342,6 +3367,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "R3 invalid mem access 'inv'", .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "raw_stack: skb_load_bytes, spilled regs + data", @@ -3841,6 +3867,7 @@ static struct bpf_test tests[] = { .errstr = "R2 invalid mem access 'inv'", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet access: test16 (arith on data_end)", @@ -3943,6 +3970,7 @@ static struct bpf_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet access: test21 (x += pkt_ptr, 2)", @@ -3968,6 +3996,7 @@ static struct bpf_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet access: test22 (x += pkt_ptr, 3)", @@ -3998,6 +4027,7 @@ static struct bpf_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet access: test23 (x += pkt_ptr, 4)", @@ -4024,6 +4054,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet access: test24 (x += pkt_ptr, 5)", @@ -4049,6 +4080,7 @@ static struct bpf_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "direct packet access: test25 (marking on <, good access)", @@ -5180,6 +5212,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "invalid access to map value, value_size=64 off=-2 size=4", .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid cgroup storage access 5", @@ -5296,6 +5329,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "invalid access to map value, value_size=64 off=-2 size=4", .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid per-cpu cgroup storage access 5", @@ -6987,6 +7021,7 @@ static struct bpf_test tests[] = { .errstr = "invalid mem access 'inv'", .result = REJECT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "map element value illegal alu op, 5", @@ -7009,6 +7044,7 @@ static struct bpf_test tests[] = { .fixup_map_hash_48b = { 3 }, .errstr = "R0 invalid mem access 'inv'", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "map element value is preserved across register spilling", @@ -7502,6 +7538,7 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .retval = 0 /* csum_diff of 64-byte packet */, + .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", @@ -9468,6 +9505,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' > pkt_end, bad access 1", @@ -9505,6 +9543,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end > pkt_data', good access", @@ -9543,6 +9582,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end > pkt_data', bad access 2", @@ -9561,6 +9601,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' < pkt_end, good access", @@ -9599,6 +9640,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' < pkt_end, bad access 2", @@ -9617,6 +9659,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end < pkt_data', good access", @@ -9634,6 +9677,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end < pkt_data', bad access 1", @@ -9671,6 +9715,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' >= pkt_end, good access", @@ -9707,6 +9752,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' >= pkt_end, bad access 2", @@ -9744,6 +9790,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end >= pkt_data', bad access 1", @@ -9782,6 +9829,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' <= pkt_end, good access", @@ -9800,6 +9848,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data' <= pkt_end, bad access 1", @@ -9838,6 +9887,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end <= pkt_data', good access", @@ -9874,6 +9924,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_end <= pkt_data', bad 
access 2", @@ -9910,6 +9961,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' > pkt_data, bad access 1", @@ -9947,6 +9999,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data > pkt_meta', good access", @@ -9985,6 +10038,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data > pkt_meta', bad access 2", @@ -10003,6 +10057,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' < pkt_data, good access", @@ -10041,6 +10096,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' < pkt_data, bad access 2", @@ -10059,6 +10115,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data < pkt_meta', good access", @@ -10076,6 +10133,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data < pkt_meta', bad access 1", @@ -10113,6 +10171,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' >= pkt_data, good access", @@ -10149,6 +10208,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' >= pkt_data, bad access 2", @@ -10186,6 +10246,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data >= pkt_meta', bad access 1", @@ -10224,6 +10285,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' <= pkt_data, good access", @@ -10242,6 +10304,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_meta' <= pkt_data, bad access 1", @@ -10280,6 +10343,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data <= pkt_meta', good access", @@ -10316,6 +10380,7 @@ static struct bpf_test tests[] = { .errstr = "R1 offset is outside of the packet", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "XDP pkt read, pkt_data <= pkt_meta', bad access 2", @@ -10430,6 +10495,7 @@ static struct bpf_test tests[] = { .errstr_unpriv 
= "R1 has pointer with unsupported alu operation", .errstr = "dereference of modified ctx ptr", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "check deducing bounds from const, 8", @@ -10444,6 +10510,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "dereference of modified ctx ptr", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "check deducing bounds from const, 9", @@ -10919,6 +10986,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "R6 invalid mem access 'inv'", .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: two calls with args", @@ -11784,6 +11852,7 @@ static struct bpf_test tests[] = { .fixup_map_hash_8b = { 12, 22 }, .result = REJECT, .errstr = "invalid access to map value, value_size=8 off=2 size=8", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2", @@ -11927,6 +11996,7 @@ static struct bpf_test tests[] = { .fixup_map_hash_8b = { 12, 22 }, .result = REJECT, .errstr = "invalid access to map value, value_size=8 off=2 size=8", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: two calls that receive map_value_ptr_or_null via arg. test1", @@ -12098,6 +12168,7 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .retval = POINTER_VALUE, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 2", @@ -12129,6 +12200,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .errstr = "invalid access to packet", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 3", @@ -12164,6 +12236,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, .retval = 1, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 4", @@ -12198,6 +12271,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, .retval = 1, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 5", @@ -12231,6 +12305,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .errstr = "same insn cannot be used with different", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 6", @@ -12266,6 +12341,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .errstr = "R4 invalid mem access", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 7", @@ -12300,6 +12376,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .errstr = "R4 invalid mem access", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 8", @@ -12340,6 +12417,7 @@ static struct bpf_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: pkt_ptr spill into caller stack 9", @@ -12381,6 +12459,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .errstr = "invalid access to packet", .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "calls: caller stack init to zero or map_value_or_null", @@ -12746,6 +12825,7 @@ static struct bpf_test tests[] = { 
@@ -12746,6 +12825,7 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
 		.prog_type = BPF_PROG_TYPE_XDP,
+		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 	},
 	{
 		"xadd/w check whether src/dst got mangled, 1",
@@ -13232,6 +13312,7 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.errstr = "Unreleased reference",
 		.result = REJECT,
+		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 	},
 	{
 		"reference tracking: alloc, check, free in both subbranches",
@@ -13260,6 +13341,7 @@ static struct bpf_test tests[] = {
 		},
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = ACCEPT,
+		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 	},
 	{
 		"reference tracking in call: free reference in subprog",
@@ -13769,6 +13851,38 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = ACCEPT,
 	},
+	{
+		"check wire_len is not readable by sockets",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, wire_len)),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check wire_len is readable by tc classifier",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, wire_len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"check wire_len is not writable by tc classifier",
+		.insns = {
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, wire_len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
+		.result = REJECT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
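
The three wire_len tests above pin down the new field's contract: readable from a tc classifier, never writable, and not visible to plain socket filters. A minimal sketch of a tc classifier consuming the field from restricted C (the function name, section annotations, and the 1514-byte cutoff are illustrative, not from this patch):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

char _license[] __attribute__((section("license"), used)) = "GPL";

__attribute__((section("classifier"), used))
int drop_oversized(struct __sk_buff *skb)
{
	/* Reading wire_len is valid for BPF_PROG_TYPE_SCHED_CLS;
	 * the same load in a socket filter, or any store to the
	 * field, is rejected with "invalid bpf_context access".
	 */
	if (skb->wire_len > 1514)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}
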
@@ -14054,13 +14168,14 @@ static int set_admin(bool admin)
 static void do_test_single(struct bpf_test *test, bool unpriv,
 			   int *passes, int *errors)
 {
-	int fd_prog, expected_ret, reject_from_alignment;
+	int fd_prog, expected_ret, alignment_prevented_execution;
 	int prog_len, prog_type = test->prog_type;
 	struct bpf_insn *prog = test->insns;
 	int map_fds[MAX_NR_MAPS];
 	const char *expected_err;
 	uint32_t expected_val;
 	uint32_t retval;
+	__u32 pflags;
 	int i, err;
 
 	for (i = 0; i < MAX_NR_MAPS; i++)
@@ -14071,8 +14186,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	do_test_fixup(test, prog_type, prog, map_fds);
 	prog_len = probe_filter_length(prog);
 
-	fd_prog = bpf_verify_program(prog_type, prog, prog_len,
-				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+	pflags = 0;
+	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
+		pflags |= BPF_F_STRICT_ALIGNMENT;
+	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
+		pflags |= BPF_F_ANY_ALIGNMENT;
+	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
 
 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
@@ -14082,28 +14201,27 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	expected_val = unpriv && test->retval_unpriv ?
 		       test->retval_unpriv : test->retval;
 
-	reject_from_alignment = fd_prog < 0 &&
-				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
-				strstr(bpf_vlog, "misaligned");
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (reject_from_alignment) {
-		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
-		       strerror(errno));
-		goto fail_log;
-	}
-#endif
+	alignment_prevented_execution = 0;
+
 	if (expected_ret == ACCEPT) {
-		if (fd_prog < 0 && !reject_from_alignment) {
+		if (fd_prog < 0) {
 			printf("FAIL\nFailed to load prog '%s'!\n",
 			       strerror(errno));
 			goto fail_log;
 		}
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+		if (fd_prog >= 0 &&
+		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
+			alignment_prevented_execution = 1;
+			goto test_ok;
+		}
+#endif
 	} else {
 		if (fd_prog >= 0) {
 			printf("FAIL\nUnexpected success to load!\n");
 			goto fail_log;
 		}
-		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
+		if (!strstr(bpf_vlog, expected_err)) {
 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
 			       expected_err, bpf_vlog);
 			goto fail_log;
@@ -14131,9 +14249,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 			goto fail_log;
 		}
 	}
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+test_ok:
+#endif
 	(*passes)++;
-	printf("OK%s\n", reject_from_alignment ?
-	       " (NOTE: reject due to unknown alignment)" : "");
+	printf("OK%s\n", alignment_prevented_execution ?
+	       " (NOTE: not executed due to unknown alignment)" : "");
 close_fds:
 	close(fd_prog);
 	for (i = 0; i < MAX_NR_MAPS; i++)
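
Beyond the harness, the new BPF_F_ANY_ALIGNMENT behaviour can be requested directly at BPF_PROG_LOAD time. A minimal sketch against the raw bpf(2) syscall, assuming the updated uapi header from this patch (the helper name is made up for illustration):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Load a program with alignment enforcement disabled, mirroring what
 * the harness now does through bpf_verify_program(..., pflags, ...).
 */
static int load_prog_any_alignment(enum bpf_prog_type type,
				   const struct bpf_insn *insns,
				   uint32_t insn_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insns = (uintptr_t)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (uintptr_t)"GPL";
	/* Skip the verifier's strict-alignment checking; meant for
	 * testing, since the program may then perform accesses that a
	 * strict-alignment machine cannot execute safely.
	 */
	attr.prog_flags = BPF_F_ANY_ALIGNMENT;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}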