From ffc687fd35a1f991fee4fced67ee07081cc5061e Mon Sep 17 00:00:00 2001 From: zhuofeng <1107893276@qq.com> Date: Mon, 30 Dec 2024 11:49:41 +0000 Subject: [PATCH] adapt the ebpf collector to the 6.6 kernel Signed-off-by: zhuofeng <1107893276@qq.com> --- src/services/sentryCollector/collect_io.py | 2 +- .../sentryCollector/ebpf_collector/Makefile | 119 +- .../ebpf_collector/bpf_helpers.h | 535 ----- .../sentryCollector/ebpf_collector/bpf_load.c | 709 ------ .../ebpf_collector/ebpf_collector.bpf.c | 1938 ++++------------- .../ebpf_collector/ebpf_collector.c | 242 +- .../ebpf_collector/ebpf_collector.h | 34 +- 7 files changed, 585 insertions(+), 2994 deletions(-) delete mode 100644 src/services/sentryCollector/ebpf_collector/bpf_helpers.h delete mode 100644 src/services/sentryCollector/ebpf_collector/bpf_load.c diff --git a/src/services/sentryCollector/collect_io.py b/src/services/sentryCollector/collect_io.py index a7e86cb..10446d9 100644 --- a/src/services/sentryCollector/collect_io.py +++ b/src/services/sentryCollector/collect_io.py @@ -27,7 +27,7 @@ IO_CONFIG_DATA = [] EBPF_GLOBAL_DATA = [] EBPF_PROCESS = None EBPF_STAGE_LIST = ["wbt", "rq_driver", "bio", "gettag"] -EBPF_SUPPORT_VERSION = ["4.19.90"] +EBPF_SUPPORT_VERSION = ["6.6.0"] class IoStatus(): TOTAL = 0 diff --git a/src/services/sentryCollector/ebpf_collector/Makefile b/src/services/sentryCollector/ebpf_collector/Makefile index b55128f..f38b0e2 100644 --- a/src/services/sentryCollector/ebpf_collector/Makefile +++ b/src/services/sentryCollector/ebpf_collector/Makefile @@ -1,101 +1,40 @@ -# Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved. 
-# Description: ebpf collector program -ARCH ?= $(shell uname -m | sed 's/x86_64/x86/' \ - | sed 's/arm.*/arm/' \ - | sed 's/aarch64/arm64/' \ - | sed 's/ppc64le/powerpc/' \ - | sed 's/mips.*/mips/' \ - | sed 's/riscv64/riscv/' \ - | sed 's/loongarch64/loongarch/') +# 定义编译器 +CC = clang -KERNEL_VERSION ?= $(shell rpm -qa | grep "kernel-source-4.19" | cut -d' ' -f1 | sed 's/kernel-source-//') -KERNEL_SRC := /usr/src/kernels/$(KERNEL_VERSION) -KERNEL_PATH := /usr/src/linux-$(KERNEL_VERSION) -GCC_ARCH ?= $(shell gcc -dumpmachine) -GCC_VERSION ?= $(shell gcc -dumpversion) +# 定义内核态编译选项 +KERNEL_CFLAGS = -O2 -g -target bpf -D__TARGET_ARCH_arm64 -LINUX_INCLUDE := -I$(KERNEL_SRC)/include/ -LINUX_INCLUDE += -I$(KERNEL_SRC)/arch/$(ARCH)/include/ -LINUX_INCLUDE += -I$(KERNEL_SRC)/arch/$(ARCH)/include/generated -LINUX_INCLUDE += -I$(KERNEL_SRC)/arch/$(ARCH)/include/uapi -LINUX_INCLUDE += -I$(KERNEL_SRC)/arch/$(ARCH)/include/uapi/linux -LINUX_INCLUDE += -I$(KERNEL_SRC)/arch/$(ARCH)/include/generated/uapi -LINUX_INCLUDE += -I$(KERNEL_SRC)/include/uapi -LINUX_INCLUDE += -I$(KERNEL_SRC)/include/generated/uapi -LINUX_INCLUDE += -include $(KERNEL_SRC)/include/linux/kconfig.h -LINUX_INCLUDE += -I$(KERNEL_PATH)/samples/bpf -LINUX_INCLUDE += -I$(KERNEL_SRC)/tools/lib/ -LINUX_INCLUDE += -I/usr/src/kernels/$(KERNEL_VERSION)/samples/bpf -LINUX_INCLUDE += -I$(KERNEL_SRC)/tools/perf/include/bpf -LINUX_INCLUDE += -I/usr/include/libbpf/src/bpf -LINUX_INCLUDE += -I/usr/src/kernels/$(KERNEL_VERSION)/include/uapi/linux/ -LINUX_INCLUDE += -I/usr/include/bpf/ -LINUX_INCLUDE += -I/usr/include/ -BPF_LOAD_INCLUDE := -I/usr/include -BPF_LOAD_INCLUDE += -I$(KERNEL_SRC)/include/ -BPF_LOAD_INCLUDE += -I/usr/src/kernels/$(KERNEL_VERSION)/include/ -KBUILD_HOSTCFLAGS := -I$(KERNEL_PATH)/include/ -KBUILD_HOSTCFLAGS += -I$(KERNEL_PATH)/tools/lib/ -I$(KERNEL_PATH)/tools/include -KBUILD_HOSTCFLAGS += -I$(KERNEL_PATH)/tools/perf -NOSTDINC_FLAGS := -nostdinc -EXTRA_CFLAGS := -isystem 
/usr/lib/gcc/$(GCC_ARCH)/$(GCC_VERSION)/include -CFLAGS := -g -Wall -w +# 定义用户态编译选项 +USER_CFLAGS = -g -lbpf -CLANG_BPF_SYS_INCLUDES ?= $(shell $(CLANG) -v -E - &1 \ - | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') +# 定义目标文件 +KERNEL_TARGET = ebpf_collector.bpf.o +USER_TARGET = ebpf_collector +SKELETON_HEADER = ebpf_collector.skel.h +VMLINUX_HEADER = vmlinux.h -APPS = ebpf_collector +# 默认目标 +all: $(KERNEL_TARGET) $(VMLINUX_HEADER) $(SKELETON_HEADER) $(USER_TARGET) -CC = gcc -LLC ?= llc -CLANG ?= clang +# 生成 vmlinux.h 文件 +$(VMLINUX_HEADER): + bpftool btf dump file /sys/kernel/btf/vmlinux format c > $(VMLINUX_HEADER) -USER_CFLAGS = -I. -I/usr/src/kernels/$(KERNEL_VERSION)/include/uapi/linux/ -I/usr/src/kernel/include -Wall -KERNEL_CFLAGS = -I. -I/usr/src/kernels/$(KERNEL_VERSION)/include/uapi/linux/ -Wall -LOADER_CFLAGS = -I. -I/usr/src/kernels/$(KERNEL_VERSION)/include/uapi/linux/ -I/usr/src/kernel/include -CLANG_FLAGS = -O2 -emit-llvm -c -LLC_FLAGS = -march=bpf -filetype=obj +# 编译内核态文件 +$(KERNEL_TARGET): ebpf_collector.bpf.c $(VMLINUX_HEADER) + $(CC) $(KERNEL_CFLAGS) -c $< -o $@ -OUTPUT := output +# 生成骨架文件 +$(SKELETON_HEADER): $(KERNEL_TARGET) + bpftool gen skeleton $< > $@ -.PHONY: all -all: $(APPS) +# 编译用户态文件 +$(USER_TARGET): ebpf_collector.c $(SKELETON_HEADER) $(VMLINUX_HEADER) + $(CC) $(USER_CFLAGS) $< -o $@ -.PHONY: clean +# 清理生成的文件 clean: - $(call msg,CLEAN) - $(Q)rm -rf $(OUTPUT) $(APPS) + rm -f $(KERNEL_TARGET) $(VMLINUX_HEADER) $(SKELETON_HEADER) $(USER_TARGET) -$(OUTPUT): - $(call msg,MKDIR,$@) - $(Q)mkdir -p $@ - -$(OUTPUT)/%.bpf.o: %.bpf.c - $(call msg,BPF,$@) - $(CLANG) $(NOSTDINC_FLAGS) $(EXTRA_CFLAGS) $(LINUX_INCLUDE) $(KBUILD_HOSTCFLAGS) \ - -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \ - -D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \ - -Wno-gnu-variable-sized-type-not-at-end \ - -Wno-address-of-packed-member -Wno-tautological-compare \ - -Wno-unknown-warning-option 
$(CLANG_ARCH_ARGS) \ - -O2 -emit-llvm -c $< -o -| $(LLC) $(LLC_FLAGS) -o $@ - -$(patsubst %,$(OUTPUT)/%.o,$(APPS)): %.o: %.bpf.o - -$(OUTPUT)/bpf_load.o: bpf_load.c | $(OUTPUT) - $(call msg,CC,$@) - $(CC) $(NOSTDINC_FLAGS) $(EXTRA_CFLAGS) $(CFLAGS) -I$(KERNEL_PATH)/samples/bpf -I$(KERNEL_PATH)/tools/perf $(BPF_LOAD_INCLUDE) \ - -I$(KERNEL_PATH)/tools/lib/ -I$(KERNEL_PATH)/tools/include \ - -c $(filter %.c,$^) -o $@ - -$(OUTPUT)/%.o: %.c | $(OUTPUT) - $(call msg,CC,$@) - $(CC) $(CFLAGS) $(INCLUDES) -I$(KERNEL_PATH)/samples/bpf -c $(filter %.c,$^) -o $@ - -$(APPS): %: $(OUTPUT)/%.o $(OUTPUT)/bpf_load.o | $(OUTPUT) - $(call msg,BINARY,$@) - $(Q)$(CC) $(CFLAGS) $^ $(ALL_LDFLAGS) -I$(KERNEL_PATH)/samples/bpf -lelf -lbpf -lz -o $@ - -.DELETE_ON_ERROR: - -.SECONDARY: \ No newline at end of file +# 伪目标,避免与文件名冲突 +.PHONY: all clean diff --git a/src/services/sentryCollector/ebpf_collector/bpf_helpers.h b/src/services/sentryCollector/ebpf_collector/bpf_helpers.h deleted file mode 100644 index 99da9a3..0000000 --- a/src/services/sentryCollector/ebpf_collector/bpf_helpers.h +++ /dev/null @@ -1,535 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#ifndef __BPF_HELPERS__ -#define __BPF_HELPERS__ - -#define __uint(name, val) int (*name)[val] -#define __type(name, val) val *name - -/* helper macro to print out debug messages */ -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - -#ifdef __clang__ - -/* helper macro to place programs, maps, license in - * different sections in elf_bpf file. 
Section names - * are interpreted by elf_bpf loader - */ -#define SEC(NAME) __attribute__((section(NAME), used)) - -/* helper functions called from eBPF programs written in C */ -static void *(*bpf_map_lookup_elem)(void *map, const void *key) = - (void *) BPF_FUNC_map_lookup_elem; -static int (*bpf_map_update_elem)(void *map, const void *key, const void *value, - unsigned long long flags) = - (void *) BPF_FUNC_map_update_elem; -static int (*bpf_map_delete_elem)(void *map, const void *key) = - (void *) BPF_FUNC_map_delete_elem; -static int (*bpf_map_push_elem)(void *map, const void *value, - unsigned long long flags) = - (void *) BPF_FUNC_map_push_elem; -static int (*bpf_map_pop_elem)(void *map, void *value) = - (void *) BPF_FUNC_map_pop_elem; -static int (*bpf_map_peek_elem)(void *map, void *value) = - (void *) BPF_FUNC_map_peek_elem; -static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) = - (void *) BPF_FUNC_probe_read; -static unsigned long long (*bpf_ktime_get_ns)(void) = - (void *) BPF_FUNC_ktime_get_ns; -static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) 
= - (void *) BPF_FUNC_trace_printk; -static void (*bpf_tail_call)(void *ctx, void *map, int index) = - (void *) BPF_FUNC_tail_call; -static unsigned long long (*bpf_get_smp_processor_id)(void) = - (void *) BPF_FUNC_get_smp_processor_id; -static unsigned long long (*bpf_get_current_pid_tgid)(void) = - (void *) BPF_FUNC_get_current_pid_tgid; -static unsigned long long (*bpf_get_current_uid_gid)(void) = - (void *) BPF_FUNC_get_current_uid_gid; -static int (*bpf_get_current_comm)(void *buf, int buf_size) = - (void *) BPF_FUNC_get_current_comm; -static unsigned long long (*bpf_perf_event_read)(void *map, - unsigned long long flags) = - (void *) BPF_FUNC_perf_event_read; -static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) = - (void *) BPF_FUNC_clone_redirect; -static int (*bpf_redirect)(int ifindex, int flags) = - (void *) BPF_FUNC_redirect; -static int (*bpf_redirect_map)(void *map, int key, int flags) = - (void *) BPF_FUNC_redirect_map; -static int (*bpf_perf_event_output)(void *ctx, void *map, - unsigned long long flags, void *data, - int size) = - (void *) BPF_FUNC_perf_event_output; -static int (*bpf_get_stackid)(void *ctx, void *map, int flags) = - (void *) BPF_FUNC_get_stackid; -static int (*bpf_probe_write_user)(void *dst, const void *src, int size) = - (void *) BPF_FUNC_probe_write_user; -static int (*bpf_current_task_under_cgroup)(void *map, int index) = - (void *) BPF_FUNC_current_task_under_cgroup; -static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) = - (void *) BPF_FUNC_skb_get_tunnel_key; -static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) = - (void *) BPF_FUNC_skb_set_tunnel_key; -static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) = - (void *) BPF_FUNC_skb_get_tunnel_opt; -static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) = - (void *) BPF_FUNC_skb_set_tunnel_opt; -static unsigned long long (*bpf_get_prandom_u32)(void) = - (void *) 
BPF_FUNC_get_prandom_u32; -static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = - (void *) BPF_FUNC_xdp_adjust_head; -static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) = - (void *) BPF_FUNC_xdp_adjust_meta; -static int (*bpf_get_socket_cookie)(void *ctx) = - (void *) BPF_FUNC_get_socket_cookie; -static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, - int optlen) = - (void *) BPF_FUNC_setsockopt; -static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, - int optlen) = - (void *) BPF_FUNC_getsockopt; -static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) = - (void *) BPF_FUNC_sock_ops_cb_flags_set; -static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = - (void *) BPF_FUNC_sk_redirect_map; -static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) = - (void *) BPF_FUNC_sk_redirect_hash; -static int (*bpf_sock_map_update)(void *map, void *key, void *value, - unsigned long long flags) = - (void *) BPF_FUNC_sock_map_update; -static int (*bpf_sock_hash_update)(void *map, void *key, void *value, - unsigned long long flags) = - (void *) BPF_FUNC_sock_hash_update; -static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags, - void *buf, unsigned int buf_size) = - (void *) BPF_FUNC_perf_event_read_value; -static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, - unsigned int buf_size) = - (void *) BPF_FUNC_perf_prog_read_value; -static int (*bpf_override_return)(void *ctx, unsigned long rc) = - (void *) BPF_FUNC_override_return; -static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) = - (void *) BPF_FUNC_msg_redirect_map; -static int (*bpf_msg_redirect_hash)(void *ctx, - void *map, void *key, int flags) = - (void *) BPF_FUNC_msg_redirect_hash; -static int (*bpf_msg_apply_bytes)(void *ctx, int len) = - (void *) BPF_FUNC_msg_apply_bytes; -static int (*bpf_msg_cork_bytes)(void *ctx, int len) = - (void *) 
BPF_FUNC_msg_cork_bytes; -static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) = - (void *) BPF_FUNC_msg_pull_data; -static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) = - (void *) BPF_FUNC_msg_push_data; -static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) = - (void *) BPF_FUNC_msg_pop_data; -static int (*bpf_bind)(void *ctx, void *addr, int addr_len) = - (void *) BPF_FUNC_bind; -static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) = - (void *) BPF_FUNC_xdp_adjust_tail; -static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state, - int size, int flags) = - (void *) BPF_FUNC_skb_get_xfrm_state; -static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) = - (void *) BPF_FUNC_sk_select_reuseport; -static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) = - (void *) BPF_FUNC_get_stack; -static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, - int plen, __u32 flags) = - (void *) BPF_FUNC_fib_lookup; -static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr, - unsigned int len) = - (void *) BPF_FUNC_lwt_push_encap; -static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset, - void *from, unsigned int len) = - (void *) BPF_FUNC_lwt_seg6_store_bytes; -static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param, - unsigned int param_len) = - (void *) BPF_FUNC_lwt_seg6_action; -static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset, - unsigned int len) = - (void *) BPF_FUNC_lwt_seg6_adjust_srh; -static int (*bpf_rc_repeat)(void *ctx) = - (void *) BPF_FUNC_rc_repeat; -static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol, - unsigned long long scancode, unsigned int toggle) = - (void *) BPF_FUNC_rc_keydown; -static unsigned long long (*bpf_get_current_cgroup_id)(void) = - (void *) BPF_FUNC_get_current_cgroup_id; -static void *(*bpf_get_local_storage)(void *map, unsigned 
long long flags) = - (void *) BPF_FUNC_get_local_storage; -static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = - (void *) BPF_FUNC_skb_cgroup_id; -static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = - (void *) BPF_FUNC_skb_ancestor_cgroup_id; -static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, - struct bpf_sock_tuple *tuple, - int size, unsigned long long netns_id, - unsigned long long flags) = - (void *) BPF_FUNC_sk_lookup_tcp; -static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, - struct bpf_sock_tuple *tuple, - int size, unsigned long long netns_id, - unsigned long long flags) = - (void *) BPF_FUNC_skc_lookup_tcp; -static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, - struct bpf_sock_tuple *tuple, - int size, unsigned long long netns_id, - unsigned long long flags) = - (void *) BPF_FUNC_sk_lookup_udp; -static int (*bpf_sk_release)(struct bpf_sock *sk) = - (void *) BPF_FUNC_sk_release; -static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) = - (void *) BPF_FUNC_skb_vlan_push; -static int (*bpf_skb_vlan_pop)(void *ctx) = - (void *) BPF_FUNC_skb_vlan_pop; -static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) = - (void *) BPF_FUNC_rc_pointer_rel; -static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) = - (void *) BPF_FUNC_spin_lock; -static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = - (void *) BPF_FUNC_spin_unlock; -static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = - (void *) BPF_FUNC_sk_fullsock; -static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = - (void *) BPF_FUNC_tcp_sock; -static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = - (void *) BPF_FUNC_get_listener_sock; -static int (*bpf_skb_ecn_set_ce)(void *ctx) = - (void *) BPF_FUNC_skb_ecn_set_ce; -static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk, - void *ip, int ip_len, void *tcp, int tcp_len) = - (void *) BPF_FUNC_tcp_check_syncookie; -static int 
(*bpf_sysctl_get_name)(void *ctx, char *buf, - unsigned long long buf_len, - unsigned long long flags) = - (void *) BPF_FUNC_sysctl_get_name; -static int (*bpf_sysctl_get_current_value)(void *ctx, char *buf, - unsigned long long buf_len) = - (void *) BPF_FUNC_sysctl_get_current_value; -static int (*bpf_sysctl_get_new_value)(void *ctx, char *buf, - unsigned long long buf_len) = - (void *) BPF_FUNC_sysctl_get_new_value; -static int (*bpf_sysctl_set_new_value)(void *ctx, const char *buf, - unsigned long long buf_len) = - (void *) BPF_FUNC_sysctl_set_new_value; -static int (*bpf_strtol)(const char *buf, unsigned long long buf_len, - unsigned long long flags, long *res) = - (void *) BPF_FUNC_strtol; -static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len, - unsigned long long flags, unsigned long *res) = - (void *) BPF_FUNC_strtoul; -static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk, - void *value, __u64 flags) = - (void *) BPF_FUNC_sk_storage_get; -static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) = - (void *)BPF_FUNC_sk_storage_delete; -static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal; -static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip, - int ip_len, void *tcp, int tcp_len) = - (void *) BPF_FUNC_tcp_gen_syncookie; - -/* llvm builtin functions that eBPF C program may use to - * emit BPF_LD_ABS and BPF_LD_IND instructions - */ -struct sk_buff; -unsigned long long load_byte(void *skb, - unsigned long long off) asm("llvm.bpf.load.byte"); -unsigned long long load_half(void *skb, - unsigned long long off) asm("llvm.bpf.load.half"); -unsigned long long load_word(void *skb, - unsigned long long off) asm("llvm.bpf.load.word"); - -/* a helper structure used by eBPF C program - * to describe map attributes to elf_bpf loader - */ -struct bpf_map_def { - unsigned int type; - unsigned int key_size; - unsigned int value_size; - unsigned int max_entries; - unsigned int map_flags; - 
unsigned int inner_map_idx; - unsigned int numa_node; -}; - -#else - -#include - -#endif - -#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ - struct ____btf_map_##name { \ - type_key key; \ - type_val value; \ - }; \ - struct ____btf_map_##name \ - __attribute__ ((section(".maps." #name), used)) \ - ____btf_map_##name = { } - -static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = - (void *) BPF_FUNC_skb_load_bytes; -static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) = - (void *) BPF_FUNC_skb_load_bytes_relative; -static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = - (void *) BPF_FUNC_skb_store_bytes; -static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) = - (void *) BPF_FUNC_l3_csum_replace; -static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = - (void *) BPF_FUNC_l4_csum_replace; -static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) = - (void *) BPF_FUNC_csum_diff; -static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = - (void *) BPF_FUNC_skb_under_cgroup; -static int (*bpf_skb_change_head)(void *, int len, int flags) = - (void *) BPF_FUNC_skb_change_head; -static int (*bpf_skb_pull_data)(void *, int len) = - (void *) BPF_FUNC_skb_pull_data; -static unsigned int (*bpf_get_cgroup_classid)(void *ctx) = - (void *) BPF_FUNC_get_cgroup_classid; -static unsigned int (*bpf_get_route_realm)(void *ctx) = - (void *) BPF_FUNC_get_route_realm; -static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) = - (void *) BPF_FUNC_skb_change_proto; -static int (*bpf_skb_change_type)(void *ctx, __u32 type) = - (void *) BPF_FUNC_skb_change_type; -static unsigned int (*bpf_get_hash_recalc)(void *ctx) = - (void *) BPF_FUNC_get_hash_recalc; -static unsigned long long (*bpf_get_current_task)(void) = - (void *) BPF_FUNC_get_current_task; -static int 
(*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) = - (void *) BPF_FUNC_skb_change_tail; -static long long (*bpf_csum_update)(void *ctx, __u32 csum) = - (void *) BPF_FUNC_csum_update; -static void (*bpf_set_hash_invalid)(void *ctx) = - (void *) BPF_FUNC_set_hash_invalid; -static int (*bpf_get_numa_node_id)(void) = - (void *) BPF_FUNC_get_numa_node_id; -static int (*bpf_probe_read_str)(void *ctx, __u32 size, - const void *unsafe_ptr) = - (void *) BPF_FUNC_probe_read_str; -static unsigned int (*bpf_get_socket_uid)(void *ctx) = - (void *) BPF_FUNC_get_socket_uid; -static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) = - (void *) BPF_FUNC_set_hash; -static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode, - unsigned long long flags) = - (void *) BPF_FUNC_skb_adjust_room; - -/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ -#if defined(__TARGET_ARCH_x86) - #define bpf_target_x86 - #define bpf_target_defined -#elif defined(__TARGET_ARCH_s390) - #define bpf_target_s390 - #define bpf_target_defined -#elif defined(__TARGET_ARCH_arm) - #define bpf_target_arm - #define bpf_target_defined -#elif defined(__TARGET_ARCH_arm64) - #define bpf_target_arm64 - #define bpf_target_defined -#elif defined(__TARGET_ARCH_mips) - #define bpf_target_mips - #define bpf_target_defined -#elif defined(__TARGET_ARCH_powerpc) - #define bpf_target_powerpc - #define bpf_target_defined -#elif defined(__TARGET_ARCH_sparc) - #define bpf_target_sparc - #define bpf_target_defined -#else - #undef bpf_target_defined -#endif - -/* Fall back to what the compiler says */ -#ifndef bpf_target_defined -#if defined(__x86_64__) - #define bpf_target_x86 -#elif defined(__s390__) - #define bpf_target_s390 -#elif defined(__arm__) - #define bpf_target_arm -#elif defined(__aarch64__) - #define bpf_target_arm64 -#elif defined(__mips__) - #define bpf_target_mips -#elif defined(__powerpc__) - #define bpf_target_powerpc -#elif defined(__sparc__) - #define bpf_target_sparc 
-#endif -#endif - -#if defined(bpf_target_x86) - -#ifdef __KERNEL__ -#define PT_REGS_PARM1(x) ((x)->di) -#define PT_REGS_PARM2(x) ((x)->si) -#define PT_REGS_PARM3(x) ((x)->dx) -#define PT_REGS_PARM4(x) ((x)->cx) -#define PT_REGS_PARM5(x) ((x)->r8) -#define PT_REGS_RET(x) ((x)->sp) -#define PT_REGS_FP(x) ((x)->bp) -#define PT_REGS_RC(x) ((x)->ax) -#define PT_REGS_SP(x) ((x)->sp) -#define PT_REGS_IP(x) ((x)->ip) -#else -#ifdef __i386__ -/* i386 kernel is built with -mregparm=3 */ -#define PT_REGS_PARM1(x) ((x)->eax) -#define PT_REGS_PARM2(x) ((x)->edx) -#define PT_REGS_PARM3(x) ((x)->ecx) -#define PT_REGS_PARM4(x) 0 -#define PT_REGS_PARM5(x) 0 -#define PT_REGS_RET(x) ((x)->esp) -#define PT_REGS_FP(x) ((x)->ebp) -#define PT_REGS_RC(x) ((x)->eax) -#define PT_REGS_SP(x) ((x)->esp) -#define PT_REGS_IP(x) ((x)->eip) -#else -#define PT_REGS_PARM1(x) ((x)->rdi) -#define PT_REGS_PARM2(x) ((x)->rsi) -#define PT_REGS_PARM3(x) ((x)->rdx) -#define PT_REGS_PARM4(x) ((x)->rcx) -#define PT_REGS_PARM5(x) ((x)->r8) -#define PT_REGS_RET(x) ((x)->rsp) -#define PT_REGS_FP(x) ((x)->rbp) -#define PT_REGS_RC(x) ((x)->rax) -#define PT_REGS_SP(x) ((x)->rsp) -#define PT_REGS_IP(x) ((x)->rip) -#endif -#endif - -#elif defined(bpf_target_s390) - -/* s390 provides user_pt_regs instead of struct pt_regs to userspace */ -struct pt_regs; -#define PT_REGS_S390 const volatile user_pt_regs -#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2]) -#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3]) -#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4]) -#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5]) -#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6]) -#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14]) -/* Works only with CONFIG_FRAME_POINTER */ -#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11]) -#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2]) -#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15]) -#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr) 
- -#elif defined(bpf_target_arm) - -#define PT_REGS_PARM1(x) ((x)->uregs[0]) -#define PT_REGS_PARM2(x) ((x)->uregs[1]) -#define PT_REGS_PARM3(x) ((x)->uregs[2]) -#define PT_REGS_PARM4(x) ((x)->uregs[3]) -#define PT_REGS_PARM5(x) ((x)->uregs[4]) -#define PT_REGS_RET(x) ((x)->uregs[14]) -#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */ -#define PT_REGS_RC(x) ((x)->uregs[0]) -#define PT_REGS_SP(x) ((x)->uregs[13]) -#define PT_REGS_IP(x) ((x)->uregs[12]) - -#elif defined(bpf_target_arm64) - -/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ -struct pt_regs; -#define PT_REGS_ARM64 const volatile struct user_pt_regs -#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0]) -#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1]) -#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2]) -#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3]) -#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4]) -#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30]) -/* Works only with CONFIG_FRAME_POINTER */ -#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29]) -#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0]) -#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp) -#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc) - -#elif defined(bpf_target_mips) - -#define PT_REGS_PARM1(x) ((x)->regs[4]) -#define PT_REGS_PARM2(x) ((x)->regs[5]) -#define PT_REGS_PARM3(x) ((x)->regs[6]) -#define PT_REGS_PARM4(x) ((x)->regs[7]) -#define PT_REGS_PARM5(x) ((x)->regs[8]) -#define PT_REGS_RET(x) ((x)->regs[31]) -#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */ -#define PT_REGS_RC(x) ((x)->regs[1]) -#define PT_REGS_SP(x) ((x)->regs[29]) -#define PT_REGS_IP(x) ((x)->cp0_epc) - -#elif defined(bpf_target_powerpc) - -#define PT_REGS_PARM1(x) ((x)->gpr[3]) -#define PT_REGS_PARM2(x) ((x)->gpr[4]) -#define PT_REGS_PARM3(x) ((x)->gpr[5]) -#define PT_REGS_PARM4(x) ((x)->gpr[6]) -#define 
PT_REGS_PARM5(x) ((x)->gpr[7]) -#define PT_REGS_RC(x) ((x)->gpr[3]) -#define PT_REGS_SP(x) ((x)->sp) -#define PT_REGS_IP(x) ((x)->nip) - -#elif defined(bpf_target_sparc) - -#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) -#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) -#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2]) -#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3]) -#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4]) -#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) -#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) -#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) - -/* Should this also be a bpf_target check for the sparc case? */ -#if defined(__arch64__) -#define PT_REGS_IP(x) ((x)->tpc) -#else -#define PT_REGS_IP(x) ((x)->pc) -#endif - -#endif - -#if defined(bpf_target_powerpc) -#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) -#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#elif defined(bpf_target_sparc) -#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) -#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#else -#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \ - bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) -#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \ - bpf_probe_read(&(ip), sizeof(ip), \ - (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) -#endif - -/* - * BPF_CORE_READ abstracts away bpf_probe_read() call and captures offset - * relocation for source address using __builtin_preserve_access_index() - * built-in, provided by Clang. - * - * __builtin_preserve_access_index() takes as an argument an expression of - * taking an address of a field within struct/union. It makes compiler emit - * a relocation, which records BTF type ID describing root struct/union and an - * accessor string which describes exact embedded field that was used to take - * an address. See detailed description of this relocation format and - * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h. 
- * - * This relocation allows libbpf to adjust BPF instruction to use correct - * actual field offset, based on target kernel BTF type that matches original - * (local) BTF, used to record relocation. - */ -#define BPF_CORE_READ(dst, src) \ - bpf_probe_read((dst), sizeof(*(src)), \ - __builtin_preserve_access_index(src)) - -#endif \ No newline at end of file diff --git a/src/services/sentryCollector/ebpf_collector/bpf_load.c b/src/services/sentryCollector/ebpf_collector/bpf_load.c deleted file mode 100644 index f1108ff..0000000 --- a/src/services/sentryCollector/ebpf_collector/bpf_load.c +++ /dev/null @@ -1,709 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DEBUGFS "/sys/kernel/debug/tracing/" - -static char license[128]; -static int kern_version; -static bool processed_sec[128]; -char bpf_log_buf[BPF_LOG_BUF_SIZE]; -int map_fd[MAX_MAPS]; -int prog_fd[MAX_PROGS]; -int event_fd[MAX_PROGS]; -int prog_cnt; -int prog_array_fd = -1; - -struct bpf_map_data map_data[MAX_MAPS]; -int map_data_count = 0; - -static int populate_prog_array(const char *event, int prog_fd) -{ - int ind = atoi(event), err; - - err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY); - if (err < 0) { - printf("failed to store prog_fd in prog_array\n"); - return -1; - } - return 0; -} - -static int write_kprobe_events(const char *val) -{ - int fd, ret, flags; - - if (val == NULL) - return -1; - else if (val[0] == '\0') - flags = O_WRONLY | O_TRUNC; - else - flags = O_WRONLY | O_APPEND; - - fd = open("/sys/kernel/debug/tracing/kprobe_events", flags); - - ret = write(fd, val, strlen(val)); - close(fd); - - return ret; -} - -static int load_and_attach(const char *event, struct bpf_insn *prog, int size) -{ - 
bool is_socket = strncmp(event, "socket", 6) == 0; - bool is_kprobe = strncmp(event, "kprobe/", 7) == 0; - bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0; - bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0; - bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0; - bool is_xdp = strncmp(event, "xdp", 3) == 0; - bool is_perf_event = strncmp(event, "perf_event", 10) == 0; - bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0; - bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0; - bool is_sockops = strncmp(event, "sockops", 7) == 0; - bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0; - bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0; - size_t insns_cnt = size / sizeof(struct bpf_insn); - enum bpf_prog_type prog_type; - char buf[256]; - int fd, efd, err, id; - struct perf_event_attr attr = {}; - - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW; - attr.sample_period = 1; - attr.wakeup_events = 1; - - if (is_socket) { - prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - } else if (is_kprobe || is_kretprobe) { - prog_type = BPF_PROG_TYPE_KPROBE; - } else if (is_tracepoint) { - prog_type = BPF_PROG_TYPE_TRACEPOINT; - } else if (is_raw_tracepoint) { - prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT; - } else if (is_xdp) { - prog_type = BPF_PROG_TYPE_XDP; - } else if (is_perf_event) { - prog_type = BPF_PROG_TYPE_PERF_EVENT; - } else if (is_cgroup_skb) { - prog_type = BPF_PROG_TYPE_CGROUP_SKB; - } else if (is_cgroup_sk) { - prog_type = BPF_PROG_TYPE_CGROUP_SOCK; - } else if (is_sockops) { - prog_type = BPF_PROG_TYPE_SOCK_OPS; - } else if (is_sk_skb) { - prog_type = BPF_PROG_TYPE_SK_SKB; - } else if (is_sk_msg) { - prog_type = BPF_PROG_TYPE_SK_MSG; - } else { - printf("Unknown event '%s'\n", event); - return -1; - } - - if (prog_cnt == MAX_PROGS) - return -1; - - fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version, - bpf_log_buf, BPF_LOG_BUF_SIZE); - if (fd < 0) { - printf("bpf_load_program() 
err=%d\n%s", errno, bpf_log_buf); - return -1; - } - - prog_fd[prog_cnt++] = fd; - - if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk) - return 0; - - if (is_socket || is_sockops || is_sk_skb || is_sk_msg) { - if (is_socket) - event += 6; - else - event += 7; - if (*event != '/') - return 0; - event++; - if (!isdigit(*event)) { - printf("invalid prog number\n"); - return -1; - } - return populate_prog_array(event, fd); - } - - if (is_raw_tracepoint) { - efd = bpf_raw_tracepoint_open(event + 15, fd); - if (efd < 0) { - printf("tracepoint %s %s\n", event + 15, strerror(errno)); - return -1; - } - event_fd[prog_cnt - 1] = efd; - return 0; - } - - if (is_kprobe || is_kretprobe) { - bool need_normal_check = true; - const char *event_prefix = ""; - - if (is_kprobe) - event += 7; - else - event += 10; - - if (*event == 0) { - printf("event name cannot be empty\n"); - return -1; - } - - if (isdigit(*event)) - return populate_prog_array(event, fd); - -#ifdef __x86_64__ - if (strncmp(event, "sys_", 4) == 0) { - snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s", - is_kprobe ? 'p' : 'r', event, event); - err = write_kprobe_events(buf); - if (err >= 0) { - need_normal_check = false; - event_prefix = "__x64_"; - } - } -#endif - if (need_normal_check) { - if (strcmp("wbt_wait", event) == 0 || strcmp("blk_mq_get_tag", event) == 0) { - if (is_kprobe) { - snprintf(buf, sizeof(buf), "%c:%s_1 %s", - is_kprobe ? 'p' : 'r', event, event); - } - else { - snprintf(buf, sizeof(buf), "%c:%s_2 %s", - is_kprobe ? 'p' : 'r', event, event); - } - } - else { - snprintf(buf, sizeof(buf), "%c:%s %s", - is_kprobe ? 
'p' : 'r', event, event); - } - err = write_kprobe_events(buf); - if (err < 0) { - printf("failed to create kprobe '%s' error '%s'\n", - event, strerror(errno)); - return -1; - } - } - - strcpy(buf, DEBUGFS); - strcat(buf, "events/kprobes/"); - strcat(buf, event_prefix); - strcat(buf, event); - - if (strcmp("wbt_wait", event) == 0 || strcmp("blk_mq_get_tag", event) == 0) { - if (is_kprobe) { - strcat(buf, "_1"); - } - else { - strcat(buf, "_2"); - } - } - strcat(buf, "/id"); - } else if (is_tracepoint) { - event += 11; - - if (*event == 0) { - printf("event name cannot be empty\n"); - return -1; - } - strcpy(buf, DEBUGFS); - strcat(buf, "events/"); - strcat(buf, event); - strcat(buf, "/id"); - } - - efd = open(buf, O_RDONLY, 0); - if (efd < 0) { - printf("failed to open event %s\n", event); - return -1; - } - - err = read(efd, buf, sizeof(buf)); - if (err < 0 || err >= sizeof(buf)) { - printf("read from '%s' failed '%s'\n", event, strerror(errno)); - return -1; - } - - close(efd); - - buf[err] = 0; - id = atoi(buf); - attr.config = id; - - efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0); - if (efd < 0) { - printf("event %d fd %d err %s\n", id, efd, strerror(errno)); - return -1; - } - event_fd[prog_cnt - 1] = efd; - err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0); - if (err < 0) { - printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n", - strerror(errno)); - return -1; - } - err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd); - if (err < 0) { - printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n", - strerror(errno)); - return -1; - } - - return 0; -} - -static int load_maps(struct bpf_map_data *maps, int nr_maps, - fixup_map_cb fixup_map) -{ - int i, numa_node; - - for (i = 0; i < nr_maps; i++) { - if (fixup_map) { - fixup_map(&maps[i], i); - /* Allow userspace to assign map FD prior to creation */ - if (maps[i].fd != -1) { - map_fd[i] = maps[i].fd; - continue; - } - } - - numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ? 
- maps[i].def.numa_node : -1; - - if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS || - maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) { - int inner_map_fd = map_fd[maps[i].def.inner_map_idx]; - - map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type, - maps[i].name, - maps[i].def.key_size, - inner_map_fd, - maps[i].def.max_entries, - maps[i].def.map_flags, - numa_node); - } else { - map_fd[i] = bpf_create_map_node(maps[i].def.type, - maps[i].name, - maps[i].def.key_size, - maps[i].def.value_size, - maps[i].def.max_entries, - maps[i].def.map_flags, - numa_node); - } - if (map_fd[i] < 0) { - printf("failed to create a map: %d %s\n", - errno, strerror(errno)); - return 1; - } - maps[i].fd = map_fd[i]; - - if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY) - prog_array_fd = map_fd[i]; - } - return 0; -} - -static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname, - GElf_Shdr *shdr, Elf_Data **data) -{ - Elf_Scn *scn; - - scn = elf_getscn(elf, i); - if (!scn) - return 1; - - if (gelf_getshdr(scn, shdr) != shdr) - return 2; - - *shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name); - if (!*shname || !shdr->sh_size) - return 3; - - *data = elf_getdata(scn, 0); - if (!*data || elf_getdata(scn, *data) != NULL) - return 4; - - return 0; -} - -static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols, - GElf_Shdr *shdr, struct bpf_insn *insn, - struct bpf_map_data *maps, int nr_maps) -{ - int i, nrels; - - nrels = shdr->sh_size / shdr->sh_entsize; - - for (i = 0; i < nrels; i++) { - GElf_Sym sym; - GElf_Rel rel; - unsigned int insn_idx; - bool match = false; - int j, map_idx; - - gelf_getrel(data, i, &rel); - - insn_idx = rel.r_offset / sizeof(struct bpf_insn); - - gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym); - - if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { - printf("invalid relo for insn[%d].code 0x%x\n", - insn_idx, insn[insn_idx].code); - return 1; - } - insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; - - /* Match FD relocation 
against recorded map_data[] offset */ - for (map_idx = 0; map_idx < nr_maps; map_idx++) { - if (maps[map_idx].elf_offset == sym.st_value) { - match = true; - break; - } - } - if (match) { - insn[insn_idx].imm = maps[map_idx].fd; - } else { - printf("invalid relo for insn[%d] no map_data match\n", - insn_idx); - return 1; - } - } - - return 0; -} - -static int cmp_symbols(const void *l, const void *r) -{ - const GElf_Sym *lsym = (const GElf_Sym *)l; - const GElf_Sym *rsym = (const GElf_Sym *)r; - - if (lsym->st_value < rsym->st_value) - return -1; - else if (lsym->st_value > rsym->st_value) - return 1; - else - return 0; -} - -static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx, - Elf *elf, Elf_Data *symbols, int strtabidx) -{ - int map_sz_elf, map_sz_copy; - bool validate_zero = false; - Elf_Data *data_maps; - int i, nr_maps; - GElf_Sym *sym; - Elf_Scn *scn; - int copy_sz; - - if (maps_shndx < 0) - return -EINVAL; - if (!symbols) - return -EINVAL; - - /* Get data for maps section via elf index */ - scn = elf_getscn(elf, maps_shndx); - if (scn) - data_maps = elf_getdata(scn, NULL); - if (!scn || !data_maps) { - printf("Failed to get Elf_Data from maps section %d\n", - maps_shndx); - return -EINVAL; - } - - /* For each map get corresponding symbol table entry */ - sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym)); - for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { - assert(nr_maps < MAX_MAPS+1); - if (!gelf_getsym(symbols, i, &sym[nr_maps])) - continue; - if (sym[nr_maps].st_shndx != maps_shndx) - continue; - /* Only increment if maps section */ - nr_maps++; - } - - /* Align to map_fd[] order, via sort on offset in sym.st_value */ - qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols); - - /* Keeping compatible with ELF maps section changes - * ------------------------------------------------ - * The program size of struct bpf_load_map_def is known by loader - * code, but struct stored in ELF file can be different. 
- * - * Unfortunately sym[i].st_size is zero. To calculate the - * struct size stored in the ELF file, assume all struct have - * the same size, and simply divide with number of map - * symbols. - */ - map_sz_elf = data_maps->d_size / nr_maps; - map_sz_copy = sizeof(struct bpf_load_map_def); - if (map_sz_elf < map_sz_copy) { - /* - * Backward compat, loading older ELF file with - * smaller struct, keeping remaining bytes zero. - */ - map_sz_copy = map_sz_elf; - } else if (map_sz_elf > map_sz_copy) { - /* - * Forward compat, loading newer ELF file with larger - * struct with unknown features. Assume zero means - * feature not used. Thus, validate rest of struct - * data is zero. - */ - validate_zero = true; - } - - /* Memcpy relevant part of ELF maps data to loader maps */ - for (i = 0; i < nr_maps; i++) { - struct bpf_load_map_def *def; - unsigned char *addr, *end; - const char *map_name; - size_t offset; - - map_name = elf_strptr(elf, strtabidx, sym[i].st_name); - maps[i].name = strdup(map_name); - if (!maps[i].name) { - printf("strdup(%s): %s(%d)\n", map_name, - strerror(errno), errno); - free(sym); - return -errno; - } - - /* Symbol value is offset into ELF maps section data area */ - offset = sym[i].st_value; - def = (struct bpf_load_map_def *)(data_maps->d_buf + offset); - maps[i].elf_offset = offset; - memset(&maps[i].def, 0, sizeof(struct bpf_load_map_def)); - memcpy(&maps[i].def, def, map_sz_copy); - - /* Verify no newer features were requested */ - if (validate_zero) { - addr = (unsigned char*) def + map_sz_copy; - end = (unsigned char*) def + map_sz_elf; - for (; addr < end; addr++) { - if (*addr != 0) { - free(sym); - return -EFBIG; - } - } - } - } - - free(sym); - return nr_maps; -} - -static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map) -{ - int fd, i, ret, maps_shndx = -1, strtabidx = -1; - Elf *elf; - GElf_Ehdr ehdr; - GElf_Shdr shdr, shdr_prog; - Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL; - char *shname, 
*shname_prog; - int nr_maps = 0; - - /* reset global variables */ - kern_version = 0; - memset(license, 0, sizeof(license)); - memset(processed_sec, 0, sizeof(processed_sec)); - - if (elf_version(EV_CURRENT) == EV_NONE) - return 1; - - fd = open(path, O_RDONLY, 0); - if (fd < 0) - return 1; - - elf = elf_begin(fd, ELF_C_READ, NULL); - - if (!elf) - return 1; - - if (gelf_getehdr(elf, &ehdr) != &ehdr) - return 1; - - /* clear all kprobes */ - i = write_kprobe_events(""); - - /* scan over all elf sections to get license and map info */ - for (i = 1; i < ehdr.e_shnum; i++) { - - if (get_sec(elf, i, &ehdr, &shname, &shdr, &data)) - continue; - - if (0) /* helpful for llvm debugging */ - printf("section %d:%s data %p size %zd link %d flags %d\n", - i, shname, data->d_buf, data->d_size, - shdr.sh_link, (int) shdr.sh_flags); - - if (strcmp(shname, "license") == 0) { - processed_sec[i] = true; - memcpy(license, data->d_buf, data->d_size); - } else if (strcmp(shname, "version") == 0) { - processed_sec[i] = true; - if (data->d_size != sizeof(int)) { - printf("invalid size of version section %zd\n", - data->d_size); - return 1; - } - memcpy(&kern_version, data->d_buf, sizeof(int)); - } else if (strcmp(shname, "maps") == 0) { - int j; - - maps_shndx = i; - data_maps = data; - for (j = 0; j < MAX_MAPS; j++) - map_data[j].fd = -1; - } else if (shdr.sh_type == SHT_SYMTAB) { - strtabidx = shdr.sh_link; - symbols = data; - } - } - - ret = 1; - - if (!symbols) { - printf("missing SHT_SYMTAB section\n"); - goto done; - } - - if (data_maps) { - nr_maps = load_elf_maps_section(map_data, maps_shndx, - elf, symbols, strtabidx); - if (nr_maps < 0) { - printf("Error: Failed loading ELF maps (errno:%d):%s\n", - nr_maps, strerror(-nr_maps)); - goto done; - } - if (load_maps(map_data, nr_maps, fixup_map)) - goto done; - map_data_count = nr_maps; - - processed_sec[maps_shndx] = true; - } - - /* process all relo sections, and rewrite bpf insns for maps */ - for (i = 1; i < ehdr.e_shnum; i++) { 
- if (processed_sec[i]) - continue; - - if (get_sec(elf, i, &ehdr, &shname, &shdr, &data)) - continue; - - if (shdr.sh_type == SHT_REL) { - struct bpf_insn *insns; - - /* locate prog sec that need map fixup (relocations) */ - if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog, - &shdr_prog, &data_prog)) - continue; - - if (shdr_prog.sh_type != SHT_PROGBITS || - !(shdr_prog.sh_flags & SHF_EXECINSTR)) - continue; - - insns = (struct bpf_insn *) data_prog->d_buf; - processed_sec[i] = true; /* relo section */ - - if (parse_relo_and_apply(data, symbols, &shdr, insns, - map_data, nr_maps)) - continue; - } - } - - /* load programs */ - for (i = 1; i < ehdr.e_shnum; i++) { - - if (processed_sec[i]) - continue; - - if (get_sec(elf, i, &ehdr, &shname, &shdr, &data)) - continue; - - if (memcmp(shname, "kprobe/", 7) == 0 || - memcmp(shname, "kretprobe/", 10) == 0 || - memcmp(shname, "tracepoint/", 11) == 0 || - memcmp(shname, "raw_tracepoint/", 15) == 0 || - memcmp(shname, "xdp", 3) == 0 || - memcmp(shname, "perf_event", 10) == 0 || - memcmp(shname, "socket", 6) == 0 || - memcmp(shname, "cgroup/", 7) == 0 || - memcmp(shname, "sockops", 7) == 0 || - memcmp(shname, "sk_skb", 6) == 0 || - memcmp(shname, "sk_msg", 6) == 0) { - ret = load_and_attach(shname, data->d_buf, - data->d_size); - if (ret != 0) - goto done; - } - } - -done: - close(fd); - return ret; -} - -int load_bpf_file(char *path) -{ - return do_load_bpf_file(path, NULL); -} - -int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map) -{ - return do_load_bpf_file(path, fixup_map); -} - -void read_trace_pipe(void) -{ - int trace_fd; - - trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0); - if (trace_fd < 0) - return; - - while (1) { - static char buf[4096]; - ssize_t sz; - - sz = read(trace_fd, buf, sizeof(buf) - 1); - if (sz > 0) { - buf[sz] = 0; - puts(buf); - } - } -} \ No newline at end of file diff --git a/src/services/sentryCollector/ebpf_collector/ebpf_collector.bpf.c 
b/src/services/sentryCollector/ebpf_collector/ebpf_collector.bpf.c index e561d0d..60ad800 100644 --- a/src/services/sentryCollector/ebpf_collector/ebpf_collector.bpf.c +++ b/src/services/sentryCollector/ebpf_collector/ebpf_collector.bpf.c @@ -4,668 +4,132 @@ * Author: Zhang Nan * Create: 2024-09-27 */ -#define KBUILD_MODNAME "foo" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include + +#include "vmlinux.h" +#include #include -#include "bpf_helpers.h" +#include +#include +#include #include "ebpf_collector.h" -#define VERSION_KY_V2401 1 -#define VERSION_KY_V2101 2 - -#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;}) - -struct bpf_map_def SEC("maps") blk_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(struct io_counter), - .max_entries = 10000, -}; - -struct bpf_map_def SEC("maps") blk_res = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct stage_data), - .max_entries = 128, -}; - -struct bpf_map_def SEC("maps") bio_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(struct io_counter), - .max_entries = 10000, -}; - -struct bpf_map_def SEC("maps") bio_res = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct stage_data), - .max_entries = 128, -}; - -struct bpf_map_def SEC("maps") wbt_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(struct io_counter), - .max_entries = 10000, -}; - -struct bpf_map_def SEC("maps") wbt_res = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct stage_data), - .max_entries = 128, -}; - -struct bpf_map_def SEC("maps") wbt_args = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(u64), - .max_entries = 1000, -}; - -struct bpf_map_def SEC("maps") tag_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - 
.value_size = sizeof(struct io_counter), - .max_entries = 10000, -}; - -struct bpf_map_def SEC("maps") tag_res = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct stage_data), - .max_entries = 128, -}; - -struct bpf_map_def SEC("maps") tag_args = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(u64), - .max_entries = 1000, -}; - -struct bpf_map_def SEC("maps") blk_res_2 = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u64), - .value_size = sizeof(struct time_range_io_count), - .max_entries = MAX_IO_TIME, -}; - -struct bpf_map_def SEC("maps") bio_res_2 = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u64), - .value_size = sizeof(struct time_range_io_count), - .max_entries = MAX_IO_TIME, -}; - -struct bpf_map_def SEC("maps") wbt_res_2 = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u64), - .value_size = sizeof(struct time_range_io_count), - .max_entries = MAX_IO_TIME, -}; - -struct bpf_map_def SEC("maps") tag_res_2 = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u64), - .value_size = sizeof(struct time_range_io_count), - .max_entries = MAX_IO_TIME, -}; - -struct bpf_map_def SEC("maps") version_res = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(32), - .value_size = sizeof(int), - .max_entries = MAX_IO_TIME, -}; - -// 麒麟v2101平台 -struct request_kylin_v2101 { - struct request_queue *q; - struct blk_mq_ctx *mq_ctx; - - int cpu; - unsigned int cmd_flags; /* op and common flags */ - req_flags_t rq_flags; - - int internal_tag; - - /* the following two fields are internal, NEVER access directly */ - unsigned int __data_len; /* total data len */ - int tag; - sector_t __sector; /* sector cursor */ - - struct bio *bio; - struct bio *biotail; - - struct list_head queuelist; - - /* - * The hash is used inside the scheduler, and killed once the - * request reaches the dispatch list. 
The ipi_list is only used - * to queue the request for softirq completion, which is long - * after the request has been unhashed (and even removed from - * the dispatch list). - */ - union { - struct hlist_node hash; /* merge hash */ - struct list_head ipi_list; - }; - - struct hlist_node front_hash; /* front merge hash */ - - /* - * The rb_node is only used inside the io scheduler, requests - * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. - */ - union { - struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; - void *completion_data; - int error_count; /* for legacy drivers, don't use */ - }; - - /* - * Three pointers are available for the IO schedulers, if they need - * more they have to dynamically allocate it. Flush requests are - * never put on the IO scheduler. So let the flush fields share - * space with the elevator data. - */ - union { - struct { - struct io_cq *icq; - void *priv[2]; - } elv; - - struct { - unsigned int seq; - struct list_head list; - rq_end_io_fn *saved_end_io; - } flush; - }; - - struct gendisk *rq_disk; - struct hd_struct *part; - /* Time that I/O was submitted to the kernel. */ - u64 start_time_ns; - /* Time that I/O was submitted to the device. */ - u64 io_start_time_ns; - -#ifdef CONFIG_BLK_WBT - unsigned short wbt_flags; -#endif -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW - unsigned short throtl_size; -#endif - - /* - * Number of scatter-gather DMA addr+len pairs after - * physical address coalescing is performed. 
- */ - unsigned short nr_phys_segments; - -#if defined(CONFIG_BLK_DEV_INTEGRITY) - unsigned short nr_integrity_segments; -#endif - - unsigned short write_hint; - unsigned short ioprio; - - void *special; /* opaque pointer available for LLD use */ - - unsigned int extra_len; /* length of alignment and padding */ - - enum mq_rq_state state; - refcount_t ref; - - unsigned int timeout; - - /* access through blk_rq_set_deadline, blk_rq_deadline */ - unsigned long __deadline; - - struct list_head timeout_list; - - union { - struct __call_single_data csd; - u64 fifo_time; - }; - - /* - * completion callback. - */ - rq_end_io_fn *end_io; - void *end_io_data; - - /* for bidi */ - struct request_kylin_v2101 *next_rq; - -#ifdef CONFIG_BLK_CGROUP - struct request_list *rl; /* rl this rq is alloced from */ -#endif - KABI_RESERVE(1); - KABI_RESERVE(2); -}; - -struct blk_mq_alloc_data { - /* input parameter */ - struct request_queue *q; - blk_mq_req_flags_t flags; - unsigned int shallow_depth; - - /* input & output parameter */ - struct blk_mq_ctx *ctx; - struct blk_mq_hw_ctx *hctx; -}; - -// 麒麟v2401平台 -struct request_kylin_v2401 { - struct request_queue *q; - struct blk_mq_ctx *mq_ctx; - struct blk_mq_hw_ctx *mq_hctx; - - unsigned int cmd_flags; /* op and common flags */ - req_flags_t rq_flags; - - int internal_tag; - - /* the following two fields are internal, NEVER access directly */ - unsigned int __data_len; /* total data len */ - int tag; - sector_t __sector; /* sector cursor */ - - struct bio *bio; - struct bio *biotail; - - struct list_head queuelist; - - /* - * The hash is used inside the scheduler, and killed once the - * request reaches the dispatch list. The ipi_list is only used - * to queue the request for softirq completion, which is long - * after the request has been unhashed (and even removed from - * the dispatch list). 
- */ - union { - struct hlist_node hash; /* merge hash */ - struct list_head ipi_list; - }; - - struct hlist_node front_hash; /* front merge hash */ - - /* - * The rb_node is only used inside the io scheduler, requests - * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. - */ - union { - struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; - void *completion_data; - int error_count; /* for legacy drivers, don't use */ - }; - - /* - * Three pointers are available for the IO schedulers, if they need - * more they have to dynamically allocate it. Flush requests are - * never put on the IO scheduler. So let the flush fields share - * space with the elevator data. - */ - union { - struct { - struct io_cq *icq; - void *priv[2]; - } elv; - - struct { - unsigned int seq; - struct list_head list; - rq_end_io_fn *saved_end_io; - } flush; - }; - - struct gendisk *rq_disk; - struct hd_struct *part; -#ifdef CONFIG_BLK_RQ_ALLOC_TIME - /* Time that the first bio started allocating this request. */ - u64 alloc_time_ns; -#endif - /* Time that this request was allocated for this IO. */ - u64 start_time_ns; - /* Time that I/O was submitted to the device. */ - u64 io_start_time_ns; - -#ifdef CONFIG_BLK_WBT - unsigned short wbt_flags; -#endif -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW - unsigned short throtl_size; -#endif - - /* - * Number of scatter-gather DMA addr+len pairs after - * physical address coalescing is performed. 
- */ - unsigned short nr_phys_segments; - -#if defined(CONFIG_BLK_DEV_INTEGRITY) - unsigned short nr_integrity_segments; -#endif - - unsigned short write_hint; - unsigned short ioprio; - - void *special; /* opaque pointer available for LLD use */ - - unsigned int extra_len; /* length of alignment and padding */ - - enum mq_rq_state state; - refcount_t ref; - - unsigned int timeout; - - /* access through blk_rq_set_deadline, blk_rq_deadline */ - unsigned long __deadline; - - union { - struct __call_single_data csd; - u64 fifo_time; - }; - - /* - * completion callback. - */ - rq_end_io_fn *end_io; - void *end_io_data; - - /* for bidi */ - struct request_kylin_v2401 *next_rq; - KABI_RESERVE(1); - KABI_RESERVE(2); -}; - -struct request_queue_kylin_v2401 { - /* - * Together with queue_head for cacheline sharing - */ - struct list_head queue_head; - struct request *last_merge; - struct elevator_queue *elevator; - - struct blk_queue_stats *stats; - struct rq_qos *rq_qos; - - make_request_fn *make_request_fn; - poll_q_fn *poll_fn; - dma_drain_needed_fn *dma_drain_needed; - - const struct blk_mq_ops *mq_ops; - - /* sw queues */ - struct blk_mq_ctx __percpu *queue_ctx; - unsigned int nr_queues; - - unsigned int queue_depth; - - /* hw dispatch queues */ - struct blk_mq_hw_ctx **queue_hw_ctx; - unsigned int nr_hw_queues; - - struct backing_dev_info_kylin_v2401 *backing_dev_info; - - /* - * The queue owner gets to use this for whatever they like. - * ll_rw_blk doesn't touch it. - */ - void *queuedata; - - /* - * various queue flags, see QUEUE_* below - */ - unsigned long queue_flags; - /* - * Number of contexts that have called blk_set_pm_only(). If this - * counter is above zero then only RQF_PM and RQF_PREEMPT requests are - * processed. - */ - atomic_t pm_only; - - /* - * ida allocated id for this queue. Used to index queues from - * ioctx. 
- */ - int id; - - /* - * queue needs bounce pages for pages above this limit - */ - gfp_t bounce_gfp; - - /* - * protects queue structures from reentrancy. ->__queue_lock should - * _never_ be used directly, it is queue private. always use - * ->queue_lock. - */ - spinlock_t __queue_lock; - spinlock_t *queue_lock; - - /* - * queue kobject - */ - struct kobject kobj; - - /* - * mq queue kobject - */ - struct kobject *mq_kobj; - -#ifdef CONFIG_BLK_DEV_INTEGRITY - struct blk_integrity integrity; -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - -#ifdef CONFIG_PM - struct device *dev; - int rpm_status; - unsigned int nr_pending; -#endif - - /* - * queue settings - */ - unsigned long nr_requests; /* Max # of requests */ - - unsigned int dma_drain_size; - void *dma_drain_buffer; - unsigned int dma_pad_mask; - unsigned int dma_alignment; - - unsigned int rq_timeout; - int poll_nsec; - - struct blk_stat_callback *poll_cb; - struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS]; - - struct timer_list timeout; - struct work_struct timeout_work; - - atomic_t nr_active_requests_shared_sbitmap; - - struct list_head icq_list; -#ifdef CONFIG_BLK_CGROUP - DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); - struct blkcg_gq *root_blkg; - struct list_head blkg_list; -#endif - - struct queue_limits limits; - - unsigned int required_elevator_features; - -#ifdef CONFIG_BLK_DEV_ZONED - /* - * Zoned block device information for request dispatch control. - * nr_zones is the total number of zones of the device. This is always - * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones - * bits which indicates if a zone is conventional (bit clear) or - * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones - * bits which indicates if a zone is write locked, that is, if a write - * request targeting the zone was dispatched. All three fields are - * initialized by the low level device driver (e.g. scsi/sd.c). 
- * Stacking drivers (device mappers) may or may not initialize - * these fields. - * - * Reads of this information must be protected with blk_queue_enter() / - * blk_queue_exit(). Modifying this information is only allowed while - * no requests are being processed. See also blk_mq_freeze_queue() and - * blk_mq_unfreeze_queue(). - */ - unsigned int nr_zones; - unsigned long *seq_zones_bitmap; - unsigned long *seq_zones_wlock; -#endif /* CONFIG_BLK_DEV_ZONED */ - - /* - * sg stuff - */ - unsigned int sg_timeout; - unsigned int sg_reserved_size; - int node; -#ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace __rcu *blk_trace; - struct mutex blk_trace_mutex; -#endif - /* - * for flush operations - */ - struct blk_flush_queue *fq; - - struct list_head requeue_list; - spinlock_t requeue_lock; - struct delayed_work requeue_work; - - struct mutex sysfs_lock; - - /* - * for reusing dead hctx instance in case of updating - * nr_hw_queues - */ - struct list_head unused_hctx_list; - spinlock_t unused_hctx_lock; - - int mq_freeze_depth; - -#if defined(CONFIG_BLK_DEV_BSG) - struct bsg_class_device bsg_dev; -#endif - -#ifdef CONFIG_BLK_DEV_THROTTLING - /* Throttle data */ - struct throtl_data *td; -#endif - struct rcu_head rcu_head; - wait_queue_head_t mq_freeze_wq; - /* - * Protect concurrent access to q_usage_counter by - * percpu_ref_kill() and percpu_ref_reinit(). 
- */ - struct mutex mq_freeze_lock; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - - struct blk_mq_tag_set *tag_set; - struct list_head tag_set_list; - struct bio_set bio_split; - -#ifdef CONFIG_BLK_DEBUG_FS - struct dentry *debugfs_dir; - struct dentry *sched_debugfs_dir; -#endif - - bool mq_sysfs_init_done; - - size_t cmd_size; - void *rq_alloc_data; - - struct work_struct release_work; - -#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC - /* used when QUEUE_FLAG_DISPATCH_ASYNC is set */ - struct cpumask dispatch_async_cpus; - int __percpu *last_dispatch_cpu; -#endif - -#define BLK_MAX_WRITE_HINTS 5 - u64 write_hints[BLK_MAX_WRITE_HINTS]; - - KABI_RESERVE(1); - KABI_RESERVE(2); - KABI_RESERVE(3); - KABI_RESERVE(4); -}; - -struct backing_dev_info_kylin_v2401 { - u64 id; - struct rb_node rb_node; /* keyed by ->id */ - struct list_head bdi_list; - unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ - unsigned long io_pages; /* max allowed IO size */ - congested_fn *congested_fn; /* Function pointer if device is md/dm */ - void *congested_data; /* Pointer to aux data for congested func */ - - const char *name; - - struct kref refcnt; /* Reference counter for the structure */ - unsigned int capabilities; /* Device capabilities */ - unsigned int min_ratio; - unsigned int max_ratio, max_prop_frac; - - /* - * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are - * any dirty wbs, which is depended upon by bdi_has_dirty(). 
- */ - atomic_long_t tot_write_bandwidth; - - struct bdi_writeback wb; /* the root writeback info for this bdi */ - struct list_head wb_list; /* list of all wbs */ -#ifdef CONFIG_CGROUP_WRITEBACK - struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ - struct rb_root cgwb_congested_tree; /* their congested states */ - struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ - struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ -#else - struct bdi_writeback_congested *wb_congested; -#endif - wait_queue_head_t wb_waitq; - - union { - struct rcu_device *rcu_dev; - struct device *dev; - }; - struct device *owner; - - struct timer_list laptop_mode_wb_timer; - -#ifdef CONFIG_DEBUG_FS - struct dentry *debug_dir; - struct dentry *debug_stats; -#endif - - KABI_RESERVE(1) - KABI_RESERVE(2) - KABI_RESERVE(3) - KABI_RESERVE(4) -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10000); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct io_counter)); +} blk_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 128); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct stage_data)); +} blk_res SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10000); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct io_counter)); +} bio_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 128); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct stage_data)); +} bio_res SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10000); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct io_counter)); +} wbt_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 128); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct stage_data)); +} wbt_res SEC(".maps"); + +struct { + 
__uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1000); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); +} wbt_args SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10000); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct io_counter)); +} tag_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 128); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct stage_data)); +} tag_res SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1000); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); +} tag_args SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_IO_TIME); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct time_range_io_count)); +} blk_res_2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_IO_TIME); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct time_range_io_count)); +} bio_res_2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_IO_TIME); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct time_range_io_count)); +} wbt_res_2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_IO_TIME); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct time_range_io_count)); +} tag_res_2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 256 * 1024); +} ringbuf SEC(".maps"); + + +static void log_event(u32 stage, u32 period, u32 err) { + struct event *e; + void *data = bpf_ringbuf_reserve(&ringbuf, sizeof(struct event), 0); + if (!data) + return; + + e = (struct event *)data; + e->stage = stage; + e->period = period; + e->err = err; + + bpf_ringbuf_submit(e, 0); +} static __always_inline void blk_fill_rwbs(char *rwbs, unsigned int op) { @@ -712,7 +176,7 @@ 
static __always_inline void blk_fill_rwbs(char *rwbs, unsigned int op) } } -void update_curr_data_in_start(struct stage_data *curr_data, struct update_params *params) { +static void update_curr_data_in_start(struct stage_data *curr_data, struct update_params *params) { if (curr_data && params) { curr_data->start_count += 1; curr_data->major = params->major; @@ -721,7 +185,7 @@ void update_curr_data_in_start(struct stage_data *curr_data, struct update_param } } -void update_curr_data_in_finish(struct stage_data *curr_data, struct update_params *params, u64 duration) { +static void update_curr_data_in_finish(struct stage_data *curr_data, struct update_params *params, u64 duration) { if (curr_data && params) { curr_data->finish_count += 1; curr_data->major = params->major; @@ -741,556 +205,98 @@ static void init_io_counter(struct io_counter *counterp, int major, int first_mi } } -u32 find_matching_tag_1_keys(int major, int minor) { - u32 key = 0; - struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 1; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&tag_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 2; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&tag_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_tag_2_keys(int major, int minor) { - u32 key = 3; - struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 4; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&tag_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && 
curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 5; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&tag_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_tag_3_keys(int major, int minor) { - u32 key = 6; - struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 7; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&tag_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 8; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&tag_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_tag_4_keys(int major, int minor) { - u32 key = 9; - struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 10; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&tag_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 11; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&tag_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_tag_5_keys(int major, int minor) { - u32 key = 12; - struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 13; - struct stage_data 
*curr_data_2 = bpf_map_lookup_elem(&tag_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 14; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&tag_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_blk_1_keys(int major, int minor) { - u32 key = 0; - struct stage_data *curr_data = bpf_map_lookup_elem(&blk_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 1; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&blk_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 2; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&blk_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_blk_2_keys(int major, int minor) { - u32 key = 3; - struct stage_data *curr_data = bpf_map_lookup_elem(&blk_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 4; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&blk_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 5; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&blk_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_blk_3_keys(int major, int minor) { - u32 key = 6; - struct stage_data *curr_data = bpf_map_lookup_elem(&blk_res, &key); - - if (curr_data != NULL && 
curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 7; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&blk_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 8; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&blk_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_blk_4_keys(int major, int minor) { - u32 key = 9; - struct stage_data *curr_data = bpf_map_lookup_elem(&blk_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 10; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&blk_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 11; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&blk_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_blk_5_keys(int major, int minor) { - u32 key = 12; - struct stage_data *curr_data = bpf_map_lookup_elem(&blk_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 13; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&blk_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 14; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&blk_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_bio_1_keys(int major, int 
minor) { - u32 key = 0; - struct stage_data *curr_data = bpf_map_lookup_elem(&bio_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 1; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&bio_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 2; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&bio_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_bio_2_keys(int major, int minor) { - u32 key = 3; - struct stage_data *curr_data = bpf_map_lookup_elem(&bio_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 4; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&bio_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 5; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&bio_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_bio_3_keys(int major, int minor) { - u32 key = 6; - struct stage_data *curr_data = bpf_map_lookup_elem(&bio_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 7; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&bio_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 8; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&bio_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && 
curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_bio_4_keys(int major, int minor) { - u32 key = 9; - struct stage_data *curr_data = bpf_map_lookup_elem(&bio_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 10; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&bio_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 11; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&bio_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_bio_5_keys(int major, int minor) { - u32 key = 12; - struct stage_data *curr_data = bpf_map_lookup_elem(&bio_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 13; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&bio_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 14; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&bio_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; - } - - return MAP_SIZE + 1; -} - -u32 find_matching_wbt_1_keys(int major, int minor) { - u32 key = 0; - struct stage_data *curr_data = bpf_map_lookup_elem(&wbt_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 1; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&wbt_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 2; - struct 
stage_data *curr_data_3 = bpf_map_lookup_elem(&wbt_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; +int find_matching_key_rq_driver(int major, int first_minor) { + int key = 0; + for (size_t i = 0; i < MAP_SIZE; i++) { + struct stage_data *curr_data = bpf_map_lookup_elem(&blk_res, &key); + struct stage_data tmp_data; + bpf_core_read(&tmp_data, sizeof(tmp_data), curr_data); + if (tmp_data.major == major && tmp_data.first_minor == first_minor) { + return key; + } + key++; } - - return MAP_SIZE + 1; + return key; } -u32 find_matching_wbt_2_keys(int major, int minor) { - u32 key = 3; - struct stage_data *curr_data = bpf_map_lookup_elem(&wbt_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 4; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&wbt_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 5; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&wbt_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; +int find_matching_key_bio(int major, int first_minor) { + int key = 0; + for (size_t i = 0; i < MAP_SIZE; i++) { + struct stage_data *curr_data = bpf_map_lookup_elem(&bio_res, &key); + struct stage_data tmp_data; + bpf_core_read(&tmp_data, sizeof(tmp_data), curr_data); + if (tmp_data.major == major && tmp_data.first_minor == first_minor) { + return key; + } + key++; } - - return MAP_SIZE + 1; + return key; } -u32 find_matching_wbt_3_keys(int major, int minor) { - u32 key = 6; - struct stage_data *curr_data = bpf_map_lookup_elem(&wbt_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 7; - struct stage_data *curr_data_2 = 
bpf_map_lookup_elem(&wbt_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 8; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&wbt_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; +int find_matching_key_wbt(int major, int first_minor) { + int key = 0; + for (size_t i = 0; i < MAP_SIZE; i++) { + struct stage_data *curr_data = bpf_map_lookup_elem(&wbt_res, &key); + struct stage_data tmp_data; + bpf_core_read(&tmp_data, sizeof(tmp_data), curr_data); + if (tmp_data.major == major && tmp_data.first_minor == first_minor) { + return key; + } + key++; } - - return MAP_SIZE + 1; + return key; } -u32 find_matching_wbt_4_keys(int major, int minor) { - u32 key = 9; - struct stage_data *curr_data = bpf_map_lookup_elem(&wbt_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 10; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&wbt_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 11; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&wbt_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; +int find_matching_key_get_tag(int major, int first_minor) { + int key = 0; + for (size_t i = 0; i < MAP_SIZE; i++) { + struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key); + struct stage_data tmp_data; + bpf_core_read(&tmp_data, sizeof(tmp_data), curr_data); + if (tmp_data.major == major && tmp_data.first_minor == first_minor) { + return key; + } + key++; } - - return MAP_SIZE + 1; + return key; } -u32 find_matching_wbt_5_keys(int major, int minor) { - u32 key = 12; - struct stage_data *curr_data = 
bpf_map_lookup_elem(&wbt_res, &key); - - if (curr_data != NULL && curr_data->major == major && curr_data->first_minor == minor) { - return key; - } - - u32 key_2 = 13; - struct stage_data *curr_data_2 = bpf_map_lookup_elem(&wbt_res, &key_2); - - if (curr_data_2 != NULL && curr_data_2->major == major && curr_data_2->first_minor == minor) { - return key_2; - } - - u32 key_3 = 14; - struct stage_data *curr_data_3 = bpf_map_lookup_elem(&wbt_res, &key_3); - - if (curr_data_3 != NULL && curr_data_3->major == major && curr_data_3->first_minor == minor) { - return key_3; +// start rq_driver +SEC("kprobe/blk_mq_start_request") +int kprobe_blk_mq_start_request(struct pt_regs *regs) { + struct request *rq; + struct request_queue *q; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags; + + rq = (struct request *)PT_REGS_PARM1(regs); + bpf_core_read(&q, sizeof(q), &rq->q); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &q->disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + bpf_core_read(&cmd_flags, sizeof(cmd_flags), &rq->cmd_flags); + + if (major == 0) { + log_event(STAGE_RQ_DRIVER, PERIOD_START, ERROR_MAJOR_ZERO); + return 0; } - - return MAP_SIZE + 1; -} -// start rq_driver -SEC("kprobe/blk_mq_start_request") -int kprobe_blk_mq_start_request(struct pt_regs *regs) -{ - struct request *rq = (struct request *)PT_REGS_PARM1(regs); - struct gendisk *curr_rq_disk = _(rq->rq_disk); - - u32 key_version = 1; - struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version); - if (version_map) { - if (version_map->num == VERSION_KY_V2401) { - struct request_kylin_v2401 *rq = (struct request_kylin_v2401 *)PT_REGS_PARM1(regs); - curr_rq_disk = _(rq->rq_disk); - } else if (version_map->num == VERSION_KY_V2101) { - struct request_kylin_v2101 *rq = (struct request_kylin_v2101 *)PT_REGS_PARM1(regs); - curr_rq_disk = _(rq->rq_disk); - 
} - + u32 key = find_matching_key_rq_driver(major, first_minor); + if (key >= MAP_SIZE) { + return 0; } - int major = _(curr_rq_disk->major); - int first_minor = _(curr_rq_disk->first_minor); - unsigned int cmd_flags = _(rq->cmd_flags); - - struct io_counter *counterp, zero = {}; - - u32 key = find_matching_blk_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_blk_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_blk_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_blk_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_blk_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } - } + struct io_counter *counterp, zero = {}; init_io_counter(&zero, major, first_minor); - counterp = bpf_map_lookup_elem(&blk_map, &rq); - if (counterp || major == 0) { + if (counterp) { return 0; } - long err = bpf_map_update_elem(&blk_map, &rq, &zero, BPF_NOEXIST); + + long err = bpf_map_update_elem(&blk_map, &rq, &zero, BPF_NOEXIST); if (err) { + log_event(STAGE_RQ_DRIVER, PERIOD_START, ERROR_UPDATE_FAIL); return 0; } @@ -1303,8 +309,8 @@ int kprobe_blk_mq_start_request(struct pt_regs *regs) .curr_start_range = curr_start_range, }; - struct stage_data *curr_data; - curr_data = bpf_map_lookup_elem(&blk_res, &key); + struct stage_data *curr_data; + curr_data = bpf_map_lookup_elem(&blk_res, &key); if (!curr_data) { struct stage_data new_data = { .start_count = 1, @@ -1314,7 +320,7 @@ int kprobe_blk_mq_start_request(struct pt_regs *regs) .major = major, .first_minor = first_minor, .io_type = "", - }; + }; blk_fill_rwbs(new_data.io_type, cmd_flags); bpf_map_update_elem(&blk_res, &key, &new_data, 0); } else { @@ -1327,58 +333,43 @@ int kprobe_blk_mq_start_request(struct pt_regs *regs) struct time_range_io_count new_data = { .count = {0} }; bpf_map_update_elem(&blk_res_2, &curr_start_range, &new_data, 0); } else { - if (key < MAP_SIZE) { + if (key < MAP_SIZE && key >= 0) { 
__sync_fetch_and_add(&curr_data_time_range->count[key], 1); } } - - return 0; + return 0; } // finish rq_driver -SEC("kprobe/blk_mq_free_request") -int kprobe_blk_mq_free_request(struct pt_regs *regs) +SEC("kprobe/blk_mq_free_request") +int kprobe_blk_mq_free_request(struct pt_regs *regs) { - struct request *rq = (struct request *)PT_REGS_PARM1(regs); - struct gendisk *curr_rq_disk = _(rq->rq_disk); - - u32 key_version = 1; - struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version); - if (version_map) { - if (version_map->num == VERSION_KY_V2401) { - struct request_kylin_v2401 *rq = (struct request_kylin_v2401 *)PT_REGS_PARM1(regs); - curr_rq_disk = _(rq->rq_disk); - } else if (version_map->num == VERSION_KY_V2101) { - struct request_kylin_v2101 *rq = (struct request_kylin_v2101 *)PT_REGS_PARM1(regs); - curr_rq_disk = _(rq->rq_disk); - } + struct request *rq; + struct request_queue *q; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags; + struct io_counter *counterp; + + rq = (struct request *)PT_REGS_PARM1(regs); + bpf_core_read(&q, sizeof(q), &rq->q); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &q->disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + bpf_core_read(&cmd_flags, sizeof(cmd_flags), &rq->cmd_flags); + + if (major == 0) { + log_event(STAGE_RQ_DRIVER, PERIOD_END, ERROR_MAJOR_ZERO); + return 0; } - int major = _(curr_rq_disk->major); - int first_minor = _(curr_rq_disk->first_minor); - unsigned int cmd_flags = _(rq->cmd_flags); + u32 key = find_matching_key_rq_driver(major, first_minor); + if (key >= MAP_SIZE) { + return 0; + } - struct io_counter *counterp; - u32 key = find_matching_blk_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_blk_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_blk_3_keys(major, first_minor); - if (key >= 
MAP_SIZE){ -                key = find_matching_blk_4_keys(major, first_minor); -                if (key >= MAP_SIZE){ -                    key = find_matching_blk_5_keys(major, first_minor); -                    if (key >= MAP_SIZE){ -                        return 0; -                    } -                } -            } -        } -    } -    counterp = bpf_map_lookup_elem(&blk_map, &rq); - -    if (!counterp) { +    counterp = bpf_map_lookup_elem(&blk_map, &rq); if (!counterp) { 
         return 0;
     }
 
@@ -1393,7 +384,7 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
     };
 
     struct stage_data *curr_data;
-    curr_data = bpf_map_lookup_elem(&blk_res, &key);
+    curr_data = bpf_map_lookup_elem(&blk_res, &key); 
     if (curr_data == NULL && duration > DURATION_THRESHOLD) {
         struct stage_data new_data = {
             .start_count = 1,
@@ -1405,7 +396,7 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
             .io_type = "",
         };
         blk_fill_rwbs(new_data.io_type, cmd_flags);
-        bpf_map_update_elem(&blk_res, &key, &new_data, 0);
+        bpf_map_update_elem(&blk_res, &key, &new_data, 0); 
     } else if (curr_data == NULL) {
         struct stage_data new_data = {
             .start_count = 1,
@@ -1420,7 +411,7 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
             bpf_map_update_elem(&blk_res, &key, &new_data, 0);
     } else {
         curr_data->duration += duration;
-        update_curr_data_in_finish(curr_data, &params, &duration);
+        update_curr_data_in_finish(curr_data, &params, duration);
     }
 
     struct time_range_io_count *curr_data_time_range;
@@ -1433,48 +424,50 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
             __sync_fetch_and_add(&curr_data_time_range->count[key], -1);
         }
     }
-
     bpf_map_delete_elem(&blk_map, &rq);
 
     return 0;
 }
 
 // start bio
-SEC("kprobe/blk_mq_make_request")
-int kprobe_blk_mq_make_request(struct pt_regs *regs)
+SEC("kprobe/blk_mq_submit_bio")
+int kprobe_blk_mq_submit_bio(struct pt_regs *regs)
 {
-    struct bio *bio = (struct bio *)PT_REGS_PARM2(regs);
-    struct gendisk *curr_rq_disk = _(bio->bi_disk);
-    int major = _(curr_rq_disk->major);
-    int first_minor = _(curr_rq_disk->first_minor);
-    unsigned int cmd_flags = _(bio->bi_opf);
+    struct bio *bio;
+    struct block_device *bd;
+    struct gendisk *curr_rq_disk;
+    int major, first_minor;
+    
unsigned int cmd_flags; + + bio = (struct bio *)PT_REGS_PARM1(regs); + bpf_core_read(&bd, sizeof(bd), &bio->bi_bdev); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &bd->bd_disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + bpf_core_read(&cmd_flags, sizeof(cmd_flags), &bio->bi_opf); + + if (major == 0) { + log_event(STAGE_BIO, PERIOD_START, ERROR_MAJOR_ZERO); + return 0; + } - struct io_counter *counterp, zero = {}; - u32 key = find_matching_bio_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } - } + int key = find_matching_key_bio(major, first_minor); + if (key >= MAP_SIZE) { + return 0; + } + struct io_counter *counterp, zero = {}; init_io_counter(&zero, major, first_minor); counterp = bpf_map_lookup_elem(&bio_map, &bio); - if (counterp || major == 0) + if (counterp) { return 0; + } long err = bpf_map_update_elem(&bio_map, &bio, &zero, BPF_NOEXIST); - if (err && err != -EEXIST) + if (err && err != -EEXIST) { + log_event(STAGE_BIO, PERIOD_START, ERROR_UPDATE_FAIL); return 0; + } u64 curr_start_range = zero.start_time / THRESHOLD; @@ -1509,11 +502,10 @@ int kprobe_blk_mq_make_request(struct pt_regs *regs) struct time_range_io_count new_data = { .count = {0} }; bpf_map_update_elem(&bio_res_2, &curr_start_range, &new_data, 0); } else { - if (key < MAP_SIZE) { + if (key < MAP_SIZE && key >= 0) { __sync_fetch_and_add(&curr_data_time_range->count[key], 1); } } - return 0; } @@ -1521,34 +513,32 @@ int kprobe_blk_mq_make_request(struct pt_regs *regs) SEC("kprobe/bio_endio") int kprobe_bio_endio(struct pt_regs *regs) { - struct bio 
*bio = (struct bio *)PT_REGS_PARM1(regs); - struct gendisk *curr_rq_disk = _(bio->bi_disk); - int major = _(curr_rq_disk->major); - int first_minor = _(curr_rq_disk->first_minor); - unsigned int cmd_flags = _(bio->bi_opf); - - struct io_counter *counterp; - void *delete_map = NULL; - u32 key = find_matching_bio_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_bio_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } + struct bio *bio; + struct block_device *bd; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags; + + bio = (struct bio *)PT_REGS_PARM1(regs); + bpf_core_read(&bd, sizeof(bd), &bio->bi_bdev); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &bd->bd_disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + bpf_core_read(&cmd_flags, sizeof(cmd_flags), &bio->bi_opf); + + if (major == 0) { + log_event(STAGE_BIO, PERIOD_END, ERROR_MAJOR_ZERO); + return 0; } - counterp = bpf_map_lookup_elem(&bio_map, &bio); + u32 key = find_matching_key_bio(major, first_minor); + if (key >= MAP_SIZE) { + return 0; + } - if (!counterp) { + void *delete_map = NULL; + struct io_counter *counterp = bpf_map_lookup_elem(&bio_map, &bio); + if (!counterp) { return 0; } @@ -1592,7 +582,7 @@ int kprobe_bio_endio(struct pt_regs *regs) bpf_map_update_elem(&bio_res, &key, &new_data, 0); } else { curr_data->duration += duration; - update_curr_data_in_finish(curr_data, ¶ms, &duration); + update_curr_data_in_finish(curr_data, ¶ms, duration); } struct time_range_io_count *curr_data_time_range; @@ -1605,51 +595,51 @@ int kprobe_bio_endio(struct pt_regs *regs) 
__sync_fetch_and_add(&curr_data_time_range->count[key], -1); } } - - bpf_map_delete_elem(delete_map, &bio); + bpf_map_delete_elem(delete_map, &bio); return 0; } -// start wbt -SEC("kprobe/wbt_wait") -int kprobe_wbt_wait(struct pt_regs *regs) +// start get_tag +SEC("kprobe/blk_mq_get_tag") +int kprobe_blk_mq_get_tag(struct pt_regs *regs) { - u64 wbtkey = bpf_get_current_task(); - u64 value = (u64)PT_REGS_PARM2(regs); - (void)bpf_map_update_elem(&wbt_args, &wbtkey, &value, BPF_ANY); - struct bio *bio = (struct bio *)value; - struct gendisk *curr_rq_disk = _(bio->bi_disk); - int major = _(curr_rq_disk->major); - int first_minor = _(curr_rq_disk->first_minor); - unsigned int cmd_flags = _(bio->bi_opf); + u64 tagkey = bpf_get_current_task(); + u64 value = (u64)PT_REGS_PARM1(regs); + (void)bpf_map_update_elem(&tag_args, &tagkey, &value, BPF_ANY); - struct io_counter *counterp, zero = {}; - u32 key = find_matching_wbt_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } + struct blk_mq_alloc_data *bd; + struct request_queue *q; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags = 0; + + bd = (struct blk_mq_alloc_data *)value; + bpf_core_read(&q, sizeof(q), &bd->q); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &q->disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + + if (major == 0) { + log_event(STAGE_GET_TAG, PERIOD_START, ERROR_MAJOR_ZERO); + return 0; } - init_io_counter(&zero, major, first_minor); - - counterp = bpf_map_lookup_elem(&wbt_map, &wbtkey); + u32 key = find_matching_key_get_tag(major, 
first_minor); + if (key >= MAP_SIZE) { + return 0; + } - if (counterp || major == 0) + struct io_counter *counterp, zero = {}; + init_io_counter(&zero, major, first_minor); + counterp = bpf_map_lookup_elem(&tag_map, &tagkey); + if (counterp) { return 0; - long err = bpf_map_update_elem(&wbt_map, &wbtkey, &zero, BPF_NOEXIST); - if (err) + } + long err = bpf_map_update_elem(&tag_map, &tagkey, &zero, BPF_NOEXIST); + if (err) { + log_event(STAGE_GET_TAG, PERIOD_START, ERROR_UPDATE_FAIL); return 0; + } u64 curr_start_range = zero.start_time / THRESHOLD; @@ -1658,10 +648,10 @@ int kprobe_wbt_wait(struct pt_regs *regs) .first_minor = first_minor, .cmd_flags = cmd_flags, .curr_start_range = curr_start_range, - }; + }; struct stage_data *curr_data; - curr_data = bpf_map_lookup_elem(&wbt_res, &key); + curr_data = bpf_map_lookup_elem(&tag_res, &key); if (!curr_data) { struct stage_data new_data = { .start_count = 1, @@ -1673,65 +663,61 @@ int kprobe_wbt_wait(struct pt_regs *regs) .io_type = "", }; blk_fill_rwbs(new_data.io_type, cmd_flags); - bpf_map_update_elem(&wbt_res, &key, &new_data, 0); - } else { + bpf_map_update_elem(&tag_res, &key, &new_data, 0); + } else { update_curr_data_in_start(curr_data, ¶ms); } struct time_range_io_count *curr_data_time_range; - curr_data_time_range = bpf_map_lookup_elem(&wbt_res_2, &curr_start_range); + curr_data_time_range = bpf_map_lookup_elem(&tag_res_2, &curr_start_range); if (curr_data_time_range == NULL) { struct time_range_io_count new_data = { .count = {0} }; - bpf_map_update_elem(&wbt_res_2, &curr_start_range, &new_data, 0); + bpf_map_update_elem(&tag_res_2, &curr_start_range, &new_data, 0); } else { - if (key < MAP_SIZE) { + if (key < MAP_SIZE && key >= 0) { __sync_fetch_and_add(&curr_data_time_range->count[key], 1); } } - return 0; } -// finish wbt -SEC("kretprobe/wbt_wait") -int kretprobe_wbt_wait(struct pt_regs *regs) +// finish get_tag +SEC("kretprobe/blk_mq_get_tag") +int kretprobe_blk_mq_get_tag(struct pt_regs *regs) { - 
struct bio *bio = NULL; - u64 *wbtargs = NULL; - u64 wbtkey = bpf_get_current_task(); - wbtargs = (u64 *)bpf_map_lookup_elem(&wbt_args, &wbtkey); - if (wbtargs == NULL) { - bpf_map_delete_elem(&wbt_args, &wbtkey); + u64 tagkey = bpf_get_current_task(); + u64 *tagargs = (u64 *)bpf_map_lookup_elem(&tag_args, &tagkey); + if (tagargs == NULL) { + bpf_map_delete_elem(&tag_args, &tagkey); return 0; } - bio = (struct bio *)(*wbtargs); - struct gendisk *curr_rq_disk = _(bio->bi_disk); - int major = _(curr_rq_disk->major); - int first_minor = _(curr_rq_disk->first_minor); - unsigned int cmd_flags = _(bio->bi_opf); - - struct io_counter *counterp; - u32 key = find_matching_wbt_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_wbt_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } + + struct blk_mq_alloc_data *bd; + struct request_queue *q; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags = 0; + + bd = (struct blk_mq_alloc_data *)*tagargs; + bpf_core_read(&q, sizeof(q), &bd->q); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &q->disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + + if (major == 0) { + log_event(STAGE_GET_TAG, PERIOD_END, ERROR_MAJOR_ZERO); + return 0; } - counterp = bpf_map_lookup_elem(&wbt_map, &wbtkey); + u32 key = find_matching_key_get_tag(major, first_minor); + if (key >= MAP_SIZE) { + return 0; + } - if (!counterp) + struct io_counter *counterp = bpf_map_lookup_elem(&tag_map, &tagkey); + if (!counterp) { return 0; + } u64 duration = bpf_ktime_get_ns() - counterp->start_time; u64 curr_start_range = counterp->start_time / THRESHOLD; @@ 
-1741,12 +727,12 @@ int kretprobe_wbt_wait(struct pt_regs *regs) .first_minor = first_minor, .cmd_flags = cmd_flags, .curr_start_range = curr_start_range, - }; + }; struct stage_data *curr_data; - curr_data = bpf_map_lookup_elem(&wbt_res, &key); + curr_data = bpf_map_lookup_elem(&tag_res, &key); if (curr_data == NULL && duration > DURATION_THRESHOLD) { - struct stage_data new_data = { + struct stage_data new_data = { .start_count = 1, .finish_count = 1, .finish_over_time = 1, @@ -1756,97 +742,83 @@ int kretprobe_wbt_wait(struct pt_regs *regs) .io_type = "", }; blk_fill_rwbs(new_data.io_type, cmd_flags); - bpf_map_update_elem(&wbt_res, &key, &new_data, 0); + bpf_map_update_elem(&tag_res, &key, &new_data, 0); } else if (curr_data == NULL) { struct stage_data new_data = { .start_count = 1, .finish_count = 1, .finish_over_time = 0, .duration = 0, - .io_type = "", .major = major, .first_minor = first_minor, + .io_type = "", }; blk_fill_rwbs(new_data.io_type, cmd_flags); - bpf_map_update_elem(&wbt_res, &key, &new_data, 0); + bpf_map_update_elem(&tag_res, &key, &new_data, 0); } else { curr_data->duration += duration; - update_curr_data_in_finish(curr_data, ¶ms, &duration); + update_curr_data_in_finish(curr_data, ¶ms, duration); } struct time_range_io_count *curr_data_time_range; - curr_data_time_range = bpf_map_lookup_elem(&wbt_res_2, &curr_start_range); + curr_data_time_range = bpf_map_lookup_elem(&tag_res_2, &curr_start_range); if (curr_data_time_range == NULL) { struct time_range_io_count new_data = { .count = {0} }; - bpf_map_update_elem(&wbt_res_2, &curr_start_range, &new_data, 0); + bpf_map_update_elem(&tag_res_2, &curr_start_range, &new_data, 0); } else { if (key < MAP_SIZE && curr_data_time_range->count[key] > 0) { __sync_fetch_and_add(&curr_data_time_range->count[key], -1); } } - bpf_map_delete_elem(&wbt_map, &wbtkey); - bpf_map_delete_elem(&wbt_args, &wbtkey); + bpf_map_delete_elem(&tag_map, &tagkey); + bpf_map_delete_elem(&tag_args, &tagkey); return 0; } -// 
start get_tag -SEC("kprobe/blk_mq_get_tag") -int kprobe_blk_mq_get_tag(struct pt_regs *regs) +// start wbt +SEC("kprobe/wbt_wait") +int kprobe_wbt_wait(struct pt_regs *regs) { - u64 tagkey = bpf_get_current_task(); - u64 value = (u64)PT_REGS_PARM1(regs); - (void)bpf_map_update_elem(&tag_args, &tagkey, &value, BPF_ANY); - struct blk_mq_alloc_data *bd= (struct blk_mq_alloc_data *)value; - struct request_queue *q = (struct request_queue *)_(bd->q); - struct backing_dev_info *backing_dev_info = (struct backing_dev_info *)_(q->backing_dev_info); - struct device *owner = _(backing_dev_info->owner); - - u32 key_version = 1; - struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version); - if (version_map) { - if (version_map->num == VERSION_KY_V2401) { - struct request_queue_kylin_v2401 *q = (struct request_queue_kylin_v2401 *)_(bd->q); - struct backing_dev_info_kylin_v2401 *backing_dev_info = (struct backing_dev_info_kylin_v2401 *)_(q->backing_dev_info); - owner = _(backing_dev_info->owner); - } - } + u64 wbtkey = bpf_get_current_task(); + u64 value = (u64)PT_REGS_PARM2(regs); + (void)bpf_map_update_elem(&wbt_args, &wbtkey, &value, BPF_ANY); - dev_t devt = _(owner->devt); - int major = MAJOR(devt); - int first_minor = MINOR(devt); - unsigned int cmd_flags = 0; - - struct io_counter *counterp, zero = {}; - u32 key = find_matching_tag_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_2_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } + struct bio *bio; + struct block_device *bd; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags; + + bio = (struct bio *)value; + bpf_core_read(&bd, sizeof(bd), &bio->bi_bdev); + 
bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &bd->bd_disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + bpf_core_read(&cmd_flags, sizeof(cmd_flags), &bio->bi_opf); + + if (major == 0) { + log_event(STAGE_WBT, PERIOD_START, ERROR_MAJOR_ZERO); + return 0; } - init_io_counter(&zero, major, first_minor); + u32 key = find_matching_key_wbt(major, first_minor); + if (key >= MAP_SIZE) { + return 0; + } - counterp = bpf_map_lookup_elem(&tag_map, &tagkey); - if (counterp || major == 0) { + struct io_counter *counterp, zero = {}; + init_io_counter(&zero, major, first_minor); + counterp = bpf_map_lookup_elem(&wbt_map, &wbtkey); + if (counterp) { return 0; } - - long err = bpf_map_update_elem(&tag_map, &tagkey, &zero, BPF_NOEXIST); + long err = bpf_map_update_elem(&wbt_map, &wbtkey, &zero, BPF_NOEXIST); if (err) { + log_event(STAGE_WBT, PERIOD_START, ERROR_UPDATE_FAIL); return 0; } - + u64 curr_start_range = zero.start_time / THRESHOLD; struct update_params params = { @@ -1854,10 +826,10 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs) .first_minor = first_minor, .cmd_flags = cmd_flags, .curr_start_range = curr_start_range, - }; + }; struct stage_data *curr_data; - curr_data = bpf_map_lookup_elem(&tag_res, &key); + curr_data = bpf_map_lookup_elem(&wbt_res, &key); if (!curr_data) { struct stage_data new_data = { .start_count = 1, @@ -1869,77 +841,59 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs) .io_type = "", }; blk_fill_rwbs(new_data.io_type, cmd_flags); - bpf_map_update_elem(&tag_res, &key, &new_data, 0); - } else { + bpf_map_update_elem(&wbt_res, &key, &new_data, 0); + } else { update_curr_data_in_start(curr_data, ¶ms); } struct time_range_io_count *curr_data_time_range; - curr_data_time_range = bpf_map_lookup_elem(&tag_res_2, &curr_start_range); + curr_data_time_range = bpf_map_lookup_elem(&wbt_res_2, &curr_start_range); if (curr_data_time_range == NULL) { struct 
time_range_io_count new_data = { .count = {0} }; - bpf_map_update_elem(&tag_res_2, &curr_start_range, &new_data, 0); + bpf_map_update_elem(&wbt_res_2, &curr_start_range, &new_data, 0); } else { - if (key < MAP_SIZE) { + if (key < MAP_SIZE) { __sync_fetch_and_add(&curr_data_time_range->count[key], 1); } } return 0; } -// finish get_tag -SEC("kretprobe/blk_mq_get_tag") -int kretprobe_blk_mq_get_tag(struct pt_regs *regs) +// finish wbt +SEC("kretprobe/wbt_wait") +int kretprobe_wbt_wait(struct pt_regs *regs) { - u64 tagkey = bpf_get_current_task(); - u64 *tagargs = NULL; - struct blk_mq_alloc_data *bd = NULL; - - tagargs = (u64 *)bpf_map_lookup_elem(&tag_args, &tagkey); - if (tagargs == NULL) { - bpf_map_delete_elem(&tag_args, &tagkey); + u64 wbtkey = bpf_get_current_task(); + u64 *wbtargs = (u64 *)bpf_map_lookup_elem(&wbt_args, &wbtkey); + if (wbtargs == NULL) { + bpf_map_delete_elem(&wbt_args, &wbtkey); return 0; } - bd = (struct blk_mq_alloc_data *)(*tagargs); - struct request_queue *q = _(bd->q); - struct backing_dev_info *backing_dev_info = _(q->backing_dev_info); - struct device *owner = _(backing_dev_info->owner); - - u32 key_version = 1; - struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version); - if (version_map) { - if (version_map->num == VERSION_KY_V2401) { - struct request_queue_kylin_v2401 *q = (struct request_queue_kylin_v2401 *)_(bd->q); - struct backing_dev_info_kylin_v2401 *backing_dev_info = (struct backing_dev_info_kylin_v2401 *)_(q->backing_dev_info); - owner = _(backing_dev_info->owner); - } - } + + struct bio *bio; + struct block_device *bd; + struct gendisk *curr_rq_disk; + int major, first_minor; + unsigned int cmd_flags; - dev_t devt = _(owner->devt); - int major = MAJOR(devt); - int first_minor = MINOR(devt); - unsigned int cmd_flags = 0; - - struct io_counter *counterp; - u32 key = find_matching_tag_1_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_2_keys(major, 
first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_3_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_4_keys(major, first_minor); - if (key >= MAP_SIZE){ - key = find_matching_tag_5_keys(major, first_minor); - if (key >= MAP_SIZE){ - return 0; - } - } - } - } + bio = (struct bio *)(*wbtargs); + bpf_core_read(&bd, sizeof(bd), &bio->bi_bdev); + bpf_core_read(&curr_rq_disk, sizeof(curr_rq_disk), &bd->bd_disk); + bpf_core_read(&major, sizeof(major), &curr_rq_disk->major); + bpf_core_read(&first_minor, sizeof(first_minor), &curr_rq_disk->first_minor); + bpf_core_read(&cmd_flags, sizeof(cmd_flags), &bio->bi_opf); + + if (major == 0) { + log_event(STAGE_WBT, PERIOD_END, ERROR_MAJOR_ZERO); + return 0; } - counterp = bpf_map_lookup_elem(&tag_map, &tagkey); + u32 key = find_matching_key_wbt(major, first_minor); + if (key >= MAP_SIZE) { + return 0; + } + struct io_counter *counterp = bpf_map_lookup_elem(&wbt_map, &wbtkey); if (!counterp) { return 0; } @@ -1952,12 +906,12 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs) .first_minor = first_minor, .cmd_flags = cmd_flags, .curr_start_range = curr_start_range, - }; + }; struct stage_data *curr_data; - curr_data = bpf_map_lookup_elem(&tag_res, &key); + curr_data = bpf_map_lookup_elem(&wbt_res, &key); if (curr_data == NULL && duration > DURATION_THRESHOLD) { - struct stage_data new_data = { + struct stage_data new_data = { .start_count = 1, .finish_count = 1, .finish_over_time = 1, @@ -1967,39 +921,37 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs) .io_type = "", }; blk_fill_rwbs(new_data.io_type, cmd_flags); - bpf_map_update_elem(&tag_res, &key, &new_data, 0); + bpf_map_update_elem(&wbt_res, &key, &new_data, 0); } else if (curr_data == NULL) { struct stage_data new_data = { .start_count = 1, .finish_count = 1, .finish_over_time = 0, .duration = 0, + .io_type = "", .major = major, .first_minor = first_minor, - .io_type = "", }; blk_fill_rwbs(new_data.io_type, cmd_flags); - 
bpf_map_update_elem(&tag_res, &key, &new_data, 0); + bpf_map_update_elem(&wbt_res, &key, &new_data, 0); } else { curr_data->duration += duration; - update_curr_data_in_finish(curr_data, ¶ms, &duration); + update_curr_data_in_finish(curr_data, ¶ms, duration); } struct time_range_io_count *curr_data_time_range; - curr_data_time_range = bpf_map_lookup_elem(&tag_res_2, &curr_start_range); + curr_data_time_range = bpf_map_lookup_elem(&wbt_res_2, &curr_start_range); if (curr_data_time_range == NULL) { struct time_range_io_count new_data = { .count = {0} }; - bpf_map_update_elem(&tag_res_2, &curr_start_range, &new_data, 0); + bpf_map_update_elem(&wbt_res_2, &curr_start_range, &new_data, 0); } else { if (key < MAP_SIZE && curr_data_time_range->count[key] > 0) { __sync_fetch_and_add(&curr_data_time_range->count[key], -1); } } - - bpf_map_delete_elem(&tag_map, &tagkey); - bpf_map_delete_elem(&tag_args, &tagkey); + bpf_map_delete_elem(&wbt_map, &wbtkey); + bpf_map_delete_elem(&wbt_args, &wbtkey); return 0; } -char LICENSE[] SEC("license") = "Dual BSD/GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; +char _license[] SEC("license") = "GPL"; diff --git a/src/services/sentryCollector/ebpf_collector/ebpf_collector.c b/src/services/sentryCollector/ebpf_collector/ebpf_collector.c index dbb3e55..5a2528b 100644 --- a/src/services/sentryCollector/ebpf_collector/ebpf_collector.c +++ b/src/services/sentryCollector/ebpf_collector/ebpf_collector.c @@ -12,31 +12,27 @@ #include #include #include -#include -#include +#include #include -#include +#include #include -#include #include -#include -#include +#include #include "ebpf_collector.h" - -#define BLK_MAP (map_fd[0]) -#define BLK_RES (map_fd[1]) -#define BIO_MAP (map_fd[2]) -#define BIO_RES (map_fd[3]) -#define WBT_MAP (map_fd[4]) -#define WBT_RES (map_fd[5]) -#define TAG_MAP (map_fd[7]) -#define TAG_RES (map_fd[8]) -#define BLK_RES_2 (map_fd[10]) -#define BIO_RES_2 (map_fd[11]) -#define WBT_RES_2 (map_fd[12]) -#define TAG_RES_2 
(map_fd[13]) -#define VERSION_RES (map_fd[14]) -#define BPF_FILE "/usr/lib/ebpf_collector.bpf.o" +#include "ebpf_collector.skel.h" + +#define BLK_MAP (bpf_map__fd(skel->maps.blk_map)) +#define BLK_RES (bpf_map__fd(skel->maps.blk_res)) +#define BIO_MAP (bpf_map__fd(skel->maps.bio_map)) +#define BIO_RES (bpf_map__fd(skel->maps.bio_res)) +#define WBT_MAP (bpf_map__fd(skel->maps.wbt_map)) +#define WBT_RES (bpf_map__fd(skel->maps.wbt_res)) +#define TAG_MAP (bpf_map__fd(skel->maps.tag_map)) +#define TAG_RES (bpf_map__fd(skel->maps.tag_res)) +#define BLK_RES_2 (bpf_map__fd(skel->maps.blk_res_2)) +#define BIO_RES_2 (bpf_map__fd(skel->maps.bio_res_2)) +#define WBT_RES_2 (bpf_map__fd(skel->maps.wbt_res_2)) +#define TAG_RES_2 (bpf_map__fd(skel->maps.tag_res_2)) #define MAX_LINE_LENGTH 1024 #define MAX_SECTION_NAME_LENGTH 256 @@ -57,7 +53,7 @@ typedef enum { LogLevel currentLogLevel = LOG_LEVEL_INFO; -static volatile bool exiting; +static volatile bool exiting; const char argp_program_doc[] = "Show block device I/O pattern.\n" @@ -85,6 +81,15 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) { return 0; } +void logMessage(LogLevel level, const char *format, ...){ + if (level >= currentLogLevel) { + va_list args; + va_start(args, format); + vprintf(format, args); + va_end(args); + } +} + static void sig_handler(int sig) { exiting = true; @@ -133,7 +138,7 @@ char* find_device_name(dev_t dev) { return device_name; } -static void update_io_dump(struct bpf_map *map_res, int *io_dump, int *map_size, char *stage) { +static void update_io_dump(int fd, int *io_dump, int map_size, char *stage) { struct time_range_io_count time_count; u32 io_dump_key = 0; struct sysinfo info; @@ -142,7 +147,7 @@ static void update_io_dump(struct bpf_map *map_res, int *io_dump, int *map_size, u32 curr_time = info.uptime; while (count_time >= 0) { io_dump_key = curr_time - count_time; - int err = bpf_map_lookup_elem(map_res, &io_dump_key, &time_count); + int err = 
bpf_map_lookup_elem(fd, &io_dump_key, &time_count); if (err < 0) { count_time -= 1; continue; @@ -156,21 +161,21 @@ static void update_io_dump(struct bpf_map *map_res, int *io_dump, int *map_size, } } if (isempty || (curr_time - io_dump_key) > IO_DUMP_THRESHOLD) { - bpf_map_delete_elem(map_res, &io_dump_key); + bpf_map_delete_elem(fd, &io_dump_key); } } count_time -= 1; } } -static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size, int *io_dump) +static int print_map_res(int fd, char *stage, int map_size, int *io_dump) { struct stage_data counter; int key = 0; logMessage(LOG_LEVEL_DEBUG, "print_map_res map_size: %d\n", map_size); for (key = 0; key < map_size; key++) { - int err = bpf_map_lookup_elem(map_res, &key, &counter); + int err = bpf_map_lookup_elem(fd, &key, &counter); if (err < 0) { logMessage(LOG_LEVEL_ERROR, "failed to lookup %s map_res: %d\n", stage, err); return -1; @@ -183,7 +188,7 @@ static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size, in io_type = counter.io_type[0]; } else { logMessage(LOG_LEVEL_DEBUG, "io_type not value.\n"); - io_type = NULL; + io_type = '\0'; } int major = counter.major; int first_minor = counter.first_minor; @@ -206,15 +211,15 @@ static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size, in return 0; } -int init_map(int *map_fd, const char *map_name, int *map_size, DeviceInfo *devices) { +int init_map(int fd, const char *map_name, int device_count, DeviceInfo *devices) { struct stage_data init_data = {0}; memset(init_data.io_type, 0, sizeof(init_data.io_type)); - for (int i = 0; i < map_size; i++) { + for (int i = 0; i < device_count; i++) { init_data.major = devices[i].major; init_data.first_minor = devices[i].minor; - if (bpf_map_update_elem(map_fd, &i, &init_data, BPF_ANY) != 0) { + if (bpf_map_update_elem(fd, &i, &init_data, BPF_ANY) != 0) { logMessage(LOG_LEVEL_ERROR, "Failed to initialize map %s at index %d\n", map_name, i); return 1; } @@ -223,19 +228,6 @@ 
int init_map(int *map_fd, const char *map_name, int *map_size, DeviceInfo *devic return 0; } -int init_version_map(int *map_fd, const char *map_name, int os_num) { - struct version_map_num init_data = {0}; - init_data.num = os_num; - - u32 key = 1; - if (bpf_map_update_elem(map_fd, &key, &init_data, BPF_ANY) != 0) { - logMessage(LOG_LEVEL_ERROR, "Failed to initialize map %s at index %d\n", map_name); - return 1; - } - - return 0; -} - char *read_config_value(const char *file, const char *section, const char *key) { FILE *fp = fopen(file, "r"); if (fp == NULL) { @@ -299,15 +291,6 @@ void setLogLevel(const char *levelStr) { } } -void logMessage(LogLevel level, const char *format, ...){ - if (level >= currentLogLevel) { - va_list args; - va_start(args, format); - vprintf(format, args); - va_end(args); - } -} - int check_for_device(const char *device_name) { char path[256]; snprintf(path, sizeof(path), "/sys/block/%s", device_name); @@ -331,92 +314,11 @@ int check_for_device(const char *device_name) { return 0; } -typedef struct { - const char *version; - int value; -} VersionMap; - -const VersionMap version_map[] = { - {"v2401", 1}, - {"v2101", 2} - }; - -char *get_minor_version(int index, char *buffer) { - char *version_info = NULL; - char* token = strtok(buffer, " "); - int count = 0; - while (token != NULL) { - token = strtok(NULL, " "); - count++; - if (count == 2) { - char* version = strtok(token, "."); - int dot_count = 0; - while (version != NULL) { - version = strtok(NULL, "."); - dot_count++; - if (dot_count == index) { - version_info = strdup(version); - break; - } - } - } - } - return version_info; -} - -int get_os_version() { - FILE* file; - char* distribution = NULL; - char buffer[BUFFER_SIZE]; - - file = fopen(OS_RELEASE_FILE, "r"); - if (file == NULL) { - logMessage(LOG_LEVEL_ERROR, "Failed to open release file: %s\n", OS_RELEASE_FILE); - return -1; - } - - while (fgets(buffer, BUFFER_SIZE, file)) { - if (strncmp(buffer, "ID=", 3) == 0) { - distribution 
= strdup(buffer + 4); - distribution[strcspn(distribution, "\"\n")] = '\0'; - break; - } - } - fclose(file); - - char* version_info = NULL; - int value = -1; - - file = fopen(PROC_VERSION_FILE, "r"); - if (file == NULL) { - logMessage(LOG_LEVEL_ERROR, "Failed to open version file: %s\n", PROC_VERSION_FILE); - return -1; - } - - if (fgets(buffer, BUFFER_SIZE, file)) { - if (strcmp(distribution, "openEuler") == 0) { - free(distribution); - return 0; - } else if (strcmp(distribution, "kylin") == 0) { - version_info = get_minor_version(4, buffer); - if (!version_info) { - logMessage(LOG_LEVEL_ERROR, "get minor version failed.\n"); - free(distribution); - return -1; - } - } - } - free(distribution); - fclose(file); - - for (int i = 0; version_map[i].version != NULL; ++i) { - if (strcmp(version_map[i].version, version_info) == 0) { - value = version_map[i].value; - break; - } - } - free(version_info); - return value; +// 处理事件的回调函数 +static int handle_event(void *ctx, void *data, size_t data_sz) { + struct event *e = (struct event *)data; + logMessage(LOG_LEVEL_ERROR, "kernelspace error happen, stage: %u, period: %llu, error: %u\n", e->stage, (unsigned long long)e->period, e->err); + return 0; } int main(int argc, char **argv) { @@ -457,20 +359,8 @@ int main(int argc, char **argv) { return err; } - int os_num = get_os_version(); - if (os_num < 0) { - logMessage(LOG_LEVEL_INFO, "get os version failed.\n"); - return 1; - } - - snprintf(filename, sizeof(filename), BPF_FILE); - - if (load_bpf_file(filename)) { - logMessage(LOG_LEVEL_ERROR, "load_bpf_file failed.\n"); - return 1; - } - signal(SIGINT, sig_handler); + signal(SIGTERM, sig_handler); dir = opendir("/dev"); if (dir == NULL) { @@ -506,32 +396,64 @@ int main(int argc, char **argv) { closedir(dir); - if (init_map(BLK_RES, "blk_res_map", device_count, devices) != 0) { + struct ebpf_collector_bpf *skel = ebpf_collector_bpf__open(); + if (!skel) { + logMessage(LOG_LEVEL_ERROR, "Failed to open and load BPF skeleton\n"); + return 1; + } + + // 加载 
BPF 程序到内核中 + err = ebpf_collector_bpf__load(skel); + if (err) { + logMessage(LOG_LEVEL_ERROR, "Failed to load BPF skeleton: %d\n", err); + ebpf_collector_bpf__destroy(skel); + return 1; + } + + // 附加 BPF 程序到 kprobe + err = ebpf_collector_bpf__attach(skel); + if (err) { + logMessage(LOG_LEVEL_ERROR, "Failed to attach BPF skeleton: %d\n", err); + ebpf_collector_bpf__destroy(skel); + return 1; + } + + // 初始化环形缓冲区读取器 + struct ring_buffer *rb = NULL; + rb = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), handle_event, NULL, NULL); + if (!rb) { + return 1; + } + + + if (init_map(BLK_RES, "blk_res_map", device_count, devices)) { logMessage(LOG_LEVEL_ERROR, "blk_res_map failed.\n"); + ebpf_collector_bpf__destroy(skel); return 1; } + if (init_map(BIO_RES, "blo_res_map", device_count, devices) != 0) { logMessage(LOG_LEVEL_ERROR, "blo_res_map failed.\n"); + ebpf_collector_bpf__destroy(skel); return 1; } if (init_map(WBT_RES, "wbt_res_map", device_count, devices) != 0) { logMessage(LOG_LEVEL_ERROR, "wbt_res_map failed.\n"); + ebpf_collector_bpf__destroy(skel); return 1; } if (init_map(TAG_RES, "tag_res_map", device_count, devices) != 0) { logMessage(LOG_LEVEL_ERROR, "tag_res_map failed.\n"); + ebpf_collector_bpf__destroy(skel); return 1; } - if (init_version_map(VERSION_RES, "version_res_map", os_num) != 0) { - logMessage(LOG_LEVEL_ERROR, "version_res_map failed.\n"); - return 1; - } - for (;;) { sleep(1); + err = ring_buffer__poll(rb, 100); + int io_dump_blk[MAP_SIZE] = {0}; update_io_dump(BLK_RES_2, io_dump_blk, device_count,"rq_driver"); err = print_map_res(BLK_RES, "rq_driver", device_count, io_dump_blk); @@ -570,5 +492,7 @@ int main(int argc, char **argv) { } } - return -err; + ring_buffer__free(rb); + ebpf_collector_bpf__destroy(skel); + return -err; } diff --git a/src/services/sentryCollector/ebpf_collector/ebpf_collector.h b/src/services/sentryCollector/ebpf_collector/ebpf_collector.h index 904f8e4..adf926b 100644 --- 
a/src/services/sentryCollector/ebpf_collector/ebpf_collector.h +++ b/src/services/sentryCollector/ebpf_collector/ebpf_collector.h @@ -32,10 +32,29 @@ typedef unsigned int u32; #define REQ_OP_WRITE_SAME 7 #define MAP_SIZE 15 -#define OS_RELEASE_FILE "/etc/os-release" -#define PROC_VERSION_FILE "/proc/version" -#define BUFFER_SIZE 1024 -#define VERSION_LEN 20 +#define RWBS_LEN 8 +#define MINORBITS 20 +#define MINORMASK ((1U << MINORBITS) - 1) + +#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) +#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) + +// 阶段 +#define STAGE_RQ_DRIVER 1 +#define STAGE_BIO 2 +#define STAGE_WBT 3 +#define STAGE_GET_TAG 4 + +// 时期 +#define PERIOD_START 1 +#define PERIOD_END 2 + +// 错误码 +#define ERROR_MAJOR_ZERO 1 +#define ERROR_KEY_OVERFLOW 2 +#define ERROR_KEY_EXIST 3 +#define ERROR_UPDATE_FAIL 4 +#define ERROR_KEY_NOEXIST 5 enum stage_type { BIO=0, @@ -78,9 +97,10 @@ struct time_range_io_count u32 count[MAP_SIZE]; }; -struct version_map_num -{ - int num; +struct event { + u32 stage; + u64 period; + u32 err; }; #endif /* __EBPFCOLLECTOR_H */ -- Gitee