diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h index 445a78d83d21cbcd247e396c8868ed7170dc6092..08ff5246f97d4871320dd6898e15fd473fabbf21 100644 --- a/arch/arm/include/asm/livepatch.h +++ b/arch/arm/include/asm/livepatch.h @@ -34,11 +34,6 @@ struct klp_func; int arch_klp_patch_func(struct klp_func *func); void arch_klp_unpatch_func(struct klp_func *func); -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif - - #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) #ifdef CONFIG_ARM_MODULE_PLTS @@ -63,7 +58,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); int arch_klp_module_check_calltrace(void *data); - #endif #endif /* _ASM_ARM_LIVEPATCH_H */ diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index b1711d947dfe7eea2b52b9118971a52288fe2c21..f37cc04b4cae61ad11e4e221031fcde1c2eb85c1 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -39,7 +39,6 @@ #define ARM_INSN_SIZE 4 #endif -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY @@ -64,286 +63,109 @@ static bool is_jump_insn(u32 insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; - int ret; -}; - -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) +static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} + struct walk_stackframe_args *args = ws_args; -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) -{ - int ret; - struct klp_object *obj; - struct klp_func_node *func_node; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_list *pcheck = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - if (enable) { - bool need_check_old = false; - - if 
(func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of intructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; + return !args->check_func(args->data, &args->ret, frame->pc); } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return true; - } - funcs = funcs->next; - } - return false; -} - -static int klp_check_jump_func(struct stackframe *frame, void *data) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; - - return check_func_list(check_funcs, &args->ret, frame->pc); -} - -static void free_list(struct klp_func_list **funcs) -{ - struct klp_func_list *p; + struct stackframe frame; - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); + if (t == 
current) { + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_stack_pointer; + frame.lr = (unsigned long)__builtin_return_address(0); + frame.pc = (unsigned long)check_task_calltrace; + } else { + frame.fp = thread_saved_fp(t); + frame.sp = thread_saved_sp(t); + frame.lr = 0; /* recovered from the stack */ + frame.pc = thread_saved_pc(t); } + walk_stackframe(&frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; } static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - struct stackframe frame; + unsigned int cpu; for_each_process_thread(g, t) { - if (t == current) { - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_stack_pointer; - frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)do_check_calltrace; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - frame.fp = thread_saved_fp(t); - frame.sp = thread_saved_sp(t); - frame.lr = 0; /* recovered from the stack */ - frame.pc = thread_saved_pc(t); - } - walk_stackframe(&frame, fn, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; } return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { - .enable = enable, - .ret = 0 + .data = data, + .ret = 0, + .check_func = check_func, }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif - args.check_funcs = check_funcs; - ret = do_check_calltrace(&args, klp_check_jump_func); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static int check_module_calltrace(struct stackframe *frame, void *data) +static int check_module_calltrace(struct stackframe *frame, void *ws_args) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; - if (within_module_core(frame->pc, args->mod)) { - pr_err("module %s is in use!\n", args->mod->name); + if (within_module_core(frame->pc, mod)) { + pr_err("module %s is in use!\n", mod->name); return (args->ret = -EBUSY); } return 0; @@ -352,7 +174,7 @@ static int check_module_calltrace(struct stackframe *frame, void *data) int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; @@ -436,14 +258,29 @@ long arch_klp_save_old_code(struct arch_klp_data 
*arch_data, void *old_func) return ret; } +static void klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + + if (len <= 0) + return; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) + __patch_text(dst + i, src[i]); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + __patch_text(dst, src[0]); +} + static int do_patch(unsigned long pc, unsigned long new_addr) { u32 insns[LJMP_INSN_SIZE]; if (!offset_in_range(pc, new_addr, SZ_32M)) { #ifdef CONFIG_ARM_MODULE_PLTS - int i; - /* * [0] LDR PC, [PC+8] * [4] nop @@ -453,8 +290,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr) insns[1] = __opcode_to_mem_arm(0xe320f000); insns[2] = new_addr; - for (i = 0; i < LJMP_INSN_SIZE; i++) - __patch_text(((u32 *)pc) + i, insns[i]); + klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); #else /* * When offset from 'new_addr' to 'pc' is out of SZ_32M range but @@ -465,7 +301,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr) #endif } else { insns[0] = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insns[0]); + klp_patch_text((u32 *)pc, insns, 1); } return 0; } @@ -493,11 +329,7 @@ void arch_klp_unpatch_func(struct klp_func *func) pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - int i; - - for (i = 0; i < LJMP_INSN_SIZE; i++) { - __patch_text(((u32 *)pc) + i, func_node->arch_data.old_insns[i]); - } + klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); } else { next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node); diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 34061d75a0d21be9c981d724c4e05c90b4e1000d..6dead7235ec6d525e269b50adfc5d1ac9ada500f 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -362,6 +362,7 @@ CONFIG_LIVEPATCH_WO_FTRACE=y CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y # CONFIG_LIVEPATCH_STACK is not set CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_LIVEPATCH_ISOLATE_KPROBE=y # end of Enable Livepatch # diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h index c41a22adc94488e14845fa6378f0dd092c4cb4f7..2bacd12e46b1f5cb3cfb5de62ac8c8d8a7d981a7 100644 --- a/arch/arm64/include/asm/livepatch.h +++ b/arch/arm64/include/asm/livepatch.h @@ -41,9 +41,6 @@ static inline int klp_check_compiler_support(void) int arch_klp_patch_func(struct klp_func *func); void arch_klp_unpatch_func(struct klp_func *func); -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif #else #error Live patching support is disabled; check CONFIG_LIVEPATCH #endif @@ -72,7 +69,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); int arch_klp_module_check_calltrace(void *data); - #endif #endif /* _ASM_ARM64_LIVEPATCH_H */ diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 5b01712548206a2329a447342f73ea01c05f9ee9..363fb8e41c496f86b7743f4600ce53aab87b13f3 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -35,7 +35,6 @@ #include #include -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32)) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE static 
inline bool offset_in_range(unsigned long pc, unsigned long addr, @@ -57,288 +56,114 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr, ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \ ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800)) -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; - int ret; -}; - -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list *)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list *)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_node *func_node; - struct klp_func_list *pcheck = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [the active one] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. 
- */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) { - return -EINVAL; - } -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static bool klp_check_jump_func(void *ws_args, unsigned long pc) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} + struct walk_stackframe_args *args = ws_args; -static bool klp_check_jump_func(void *data, unsigned long pc) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; - - return check_func_list(check_funcs, &args->ret, pc); + return args->check_func(args->data, &args->ret, pc); } -static void free_list(struct klp_func_list **funcs) +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) { - struct klp_func_list *p; + struct stackframe frame; - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); + /* + * Handle the current carefully on each CPUs, we shouldn't + * use saved FP and PC when backtrace current. It's difficult + * to backtrack other CPU currents here. 
But fortunately, + * all CPUs will stay in this function, so the current's + * backtrace is so similar + */ + if (t == current) { + /* current on this CPU */ + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.pc = (unsigned long)check_task_calltrace; + } else { + frame.fp = thread_saved_fp(t); + frame.pc = thread_saved_pc(t); } + start_backtrace(&frame, frame.fp, frame.pc); + walk_stackframe(t, &frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; } static int do_check_calltrace(struct walk_stackframe_args *args, bool (*fn)(void *, unsigned long)) { + int ret; struct task_struct *g, *t; - struct stackframe frame; + unsigned int cpu; for_each_process_thread(g, t) { - /* - * Handle the current carefully on each CPUs, we shouldn't - * use saved FP and PC when backtrace current. It's difficult - * to backtrack other CPU currents here. But fortunately, - * all CPUs will stay in this function, so the current's - * backtrace is so similar - */ - if (t == current) { - /* current on this CPU */ - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)do_check_calltrace; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - frame.fp = thread_saved_fp(t); - frame.pc = thread_saved_pc(t); - } - start_backtrace(&frame, frame.fp, frame.pc); - walk_stackframe(t, &frame, fn, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; } return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { - .enable = enable, - .ret = 0 + .data = data, + .ret = 0, + .check_func = check_func, }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif - args.check_funcs = check_funcs; - ret = do_check_calltrace(&args, klp_check_jump_func); -out: - free_list(&check_funcs); - return ret; +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + return do_check_calltrace(&args, klp_check_jump_func); } -static bool check_module_calltrace(void *data, unsigned long pc) +static bool check_module_calltrace(void *ws_args, unsigned long pc) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; - if (within_module_core(pc, args->mod)) { - pr_err("module %s is in use!\n", args->mod->name); + if (within_module_core(pc, mod)) { + pr_err("module %s is in use!\n", mod->name); args->ret = -EBUSY; return false; } @@ -348,7 +173,7 @@ static bool check_module_calltrace(void *data, unsigned long pc) int arch_klp_module_check_calltrace(void *data) { struct 
walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; @@ -410,6 +235,27 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; } +static int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) { + ret = aarch64_insn_patch_text_nosync(dst + i, src[i]); + if (ret) + return ret; + } + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + return aarch64_insn_patch_text_nosync(dst, src[0]); +} + static int do_patch(unsigned long pc, unsigned long new_addr) { u32 insns[LJMP_INSN_SIZE]; @@ -418,26 +264,22 @@ static int do_patch(unsigned long pc, unsigned long new_addr) if (offset_in_range(pc, new_addr, SZ_128M)) { insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr, AARCH64_INSN_BRANCH_NOLINK); - ret = aarch64_insn_patch_text_nosync((void *)pc, insns[0]); + ret = klp_patch_text((u32 *)pc, insns, 1); if (ret) { pr_err("patch instruction small range failed, ret=%d\n", ret); return -EPERM; } } else { #ifdef CONFIG_ARM64_MODULE_PLTS - int i; insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; insns[3] = 0xd61f0200; - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]); - if (ret) { - pr_err("patch instruction %d large range failed, ret=%d\n", - i, ret); - return -EPERM; - } + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; } #else /* @@ -469,20 +311,16 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, - func_node->arch_data.old_insns[i]); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - return; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } } else { next_func = list_first_or_null_rcu(&func_node->func_stack, diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c445828ecc3aac18723c6ffd489a4c9ebc08d029..2073a3a7fe75127e19b5e3ee41ea46569667cef7 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -101,6 +101,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ret_stack->ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(frame->pc)) + frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp); +#endif frame->pc = ptrauth_strip_insn_pac(frame->pc); diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index 285602e637f1df8f31024d590041ac1dd922716a..243c8f93ff6e184c789bfe995de96da9aea31193 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -147,14 +147,10 @@ void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) long 
arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); int arch_klp_module_check_calltrace(void *data); int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame); +int klp_patch_text(u32 *dst, const u32 *src, int len); #endif /* CONFIG_LIVEPATCH_FTRACE */ -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -struct klp_patch; -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif - static inline void klp_init_thread_info(struct task_struct *p) { /* + 1 to account for STACK_END_MAGIC */ diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c index 5ba38c2c7c5ccde526a04ba5462072d0b300920b..a522f1ce24942d3b653162e21211952ba04ea7ba 100644 --- a/arch/powerpc/kernel/livepatch.c +++ b/arch/powerpc/kernel/livepatch.c @@ -136,3 +136,25 @@ int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame) return 0; } + +int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) { + ret = patch_instruction((struct ppc_inst *)(dst + i), + ppc_inst(src[i])); + if (ret) + return ret; + } + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + return patch_instruction((struct ppc_inst *)dst, ppc_inst(src[0])); +} diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 134b08e12e74a8cccb4e4b8999f667682ae6d094..f2baf13d0d838737dc25a15d990f4269f3944f4b 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -31,7 +31,6 @@ #if defined (CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \ defined (CONFIG_LIVEPATCH_WO_FTRACE) -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32)) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE #endif @@ -54,191 +53,19 @@ static bool is_jump_insn(u32 insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; - int ret; -}; - -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) -{ - int ret; - struct klp_object *obj; - 
struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_node *func_node; - struct klp_func_list *pcheck = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemtion is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. 
- */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - void notrace klp_walk_stackframe(struct stackframe *frame, int (*fn)(struct stackframe *, void *), struct task_struct *tsk, void *data) @@ -254,29 +81,15 @@ void notrace klp_walk_stackframe(struct stackframe *frame, } } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - -static int klp_check_jump_func(struct stackframe *frame, void *data) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; + struct walk_stackframe_args *args = ws_args; /* check NIP when the exception stack switching */ - if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip)) + if (frame->nip && !args->check_func(args->data, &args->ret, frame->nip)) return args->ret; if (frame->link && !frame->nip_link_in_same_func && - !check_func_list(check_funcs, &args->ret, frame->link)) + !args->check_func(args->data, &args->ret, frame->link)) return args->ret; /* * There are two cases that frame->pc is reliable: @@ -284,120 +97,132 @@ static int klp_check_jump_func(struct stackframe *frame, void *data) * 2. nip and link are in same function; */ if (!frame->is_top_frame || frame->nip_link_in_same_func) { - if (!check_func_list(check_funcs, &args->ret, frame->pc)) + if (!args->check_func(args->data, &args->ret, frame->pc)) return args->ret; } return 0; } -static void free_list(struct klp_func_list **funcs) +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { - struct klp_func_list *p; + struct stackframe frame; + unsigned long *stack; + + if (t == current) { + /* + * Handle the current carefully on each CPUs, we shouldn't + * use saved FP and PC when backtrace current. It's difficult + * to backtrack other CPU currents here. But fortunately, + * all CPUs will stay in this function, so the current's + * backtrace is so similar + */ + stack = (unsigned long *)current_stack_pointer; + } else { + /* + * Skip the first frame since it does not contain lr + * at normal position and nip is stored in the lr + * position in the second frame. + * See arch/powerpc/kernel/entry_32.S _switch . 
+ */ + unsigned long s = *(unsigned long *)t->thread.ksp; - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); + if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) + return 0; + stack = (unsigned long *)s; } + + frame.sp = (unsigned long)stack; + frame.pc = stack[STACK_FRAME_LR_SAVE]; + frame.nip = 0; + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; } static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - unsigned long *stack; + unsigned int cpu; for_each_process_thread(g, t) { - struct stackframe frame = { 0 }; - - if (t == current) { - /* - * Handle the current carefully on each CPUs, we shouldn't - * use saved FP and PC when backtrace current. It's difficult - * to backtrack other CPU currents here. But fortunately, - * all CPUs will stay in this function, so the current's - * backtrace is so similar - */ - stack = (unsigned long *)current_stack_pointer; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - /* - * Skip the first frame since it does not contain lr - * at normal position and nip is stored in the lr - * position in the second frame. - * See arch/powerpc/kernel/entry_32.S _switch . - */ - unsigned long s = *(unsigned long *)t->thread.ksp; - - if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) - continue; - stack = (unsigned long *)s; - } - - frame.sp = (unsigned long)stack; - frame.pc = stack[STACK_FRAME_LR_SAVE]; - klp_walk_stackframe(&frame, fn, t, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; } return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { - .ret = 0 + .data = data, + .ret = 0, + .check_func = check_func, }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif - args.check_funcs = check_funcs; - ret = do_check_calltrace(&args, klp_check_jump_func); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static int check_module_calltrace(struct stackframe *frame, void *data) +static int check_module_calltrace(struct stackframe *frame, void *ws_args) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; /* check NIP when the exception stack switching */ - if (frame->nip && within_module_core(frame->nip, args->mod)) + if (frame->nip && 
within_module_core(frame->nip, mod)) goto err_out; if (frame->link && !frame->nip_link_in_same_func && - within_module_core(frame->link, args->mod)) + within_module_core(frame->link, mod)) goto err_out; if (!frame->is_top_frame || frame->nip_link_in_same_func) { - if (within_module_core(frame->pc, args->mod)) + if (within_module_core(frame->pc, mod)) goto err_out; } return 0; err_out: - pr_err("module %s is in use!\n", args->mod->name); + pr_err("module %s is in use!\n", mod->name); return (args->ret = -EBUSY); } int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; @@ -432,7 +257,6 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) static int do_patch(unsigned long pc, unsigned long new_addr) { int ret; - int i; u32 insns[LJMP_INSN_SIZE]; if (offset_in_range(pc, new_addr, SZ_32M)) { @@ -456,14 +280,10 @@ static int do_patch(unsigned long pc, unsigned long new_addr) insns[2] = 0x7d8903a6; insns[3] = 0x4e800420; - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), - ppc_inst(insns[i])); - if (ret) { - pr_err("patch instruction %d large range failed, ret=%d\n", - i, ret); - return -EPERM; - } + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; } } return 0; @@ -487,20 +307,17 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), - ppc_inst(func_node->arch_data.old_insns[i])); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - return; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } } else { next_func = list_first_or_null_rcu(&func_node->func_stack, diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index b33839b5916a9dffeb19ce4a511275634a0796b6..a61dff5c5adfe53d57f6f48b0f42b9ed8ade5a6c 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -36,7 +36,6 @@ #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \ defined(CONFIG_LIVEPATCH_WO_FTRACE) -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32)) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE #endif @@ -59,184 +58,121 @@ static bool is_jump_insn(u32 insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; - int ret; -}; - -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; } -static int 
add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) +int arch_klp_check_activeness_func(struct klp_func *func, int enable, + klp_add_func_t add_func, struct list_head *func_list) { int ret; - struct klp_object *obj; - struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pcheck = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - func_node = klp_find_func_node(func->old_func); - - /* Check func address in stack */ - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = ppc_function_entry( - (void *)prev->new_func); - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be repalced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the function itself - * which to be unpatched. - */ - func_addr = ppc_function_entry( - (void *)func->new_func); - func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } -#ifdef PPC64_ELF_ABI_v1 + func_node = func->func_node; + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently + * active functions. 
+ */ + if (list_empty(&func_node->func_stack)) { /* - * Check trampoline in stack - * new_func callchain: - * old_func - * -=> trampoline - * -=> new_func - * so, we should check all the func in the callchain + * No patched on this function + * [ the origin one ] */ - if (func_addr != (unsigned long)func->old_func) { + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function + * [ the active one ] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = ppc_function_entry((void *)prev->new_func); + func_size = prev->new_size; + } + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be repalced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + } + } else { + /* + * When disable, check for the function itself + * which to be unpatched. + */ + func_addr = ppc_function_entry((void *)func->new_func); + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + +#ifdef PPC64_ELF_ABI_v1 + /* + * Check trampoline in stack + * new_func callchain: + * old_func + * -=> trampoline + * -=> new_func + * so, we should check all the func in the callchain + */ + if (func_addr != (unsigned long)func->old_func) { #ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, "OLD_FUNC", 0); - if (ret) - return ret; + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. 
+ */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + ret = add_func(func_list, func_addr, + func_size, "OLD_FUNC", 0); + if (ret) + return ret; #endif - if (func_node == NULL || - func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) - continue; - - func_addr = (unsigned long)&func_node->arch_data.trampoline; - func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, "trampoline", 0); - if (ret) - return ret; - } -#endif - } + if (func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) + return 0; + + func_addr = (unsigned long)&func_node->arch_data.trampoline; + func_size = sizeof(struct ppc64_klp_btramp_entry); + ret = add_func(func_list, func_addr, + func_size, "trampoline", 0); + if (ret) + return ret; } +#endif return 0; } @@ -255,29 +191,15 @@ static void notrace klp_walk_stackframe(struct stackframe *frame, } } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - -static int klp_check_jump_func(struct stackframe *frame, void *data) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; + struct walk_stackframe_args *args = ws_args; /* check NIP when the exception stack switching */ - if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip)) + if (frame->nip && !args->check_func(args->data, &args->ret, frame->nip)) return args->ret; if (frame->link && !frame->nip_link_in_same_func && - !check_func_list(check_funcs, &args->ret, frame->link)) + !args->check_func(args->data, &args->ret, frame->link)) return args->ret; /* * There are two cases that frame->pc is reliable: @@ -285,124 +207,134 @@ static int klp_check_jump_func(struct stackframe *frame, void *data) * 2. nip and link are in same function; */ if (!frame->is_top_frame || frame->nip_link_in_same_func) { - if (!check_func_list(check_funcs, &args->ret, frame->pc)) + if (!args->check_func(args->data, &args->ret, frame->pc)) return args->ret; } return 0; } -static void free_list(struct klp_func_list **funcs) +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { - struct klp_func_list *p; + struct stackframe frame; + unsigned long *stack; - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); + if (t == current) { + /* + * Handle the current carefully on each CPUs, + * we shouldn't use saved FP and PC when + * backtrace current. It's difficult to + * backtrack other CPU currents here. But + * fortunately,all CPUs will stay in this + * function, so the current's backtrace is + * so similar + */ + stack = (unsigned long *)current_stack_pointer; + } else { + /* + * Skip the first frame since it does not contain lr + * at notmal position and nip is store ind the lr + * position in the second frame. + * See arch/powerpc/kernel/entry_64.S _switch . 
+ */ + unsigned long s = *(unsigned long *)t->thread.ksp; + + if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) + return 0; + stack = (unsigned long *)s; } + + frame.sp = (unsigned long)stack; + frame.pc = stack[STACK_FRAME_LR_SAVE]; + frame.nip = 0; + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; } static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - unsigned long *stack; + unsigned int cpu; for_each_process_thread(g, t) { - struct stackframe frame = { 0 }; - - if (t == current) { - /* - * Handle the current carefully on each CPUs, - * we shouldn't use saved FP and PC when - * backtrace current. It's difficult to - * backtrack other CPU currents here. But - * fortunately,all CPUs will stay in this - * function, so the current's backtrace is - * so similar - */ - stack = (unsigned long *)current_stack_pointer; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - /* - * Skip the first frame since it does not contain lr - * at notmal position and nip is store ind the lr - * position in the second frame. - * See arch/powerpc/kernel/entry_64.S _switch . - */ - unsigned long s = *(unsigned long *)t->thread.ksp; - - if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) - continue; - stack = (unsigned long *)s; - } - - frame.sp = (unsigned long)stack; - frame.pc = stack[STACK_FRAME_LR_SAVE]; - klp_walk_stackframe(&frame, fn, t, args); - if (args->ret) { - pr_debug("%s FAILED when %s\n", __func__, - args->enable ? "enabling" : "disabling"); - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; } return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args; + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif - args.check_funcs = check_funcs; - args.ret = 0; - args.enable = enable; - ret = do_check_calltrace(&args, klp_check_jump_func); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static int check_module_calltrace(struct stackframe *frame, void *data) +static int check_module_calltrace(struct stackframe *frame, void *ws_args) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; /* check NIP when the exception stack switching */ - if (frame->nip && 
within_module_core(frame->nip, args->mod)) + if (frame->nip && within_module_core(frame->nip, mod)) goto err_out; if (frame->link && !frame->nip_link_in_same_func && - within_module_core(frame->link, args->mod)) + within_module_core(frame->link, mod)) goto err_out; if (!frame->is_top_frame || frame->nip_link_in_same_func) { - if (within_module_core(frame->pc, args->mod)) + if (within_module_core(frame->pc, mod)) goto err_out; } return 0; err_out: - pr_err("module %s is in use!\n", args->mod->name); + pr_err("module %s is in use!\n", mod->name); return (args->ret = -EBUSY); } int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; @@ -467,20 +399,17 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)((u32 *)pc + i), - ppc_inst(func_node->arch_data.old_insns[i])); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - break; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } pr_debug("[%s %d] restore insns at 0x%lx\n", __func__, __LINE__, pc); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index ef093691f6063c3b9f4c192d57d6f63de20600ec..d0e4581b0cf0eb6c7e05c21d670b7047bbf521ce 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -817,17 +817,18 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) * Patch jump stub to reference trampoline * without saved the old R2 and load the new R2. */ -static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, +static int livepatch_create_bstub(void *pc, unsigned long addr, struct module *me) { long reladdr; unsigned long my_r2; unsigned long stub_start, stub_end, stub_size; + struct ppc64_klp_bstub_entry entry; /* Stub uses address relative to r2. */ my_r2 = me ? me->arch.toc : kernel_toc_addr(); - reladdr = (unsigned long)entry - my_r2; + reladdr = (unsigned long)pc - my_r2; if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { pr_err("%s: Address %p of jump stub out of range of %p.\n", me ? me->name : "kernel", @@ -839,15 +840,25 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, stub_start = ppc_function_entry((void *)livepatch_branch_stub); stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); stub_size = stub_end - stub_start; - memcpy(entry->jump, (u32 *)stub_start, stub_size); + memcpy(entry.jump, (u32 *)stub_start, stub_size); + + entry.jump[0] |= PPC_HA(reladdr); + entry.jump[1] |= PPC_LO(reladdr); + entry.magic = BRANCH_STUB_MAGIC; + entry.trampoline = addr; - entry->jump[0] |= PPC_HA(reladdr); - entry->jump[1] |= PPC_LO(reladdr); - entry->magic = BRANCH_STUB_MAGIC; - entry->trampoline = addr; + /* skip breakpoint at first */ + memcpy(pc + PPC64_INSN_SIZE, (void *)&entry + PPC64_INSN_SIZE, + sizeof(entry) - PPC64_INSN_SIZE); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. 
+ */ + barrier(); + memcpy(pc, (void *)&entry, PPC64_INSN_SIZE); pr_debug("Create livepatch branch stub 0x%px with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n", - (void *)entry, reladdr, my_r2, addr); + pc, reladdr, my_r2, addr); return 1; } @@ -898,7 +909,7 @@ int livepatch_create_branch(unsigned long pc, #endif /* Create stub to trampoline */ - if (!livepatch_create_bstub((struct ppc64_klp_bstub_entry *)pc, trampoline, me)) + if (!livepatch_create_bstub((void *)pc, trampoline, me)) return -EINVAL; return 0; diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 1835f38f2947ccbf8c017d1d72656c378fa04c54..3335c83a4ee73634f8a56e738d99a29a3cf51bfb 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -505,6 +505,7 @@ CONFIG_LIVEPATCH_WO_FTRACE=y CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y # CONFIG_LIVEPATCH_STACK is not set CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_LIVEPATCH_ISOLATE_KPROBE=y # end of Enable Livepatch # end of Processor type and features diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index e2cef5b2d8aa1a86983efca26a8ae756e822d674..5ffd1de9ce4866fd52b32fd4d7b8282a2f7f5957 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -26,11 +26,6 @@ int arch_klp_patch_func(struct klp_func *func); void arch_klp_unpatch_func(struct klp_func *func); #endif -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif - - #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \ defined(CONFIG_LIVEPATCH_WO_FTRACE) diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 70fc159ebe6959fead369c46d8a143812a1bc058..fbce1e35d406623d92b54fddef95c10d71590780 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -99,6 +100,31 @@ void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {} #endif +static inline +unsigned long unwind_recover_kretprobe(struct unwind_state *state, + unsigned long addr, unsigned long *addr_p) +{ +#ifdef CONFIG_KRETPROBES + return is_kretprobe_trampoline(addr) ? + kretprobe_find_ret_addr(state->task, addr_p) : + addr; +#else + return addr; +#endif +} + +/* Recover the return address modified by kretprobe and ftrace_graph. 
*/ +static inline +unsigned long unwind_recover_ret_addr(struct unwind_state *state, + unsigned long addr, unsigned long *addr_p) +{ + unsigned long ret; + + ret = ftrace_graph_ret_addr(state->task, &state->graph_idx, + addr, addr_p); + return unwind_recover_kretprobe(state, ret, addr_p); +} + /* * This disables KASAN checking when reading a value from another task's stack, * since the other task could be running on another CPU and could have poisoned diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 43404fc1fdbb4700ede192d02170662f2c5f8ce2..99b72629637d2dedc4e4772839fbb419254ae119 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -52,25 +52,7 @@ static bool is_jump_insn(u8 *insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > JMP_E9_INSN_SIZE) - size = JMP_E9_INSN_SIZE; - return size; -} - -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { int len = JMP_E9_INSN_SIZE; struct insn insn; @@ -90,148 +72,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_node *func_node = NULL; - struct klp_func_list *pcheck = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - func_node = klp_find_func_node(func->old_func); - /* Check func address in stack */ - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - if (!func_node || - list_empty(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [the active one] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemtion is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. 
- */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, - func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the function - * itself which to be unpatched. - */ - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static void klp_print_stack_trace(void *trace_ptr, int trace_len) { int i; @@ -263,21 +103,6 @@ static void klp_print_stack_trace(void *trace_ptr, int trace_len) #endif #define MAX_STACK_ENTRIES 100 -static bool check_func_list(void *data, int *ret, unsigned long pc) -{ - struct klp_func_list *funcs = (struct klp_func_list *)data; - - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - static int klp_check_stack(void *trace_ptr, int trace_len, bool (*fn)(void *, int *, unsigned long), void *data) { @@ -309,20 +134,10 @@ static int klp_check_stack(void *trace_ptr, int trace_len, return 0; } -static void free_list(struct klp_func_list **funcs) +static int check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) { - struct klp_func_list *p; - - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); - } -} - -static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) -{ - struct task_struct *g, *t; int ret = 0; static unsigned long trace_entries[MAX_STACK_ENTRIES]; #ifdef CONFIG_ARCH_STACKWALK @@ -331,62 +146,56 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da struct stack_trace trace; #endif - for_each_process_thread(g, t) { - if (klp_is_migration_thread(t->comm)) - continue; - #ifdef CONFIG_ARCH_STACKWALK - ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); - if (ret < 0) { - pr_err("%s:%d has an unreliable stack, ret=%d\n", - t->comm, t->pid, ret); - return ret; - } - trace_len = ret; - ret = klp_check_stack(trace_entries, trace_len, fn, data); + ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); + if (ret < 0) { + pr_err("%s:%d has an unreliable 
stack, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + trace_len = ret; + ret = klp_check_stack(trace_entries, trace_len, fn, data); #else - trace.skip = 0; - trace.nr_entries = 0; - trace.max_entries = MAX_STACK_ENTRIES; - trace.entries = trace_entries; - ret = save_stack_trace_tsk_reliable(t, &trace); - WARN_ON_ONCE(ret == -ENOSYS); - if (ret) { - pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", - __func__, t->comm, t->pid, ret); - return ret; - } - ret = klp_check_stack(&trace, 0, fn, data); + trace.skip = 0; + trace.nr_entries = 0; + trace.max_entries = MAX_STACK_ENTRIES; + trace.entries = trace_entries; + ret = save_stack_trace_tsk_reliable(t, &trace); + if (ret) { + pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", + __func__, t->comm, t->pid, ret); + return ret; + } + ret = klp_check_stack(&trace, 0, fn, data); #endif - if (ret) { - pr_err("%s:%d check stack failed, ret=%d\n", - t->comm, t->pid, ret); - return ret; - } + if (ret) { + pr_err("%s:%d check stack failed, ret=%d\n", + t->comm, t->pid, ret); + return ret; } - return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) { int ret = 0; - struct klp_func_list *check_funcs = NULL; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - - if (!check_funcs) - goto out; + struct task_struct *g, *t; + unsigned int cpu; - ret = do_check_calltrace(check_func_list, (void *)check_funcs); + for_each_process_thread(g, t) { + if (klp_is_migration_thread(t->comm)) + continue; -out: - free_list(&check_funcs); - return ret; + ret = check_task_calltrace(t, fn, data); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), fn, data); + if (ret) + return ret; + } + return 0; } static bool check_module_calltrace(void *data, int *ret, unsigned long pc) @@ -401,6 +210,22 @@ static bool check_module_calltrace(void *data, int *ret, unsigned long pc) return true; } +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) +{ + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, fn, data); +} +#endif + +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + return do_check_calltrace(check_func, data); +} + int arch_klp_module_check_calltrace(void *data) { return do_check_calltrace(check_module_calltrace, data); @@ -507,6 +332,21 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; } +static void klp_patch_text(void *dst, const void *src, int len) +{ + if (len <= 1) + return; + /* skip breakpoint at first */ + text_poke(dst + 1, src + 1, len - 1); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. 
+ */ + barrier(); + /* update jmp opcode */ + text_poke(dst, src, 1); +} + int arch_klp_patch_func(struct klp_func *func) { struct klp_func_node *func_node; @@ -519,15 +359,7 @@ int arch_klp_patch_func(struct klp_func *func) new_addr = (unsigned long)func->new_func; /* replace the text with the new text */ new = (unsigned char *)klp_jmp_code(ip, new_addr); -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY - /* update jmp offset */ - text_poke((void *)(ip + 1), new + 1, JMP_E9_INSN_SIZE - 1); - /* update jmp opcode */ - text_poke((void *)ip, new, 1); -#else - text_poke((void *)ip, new, JMP_E9_INSN_SIZE); -#endif - + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); return 0; } @@ -552,6 +384,6 @@ void arch_klp_unpatch_func(struct klp_func *func) } /* replace the text with the new text */ - text_poke((void *)ip, new, JMP_E9_INSN_SIZE); + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); } #endif diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index d7c44b257f7f4ea66bf4a49333fccb32f2c55d08..8e1c50c86e5db161c38cd1c0a630f0372c828ecf 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -240,8 +240,7 @@ static bool update_stack_state(struct unwind_state *state, else { addr_p = unwind_get_return_address_ptr(state); addr = READ_ONCE_TASK_STACK(state->task, *addr_p); - state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, - addr, addr_p); + state->ip = unwind_recover_ret_addr(state, addr, addr_p); } /* Save the original stack pointer for unwind_dump(): */ diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c index c49f10ffd8cded385e9c0e26cc65406de78a925b..884d68a6e714e16b2a4dc2a8cb12301de6d8402a 100644 --- a/arch/x86/kernel/unwind_guess.c +++ b/arch/x86/kernel/unwind_guess.c @@ -15,8 +15,7 @@ unsigned long unwind_get_return_address(struct unwind_state *state) addr = READ_ONCE_NOCHECK(*state->sp); - return ftrace_graph_ret_addr(state->task, &state->graph_idx, - addr, state->sp); + return unwind_recover_ret_addr(state, addr, state->sp); } EXPORT_SYMBOL_GPL(unwind_get_return_address); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 9452ba625f6704b1b928f752c2fb10f7a09dbf03..e1105f169bcc994b04cc7e663970d318a162a1c7 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -579,9 +579,8 @@ bool unwind_next_frame(struct unwind_state *state) if (!deref_stack_reg(state, ip_p, &state->ip)) goto err; - state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, - state->ip, (void *)ip_p); - + state->ip = unwind_recover_ret_addr(state, state->ip, + (unsigned long *)ip_p); state->sp = sp; state->regs = NULL; state->prev_regs = NULL; @@ -594,7 +593,18 @@ bool unwind_next_frame(struct unwind_state *state) (void *)orig_ip); goto err; } - + /* + * There is a small chance to interrupt at the entry of + * __kretprobe_trampoline() where the ORC info doesn't exist. + * That point is right after the RET to __kretprobe_trampoline() + * which was modified return address. + * At that point, the @addr_p of the unwind_recover_kretprobe() + * (this has to point the address of the stack entry storing + * the modified return address) must be "SP - (a stack entry)" + * because SP is incremented by the RET. 
+ */ + state->ip = unwind_recover_kretprobe(state, state->ip, + (unsigned long *)(state->sp - sizeof(long))); state->regs = (struct pt_regs *)sp; state->prev_regs = NULL; state->full_regs = true; @@ -607,6 +617,9 @@ bool unwind_next_frame(struct unwind_state *state) (void *)orig_ip); goto err; } + /* See UNWIND_HINT_TYPE_REGS case comment. */ + state->ip = unwind_recover_kretprobe(state, state->ip, + (unsigned long *)(state->sp - sizeof(long))); if (state->full_regs) state->prev_regs = state->regs; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 18b7c40ffb37adce316d7cdb0c248712933dcbbd..28a6e871dac61fae349d39d10900e491c03650a0 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -200,6 +200,12 @@ extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs); extern int arch_trampoline_kprobe(struct kprobe *p); +void kretprobe_trampoline(void); +static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) +{ + return !in_nmi() && (void *)addr == &kretprobe_trampoline; +} + /* If the trampoline handler called from a kprobe, use this version */ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, void *trampoline_address, @@ -223,6 +229,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, return ret; } +unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp); #else /* CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) @@ -232,6 +239,15 @@ static inline int arch_trampoline_kprobe(struct kprobe *p) { return 0; } +static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) +{ + return false; +} +static nokprobe_inline +unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp) +{ + return 0; +} #endif /* CONFIG_KRETPROBES */ extern struct kretprobe_blackpoint kretprobe_blacklist[]; @@ -496,6 +512,14 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void kprobes_lock(void); +void kprobes_unlock(void); +#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */ +static inline void kprobes_lock(void) { } +static inline void kprobes_unlock(void) { } +#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */ + /* Returns true if kprobes handled the fault */ static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, unsigned int trap) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 56ad1c1dd83eb7c2b7e5ce2df674521224f1ee19..ad402ea7a3e48423dcf1d89fc8d690ee9d7a5dba 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -239,9 +239,6 @@ struct klp_func_node { void *brk_func; }; -struct klp_func_node *klp_find_func_node(const void *old_func); -void klp_add_func_node(struct klp_func_node *func_node); -void klp_del_func_node(struct klp_func_node *func_node); void *klp_get_brk_func(void *addr); static inline @@ -259,6 +256,16 @@ int klp_compare_address(unsigned long pc, unsigned long func_addr, void arch_klp_init(void); int klp_module_delete_safety_check(struct module *mod); +typedef int (*klp_add_func_t)(struct list_head *func_list, + unsigned long func_addr, unsigned long func_size, + const char *func_name, int force); + +struct walk_stackframe_args { + void *data; + int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); +}; + #endif int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, @@ -309,7 +316,11 @@ static inline int klp_module_coming(struct module *mod) { return 0; } static 
inline void klp_module_going(struct module *mod) {} static inline bool klp_patch_pending(struct task_struct *task) { return false; } static inline void klp_update_patch_state(struct task_struct *task) {} +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +void klp_copy_process(struct task_struct *child); +#else static inline void klp_copy_process(struct task_struct *child) {} +#endif static inline bool klp_have_reliable_stack(void) { return true; } #ifndef klp_smp_isb @@ -353,4 +364,17 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, #endif /* CONFIG_LIVEPATCH */ +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void klp_lock(void); +void klp_unlock(void); +int klp_check_patched(unsigned long addr); +#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */ +static inline void klp_lock(void) { } +static inline void klp_unlock(void) { } +static inline int klp_check_patched(unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */ + #endif /* _LINUX_LIVEPATCH_H_ */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 325a81f9c38f534a9d12888b01b90ad069e7b435..0ad524beaba282d4a3031e53d994aef769fbb6f5 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -42,7 +42,9 @@ #include #include #include - +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +#include +#endif #define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) @@ -1646,10 +1648,17 @@ static int check_kprobe_address_safe(struct kprobe *p, jump_label_lock(); preempt_disable(); - /* Ensure it is not in reserved area nor out of text */ - if (!(core_kernel_text((unsigned long) p->addr) || - is_module_text_address((unsigned long) p->addr)) || - in_gate_area_no_mm((unsigned long) p->addr) || + /* Ensure the address is in a text area, and find a module if exists. */ + *probed_mod = NULL; + if (!core_kernel_text((unsigned long) p->addr)) { + *probed_mod = __module_text_address((unsigned long) p->addr); + if (!(*probed_mod)) { + ret = -EINVAL; + goto out; + } + } + /* Ensure it is not in reserved area. */ + if (in_gate_area_no_mm((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) || jump_label_text_reserved(p->addr, p->addr) || static_call_text_reserved(p->addr, p->addr) || @@ -1659,8 +1668,7 @@ static int check_kprobe_address_safe(struct kprobe *p, goto out; } - /* Check if are we probing a module */ - *probed_mod = __module_text_address((unsigned long) p->addr); + /* Get module refcount and reject __init functions for loaded modules. 
*/
 	if (*probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating
@@ -1689,6 +1697,18 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	return ret;
 }
 
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void kprobes_lock(void)
+{
+	mutex_lock(&kprobe_mutex);
+}
+
+void kprobes_unlock(void)
+{
+	mutex_unlock(&kprobe_mutex);
+}
+#endif
+
 int register_kprobe(struct kprobe *p)
 {
 	int ret;
@@ -1716,6 +1736,12 @@ int register_kprobe(struct kprobe *p)
 		return ret;
 
 	mutex_lock(&kprobe_mutex);
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	klp_lock();
+	ret = klp_check_patched((unsigned long)p->addr);
+	if (ret)
+		goto out;
+#endif
 
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -1749,6 +1775,9 @@ int register_kprobe(struct kprobe *p)
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 out:
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	klp_unlock();
+#endif
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
@@ -1955,6 +1984,29 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 
 #ifdef CONFIG_KRETPROBES
 
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	unsigned long flags;
+	kprobe_opcode_t *correct_ret_addr = NULL;
+
+	kretprobe_hash_lock(tsk, &head, &flags);
+	hlist_for_each_entry(ri, head, hlist) {
+		if (ri->task != tsk)
+			continue;
+		if (ri->fp != fp)
+			continue;
+		if (!is_kretprobe_trampoline((unsigned long)ri->ret_addr)) {
+			correct_ret_addr = ri->ret_addr;
+			break;
+		}
+	}
+	kretprobe_hash_unlock(tsk, &flags);
+	return (unsigned long)correct_ret_addr;
+}
+NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
+
 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
 					     void *trampoline_address,
 					     void *frame_pointer)
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index 297ca41c695e0adb1d30518f2ce0f7a9a3ce688b..55f8ba227e6b1076ee0d089946c9f1d3a287f921 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -98,5 +98,30 @@ config LIVEPATCH_RESTRICT_KPROBE
 	  We should not patch for the functions where registered with kprobe,
 	  and vice versa.
 	  Say Y here if you want to check those.
 
+
+config LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
+	bool "No stop_machine in breakpoint optimization mode"
+	depends on LIVEPATCH_WO_FTRACE
+	default n
+	help
+	  In breakpoint optimization mode, check the calltrace of tasks
+	  in batches without using stop_machine, so as to reduce the
+	  service downtime.
+	  Say N if you are unsure.
+
+config LIVEPATCH_ISOLATE_KPROBE
+	bool "Isolating livepatch and kprobe"
+	depends on LIVEPATCH_RESTRICT_KPROBE
+	depends on DYNAMIC_FTRACE && (X86_64 || ARM64)
+	default n
+	help
+	  Kprobe and livepatch_wo may modify the first several instructions of
+	  a function at the same time, which causes a conflict. Since dynamic
+	  ftrace reserves instructions in non-notrace functions, we can let
+	  kprobe work on the reserved instructions and livepatch_wo work on
+	  the other instructions so as to avoid the conflict. Note that we
+	  still do not allow both to modify the same instruction when a
+	  function is marked 'notrace' and has no reserved instructions.
+ endmenu endif diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 978dcede41a1856a815a320f85c4f642710c3807..3f3fa1c3c80cb01d597f3481b2327ce8ebed8751 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -97,6 +97,42 @@ static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch) } #endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */ +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void klp_lock(void) +{ + mutex_lock(&klp_mutex); +} + +void klp_unlock(void) +{ + mutex_unlock(&klp_mutex); +} + +int klp_check_patched(unsigned long addr) +{ + struct klp_patch *patch; + struct klp_object *obj; + struct klp_func *func; + + lockdep_assert_held(&klp_mutex); + list_for_each_entry(patch, &klp_patches, list) { + if (!patch->enabled) + continue; + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + unsigned long old_func = (unsigned long)func->old_func; + + if (addr >= old_func && addr < old_func + func->old_size) { + pr_err("func %pS has been livepatched\n", (void *)addr); + return -EINVAL; + } + } + } + } + return 0; +} +#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */ + static bool klp_is_module(struct klp_object *obj) { return obj->name; @@ -483,6 +519,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, patch = container_of(kobj, struct klp_patch, kobj); +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + kprobes_lock(); +#endif mutex_lock(&klp_mutex); if (!klp_is_patch_registered(patch)) { @@ -507,7 +546,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, out: mutex_unlock(&klp_mutex); - +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + kprobes_unlock(); +#endif if (ret) return ret; return count; @@ -1028,6 +1069,11 @@ static int klp_init_object_loaded(struct klp_patch *patch, module_enable_ro(patch->mod, true); klp_for_each_func(obj, func) { +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + unsigned long old_func; + unsigned long ftrace_loc; +#endif + ret = klp_find_object_symbol(obj->name, func->old_name, func->old_sympos, (unsigned long *)&func->old_func); @@ -1041,6 +1087,20 @@ static int klp_init_object_loaded(struct klp_patch *patch, func->old_name); return -ENOENT; } +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + old_func = (unsigned long)func->old_func; + ftrace_loc = ftrace_location_range(old_func, old_func + func->old_size - 1); + if (ftrace_loc) { + if (WARN_ON(ftrace_loc < old_func || + ftrace_loc >= old_func + func->old_size - MCOUNT_INSN_SIZE)) { + pr_err("ftrace location for '%s' invalid", func->old_name); + return -EINVAL; + } + func->old_func = (void *)(ftrace_loc + MCOUNT_INSN_SIZE); + func->old_size -= ((unsigned long)func->old_func - old_func); + } +#endif + #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY if (func->old_size < KLP_MAX_REPLACE_SIZE) { pr_err("%s size less than limit (%lu < %zu)\n", func->old_name, @@ -1339,18 +1399,219 @@ static int __klp_disable_patch(struct klp_patch *patch) return 0; } #elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) -int __weak klp_check_calltrace(struct klp_patch *patch, int enable) +int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + return -EINVAL; +} + +bool __weak arch_check_jump_insn(unsigned long func_addr) { + return true; +} + +int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, + klp_add_func_t add_func, + struct list_head *func_list) +{ + int ret; + unsigned long func_addr = 0; + unsigned long func_size; + struct klp_func_node *func_node = NULL; + unsigned long old_func = 
(unsigned long)func->old_func; + + func_node = func->func_node; + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently active functions. + */ + if (list_empty(&func_node->func_stack)) { + /* + * Not patched on this function [the origin one] + */ + func_addr = old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + /* + * When preemption is disabled and the replacement area + * does not contain a jump instruction, the migration + * thread is scheduled to run stop machine only after the + * execution of instructions to be replaced is complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + arch_check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + if (func_addr != old_func) { + ret = add_func(func_list, old_func, KLP_MAX_REPLACE_SIZE, + func->old_name, func->force); + if (ret) + return ret; + } + } + } else { +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement instructions. Therefore, + * when preemption is not enabled, atomic execution is performed + * and these instructions will not appear on the stack. + */ + if (list_is_singular(&func_node->func_stack)) { + func_addr = old_func; + func_size = func->old_size; + } else { + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + if (func_addr != old_func) { + ret = add_func(func_list, old_func, KLP_MAX_REPLACE_SIZE, + func->old_name, 0); + if (ret) + return ret; + } +#endif + + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + return 0; +} + +static inline unsigned long klp_size_to_check(unsigned long func_size, + int force) +{ + unsigned long size = func_size; + + if (force == KLP_STACK_OPTIMIZE && size > KLP_MAX_REPLACE_SIZE) + size = KLP_MAX_REPLACE_SIZE; + return size; +} + +struct actv_func { + struct list_head list; + unsigned long func_addr; + unsigned long func_size; + const char *func_name; + int force; +}; + +static bool check_func_list(void *data, int *ret, unsigned long pc) +{ + struct list_head *func_list = (struct list_head *)data; + struct actv_func *func = NULL; + + list_for_each_entry(func, func_list, list) { + *ret = klp_compare_address(pc, func->func_addr, func->func_name, + klp_size_to_check(func->func_size, func->force)); + if (*ret) + return false; + } + return true; +} + +static int add_func_to_list(struct list_head *func_list, unsigned long func_addr, + unsigned long func_size, const char *func_name, + int force) +{ + struct actv_func *func = kzalloc(sizeof(struct actv_func), GFP_ATOMIC); + + if (!func) + return -ENOMEM; + func->func_addr = func_addr; + func->func_size = func_size; + func->func_name = func_name; + func->force = force; + list_add_tail(&func->list, func_list); + return 0; +} + +static void 
free_func_list(struct list_head *func_list) +{ + struct actv_func *func = NULL; + struct actv_func *tmp = NULL; + + list_for_each_entry_safe(func, tmp, func_list, list) { + list_del(&func->list); + kfree(func); + } +} + +static int klp_check_activeness_func(struct klp_patch *patch, int enable, + struct list_head *func_list) +{ + int ret; + struct klp_object *obj = NULL; + struct klp_func *func = NULL; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = arch_klp_check_activeness_func(func, enable, + add_func_to_list, + func_list); + if (ret) + return ret; + } + } return 0; } +static int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + LIST_HEAD(func_list); + + ret = klp_check_activeness_func(patch, enable, &func_list); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + + if (list_empty(&func_list)) + goto out; + + ret = arch_klp_check_calltrace(check_func_list, (void *)&func_list); + +out: + free_func_list(&func_list); + return ret; +} + static LIST_HEAD(klp_func_list); /* * The caller must ensure that the klp_mutex lock is held or is in the rcu read * critical area. */ -struct klp_func_node *klp_find_func_node(const void *old_func) +static struct klp_func_node *klp_find_func_node(const void *old_func) { struct klp_func_node *func_node; @@ -1363,12 +1624,12 @@ struct klp_func_node *klp_find_func_node(const void *old_func) return NULL; } -void klp_add_func_node(struct klp_func_node *func_node) +static void klp_add_func_node(struct klp_func_node *func_node) { list_add_rcu(&func_node->node, &klp_func_list); } -void klp_del_func_node(struct klp_func_node *func_node) +static void klp_del_func_node(struct klp_func_node *func_node) { list_del_rcu(&func_node->node); } @@ -1965,17 +2226,184 @@ static bool klp_use_breakpoint(struct klp_patch *patch) return true; } -static int klp_breakpoint_optimize(struct klp_patch *patch) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +#include +#include "../sched/sched.h" + +int __weak arch_klp_check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) { - int ret; + return -EINVAL; +} + +/* Called from copy_process() during fork */ +void klp_copy_process(struct task_struct *child) +{ + child->patch_state = current->patch_state; +} + +static void set_tasks_patch_state(int patch_state) +{ + unsigned int cpu; + struct task_struct *g, *task; + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + task->patch_state = patch_state; + } + read_unlock(&tasklist_lock); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + task->patch_state = patch_state; + } + put_online_cpus(); +} + +static void update_patch_state(struct task_struct *task, struct list_head *func_list) +{ + struct rq *rq; + struct rq_flags flags; + + if (task->patch_state == KLP_PATCHED) + return; + WARN_ON_ONCE(task->patch_state != KLP_UNPATCHED); + rq = task_rq_lock(task, &flags); + if (task_running(rq, task) && task != current) + goto done; + if (arch_klp_check_task_calltrace(task, check_func_list, (void *)func_list)) + goto done; + task->patch_state = KLP_PATCHED; +done: + task_rq_unlock(rq, task, &flags); +} + +#ifdef CONFIG_SMP +static void check_task_calltrace_ipi(void *func_list) +{ + if (current->patch_state == KLP_PATCHED) + return; + if (arch_klp_check_task_calltrace(current, check_func_list, func_list)) + return; + current->patch_state = KLP_PATCHED; +} + +static void update_patch_state_ipi(struct list_head 
*func_list) +{ + unsigned int cpu; + unsigned int curr_cpu; + + preempt_disable(); + curr_cpu = smp_processor_id(); + for_each_online_cpu(cpu) { + if (cpu == curr_cpu) + continue; + smp_call_function_single(cpu, check_task_calltrace_ipi, func_list, 1); + } + preempt_enable(); +} +#endif + +static void update_tasks_patch_state(struct list_head *func_list) +{ + unsigned int cpu; + struct task_struct *g, *task; + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) + update_patch_state(task, func_list); + read_unlock(&tasklist_lock); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + if (cpu_online(cpu)) { + update_patch_state(task, func_list); + } else if (task->patch_state != KLP_PATCHED) { + /* offline idle tasks can be directly updated */ + task->patch_state = KLP_PATCHED; + } + } + put_online_cpus(); +#ifdef CONFIG_SMP + update_patch_state_ipi(func_list); +#endif +} + +static bool is_patchable(void) +{ + unsigned int cpu; + struct task_struct *g, *task; + int patchable = true; + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + WARN_ON_ONCE(task->patch_state == KLP_UNDEFINED); + if (task->patch_state != KLP_PATCHED) { + put_online_cpus(); + return false; + } + } + put_online_cpus(); + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + WARN_ON_ONCE(task->patch_state == KLP_UNDEFINED); + if (task->patch_state != KLP_PATCHED) { + patchable = false; + goto out_unlock; + } + } +out_unlock: + read_unlock(&tasklist_lock); + return patchable; +} + +static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) +{ + LIST_HEAD(func_list); + int ret = -EINVAL; int i; - int cnt = 0; + int retry_cnt = 0; - ret = klp_add_breakpoint(patch); + ret = klp_check_activeness_func(patch, true, &func_list); if (ret) { - pr_err("failed to add breakpoints, ret=%d\n", ret); - return ret; + pr_err("break optimize collecting active functions failed, ret=%d\n", ret); + goto out; + } + + set_tasks_patch_state(KLP_UNPATCHED); + + for (i = 0; i < KLP_RETRY_COUNT; i++) { + retry_cnt++; + + update_tasks_patch_state(&func_list); + if (is_patchable()) { + arch_klp_code_modify_prepare(); + ret = enable_patch(patch, true); + arch_klp_code_modify_post_process(); + break; + } + ret = -EAGAIN; + pr_notice("try again in %d ms\n", KLP_RETRY_INTERVAL); + msleep(KLP_RETRY_INTERVAL); } + set_tasks_patch_state(KLP_UNDEFINED); +out: + free_func_list(&func_list); + *cnt = retry_cnt; + return ret; +} + +#else /* !CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE */ + +static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) +{ + int ret = -EINVAL; + int i; + int retry_cnt = 0; for (i = 0; i < KLP_RETRY_COUNT; i++) { struct patch_data patch_data = { @@ -1987,20 +2415,37 @@ static int klp_breakpoint_optimize(struct klp_patch *patch) if (i == KLP_RETRY_COUNT - 1) patch_data.rollback = true; - cnt++; + retry_cnt++; ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask); if (!ret || ret != -EAGAIN) break; - pr_notice("try again in %d ms.\n", KLP_RETRY_INTERVAL); + pr_notice("try again in %d ms\n", KLP_RETRY_INTERVAL); msleep(KLP_RETRY_INTERVAL); } + *cnt = retry_cnt; + return ret; +} +#endif /* CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE */ + +static int klp_breakpoint_optimize(struct klp_patch *patch) +{ + int ret; + int cnt = 0; + + ret = klp_add_breakpoint(patch); + if (ret) { + pr_err("failed to add breakpoints, ret=%d\n", ret); + return ret; + } + + ret = klp_breakpoint_enable_patch(patch, &cnt); + 
pr_notice("patching %s, tried %d times, ret=%d.\n", ret ? "failed" : "success", cnt, ret); - /* * If the patch is enabled successfully, the breakpoint instruction * has been replaced with the jump instruction. However, if the patch diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b2888890add04928e772a3e9e9bb2196d844af21..6b7deaaecc51a42eed5ebc10abae53fa5a5c5339 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1567,12 +1567,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) unsigned long ftrace_location_range(unsigned long start, unsigned long end) { struct dyn_ftrace *rec; + unsigned long ip = 0; + rcu_read_lock(); rec = lookup_rec(start, end); if (rec) - return rec->ip; + ip = rec->ip; + rcu_read_unlock(); - return 0; + return ip; } /** @@ -1585,25 +1588,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end) */ unsigned long ftrace_location(unsigned long ip) { - struct dyn_ftrace *rec; + unsigned long loc; unsigned long offset; unsigned long size; - rec = lookup_rec(ip, ip); - if (!rec) { + loc = ftrace_location_range(ip, ip); + if (!loc) { if (!kallsyms_lookup_size_offset(ip, &size, &offset)) goto out; /* map sym+0 to __fentry__ */ if (!offset) - rec = lookup_rec(ip, ip + size - 1); + loc = ftrace_location_range(ip, ip + size - 1); } - if (rec) - return rec->ip; - out: - return 0; + return loc; } /** @@ -6347,6 +6347,8 @@ static int ftrace_process_locs(struct module *mod, /* We should have used all pages unless we skipped some */ if (pg_unuse) { WARN_ON(!skipped); + /* Need to synchronize with ftrace_location_range() */ + synchronize_rcu(); ftrace_free_pages(pg_unuse); } return ret; @@ -6529,6 +6531,9 @@ void ftrace_release_mod(struct module *mod) out_unlock: mutex_unlock(&ftrace_lock); + /* Need to synchronize with ftrace_location_range() */ + if (tmp_page) + synchronize_rcu(); for (pg = tmp_page; pg; pg = tmp_page) { /* Needs to be called outside of ftrace_lock */ @@ -6851,6 +6856,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) unsigned long start = (unsigned long)(start_ptr); unsigned long end = (unsigned long)(end_ptr); struct ftrace_page **last_pg = &ftrace_pages_start; + struct ftrace_page *tmp_page = NULL; struct ftrace_page *pg; struct dyn_ftrace *rec; struct dyn_ftrace key; @@ -6894,12 +6900,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) ftrace_update_tot_cnt--; if (!pg->index) { *last_pg = pg->next; - if (pg->records) { - free_pages((unsigned long)pg->records, pg->order); - ftrace_number_of_pages -= 1 << pg->order; - } - ftrace_number_of_groups--; - kfree(pg); + pg->next = tmp_page; + tmp_page = pg; pg = container_of(last_pg, struct ftrace_page, next); if (!(*last_pg)) ftrace_pages = pg; @@ -6916,6 +6918,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) clear_func_from_hashes(func); kfree(func); } + /* Need to synchronize with ftrace_location_range() */ + if (tmp_page) { + synchronize_rcu(); + ftrace_free_pages(tmp_page); + } } void __init ftrace_free_init_mem(void)
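
For reference, both the powerpc livepatch_create_bstub() change and the new x86 klp_patch_text() helper rely on the same ordering: write every byte of the replacement except the leading breakpoint, force the compiler to keep that order with barrier(), and only then overwrite the first byte so the finished jump becomes visible in one step. The minimal userspace sketch below shows only that ordering; it assumes plain memcpy() in place of text_poke()/patch_instruction(), and all names and byte values are illustrative, not kernel API.

/*
 * Illustrative userspace sketch only, not kernel code: the two-stage write
 * used by the new klp_patch_text() helpers.  The real code writes with
 * text_poke()/patch_instruction() and relies on the breakpoint handler to
 * redirect any CPU that still hits the armed first byte.
 */
#include <stdio.h>
#include <string.h>

#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static void patch_text_sketch(unsigned char *dst, const unsigned char *src, size_t len)
{
	if (len <= 1)
		return;
	/* Step 1: fill in everything after the leading breakpoint byte. */
	memcpy(dst + 1, src + 1, len - 1);
	/* Keep the compiler from reordering the tail write past the head write. */
	compiler_barrier();
	/* Step 2: only now replace the first byte, arming the finished jump. */
	memcpy(dst, src, 1);
}

int main(void)
{
	unsigned char site[5] = { 0xcc, 0x90, 0x90, 0x90, 0x90 };	/* int3 + nops */
	unsigned char jmp[5]  = { 0xe9, 0x78, 0x56, 0x34, 0x12 };	/* jmp rel32 (dummy offset) */
	size_t i;

	patch_text_sketch(site, jmp, sizeof(jmp));
	for (i = 0; i < sizeof(site); i++)
		printf("%02x ", site[i]);
	printf("\n");	/* e9 78 56 34 12 */
	return 0;
}

Until the final one-byte write lands, a task that reaches the patch site still traps on the breakpoint and is handled by the registered brk_func, which is what makes the staged write usable without stop_machine().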
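The unwinder side pairs with this: unwind_recover_ret_addr() first lets ftrace_graph_ret_addr() undo any graph-tracer rewrite and then, via unwind_recover_kretprobe(), replaces a return address that points at kretprobe_trampoline with the real caller found by kretprobe_find_ret_addr(), matching on the saved frame pointer. The sketch below models only that lookup step with simplified, hypothetical types (a flat table instead of the per-task hash list, and no locking), so it is an illustration of the idea rather than the kernel implementation.

/*
 * Simplified model (hypothetical types, no hashing or locking) of mapping the
 * kretprobe trampoline address back to the real caller, in the spirit of
 * kretprobe_find_ret_addr() / unwind_recover_kretprobe().
 */
#include <stdio.h>
#include <stddef.h>

#define TRAMPOLINE_ADDR	((unsigned long)0xffffffffc0000000UL)	/* stand-in address */

struct ret_instance {
	int task_id;		/* which task this saved return belongs to */
	void *fp;		/* frame pointer captured at function entry */
	unsigned long ret_addr;	/* original return address */
};

static unsigned long find_ret_addr(const struct ret_instance *tbl, size_t n,
				   int task_id, void *fp)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].task_id != task_id || tbl[i].fp != fp)
			continue;
		/* Skip nested entries that themselves point at the trampoline. */
		if (tbl[i].ret_addr != TRAMPOLINE_ADDR)
			return tbl[i].ret_addr;
	}
	return 0;
}

static unsigned long recover_ret_addr(const struct ret_instance *tbl, size_t n,
				      int task_id, unsigned long addr, void *fp)
{
	/* Only rewrite addresses that actually point at the trampoline. */
	return addr == TRAMPOLINE_ADDR ? find_ret_addr(tbl, n, task_id, fp) : addr;
}

int main(void)
{
	int stack_slot = 0;	/* fake frame pointer */
	struct ret_instance tbl[] = {
		{ .task_id = 1, .fp = &stack_slot, .ret_addr = 0x400123 },
	};

	printf("%#lx\n", recover_ret_addr(tbl, 1, 1, TRAMPOLINE_ADDR, &stack_slot));
	printf("%#lx\n", recover_ret_addr(tbl, 1, 1, 0x400456, &stack_slot));
	return 0;
}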