diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 2b59cff8be1798cf3ecb947cf3203ef1d3300ad2..65e0556064d681830286870534651433034fc8fd 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -2111,7 +2111,7 @@ permissions on the task specified to change its timerslack_ns value. 3.11 /proc//patch_state - Livepatch patch operation state ----------------------------------------------------------------- -When CONFIG_LIVEPATCH is enabled, this file displays the value of the +When CONFIG_LIVEPATCH_FTRACE is enabled, this file displays the value of the patch state for the task. A value of '-1' indicates that no patch is in transition. diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9557808e8937b1348cf4a7466ce28059fa5df14e..e093c9ba408a4c8e1bf52bd13024f46b24f74096 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -139,6 +139,7 @@ config ARM select RTC_LIB select SPARSE_IRQ if !(ARCH_FOOTBRIDGE || ARCH_RPC) select SYS_SUPPORTS_APM_EMULATION + select HAVE_LIVEPATCH_WO_FTRACE select THREAD_INFO_IN_TASK select TIMER_OF if OF select HAVE_ARCH_VMAP_STACK if MMU && ARM_HAS_GROUP_RELOCS @@ -1811,3 +1812,5 @@ config ARCH_HIBERNATION_POSSIBLE endmenu source "arch/arm/Kconfig.assembler" + +source "kernel/livepatch/Kconfig" diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..892231ca55ce3f02715519e9039be04f0bd728eb --- /dev/null +++ b/arch/arm/include/asm/livepatch.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * livepatch.h - arm-specific Kernel Live Patching Core + * + * Copyright (C) 2018 Huawei Technologies Co., Ltd. + * Copyright (C) 2023 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef _ASM_ARM_LIVEPATCH_H +#define _ASM_ARM_LIVEPATCH_H + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + +#ifdef CONFIG_ARM_MODULE_PLTS +#define LJMP_INSN_SIZE 3 +#else +#define LJMP_INSN_SIZE 1 +#endif /* CONFIG_ARM_MODULE_PLTS */ + +struct arch_klp_data { + u32 old_insns[LJMP_INSN_SIZE]; +}; + +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + +struct klp_func; + +/* kernel livepatch instruction barrier */ +#define klp_smp_isb() isb() +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +bool arch_check_jump_insn(unsigned long func_addr); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data); + +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + +#endif /* _ASM_ARM_LIVEPATCH_H */ diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 07c51a34f77d5fc666237132d3c9d292bbe160b8..76130564683126203aa5abdc87f32b39308cc784 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -21,6 +21,7 @@ struct plt_entries { struct mod_plt_sec { struct elf32_shdr *plt; struct plt_entries *plt_ent; + int plt_shndx; int plt_count; }; @@ -36,7 +37,8 @@ struct mod_arch_specific { }; struct module; -u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val); +u32 get_module_plt(struct module *mod, Elf32_Shdr *sechdrs, + unsigned long loc, Elf32_Addr val); #ifdef CONFIG_ARM_MODULE_PLTS bool in_module_plt(unsigned long loc); #else diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index d53f56d6f840857a838517586f10ceb12f05412b..3d1f8ff10a86936b1ed113157cfc5cee1400dba3 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o patch.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o patch.o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o insn.o patch.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o # Main staffs in KPROBES are in arch/arm/probes/ . diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index a0b6d1e3812fdba8f25aa4b59406044904150526..d99874cc9b2e35ae9d24a2b118651ecbca562d60 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -158,7 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) new = ftrace_call_replace(ip, aaddr, !mod); #ifdef CONFIG_ARM_MODULE_PLTS if (!new && mod) { - aaddr = get_module_plt(mod, ip, aaddr); + aaddr = get_module_plt(mod, NULL, ip, aaddr); new = ftrace_call_replace(ip, aaddr, true); } #endif @@ -204,7 +204,7 @@ int ftrace_make_nop(struct module *mod, !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod); #ifdef CONFIG_ARM_MODULE_PLTS if (!old && mod) { - aaddr = get_module_plt(mod, ip, aaddr); + aaddr = get_module_plt(mod, NULL, ip, aaddr); old = ftrace_call_replace(ip, aaddr, true); } #endif diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..7a5d2cfde32e70011b004d403a3d84890bc88b6a --- /dev/null +++ b/arch/arm/kernel/livepatch.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * livepatch.c - arm-specific Kernel Live Patching Core + * + * Copyright (C) 2018 Huawei Technologies Co., Ltd. 
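A note on the arch_klp_data layout introduced in the new arch/arm/include/asm/livepatch.h above: with CONFIG_ARM_MODULE_PLTS, LJMP_INSN_SIZE is 3 A32 words because the far-jump sequence emitted later by the ARM do_patch() is "LDR PC, [PC, #0]", a NOP, and a 32-bit literal holding the target address. The following is a minimal userspace sketch (not kernel code) showing how KLP_MAX_REPLACE_SIZE falls out of that; sizeof_field() is redefined locally to mirror the kernel macro, and the main()/printf scaffolding is purely illustrative.

```c
/*
 * Standalone sketch: the ARM replace budget when CONFIG_ARM_MODULE_PLTS=y.
 * Three A32 words (12 bytes) at the start of the old function may be
 * overwritten by the long-jump sequence and must be saved for unpatching.
 */
#include <stdint.h>
#include <stdio.h>

#define LJMP_INSN_SIZE 3
/* local stand-in for the kernel's sizeof_field() */
#define sizeof_field(type, member) sizeof(((type *)0)->member)

struct arch_klp_data {
	uint32_t old_insns[LJMP_INSN_SIZE];
};

#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns)

int main(void)
{
	printf("replace budget: %zu bytes (%d instructions)\n",
	       KLP_MAX_REPLACE_SIZE, LJMP_INSN_SIZE);
	return 0;
}
```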
+ * Copyright (C) 2023 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef ARM_INSN_SIZE +#error "ARM_INSN_SIZE have been redefined, please check" +#else +#define ARM_INSN_SIZE 4 +#endif + +#define CHECK_JUMP_RANGE LJMP_INSN_SIZE + +/* + * The instruction set on arm is A32. + * The instruction of BL is xxxx1011xxxxxxxxxxxxxxxxxxxxxxxx, and first four + * bits could not be 1111. + * The instruction of BLX(immediate) is 1111101xxxxxxxxxxxxxxxxxxxxxxxxx. + * The instruction of BLX(register) is xxxx00010010xxxxxxxxxxxx0011xxxx, and + * first four bits could not be 1111. + */ +static bool is_jump_insn(u32 insn) +{ + if (((insn & 0x0f000000) == 0x0b000000) && + ((insn & 0xf0000000) != 0xf0000000)) + return true; + if ((insn & 0xfe000000) == 0xfa000000) + return true; + if (((insn & 0x0ff000f0) == 0x01200030) && + ((insn & 0xf0000000) != 0xf0000000)) + return true; + return false; +} + +bool arch_check_jump_insn(unsigned long func_addr) +{ + unsigned long i; + u32 *insn = (u32*)func_addr; + + for (i = 0; i < CHECK_JUMP_RANGE; i++) { + if (is_jump_insn(*insn)) + return true; + insn++; + } + return false; +} + +static bool klp_check_jump_func(void *ws_args, unsigned long pc) +{ + struct walk_stackframe_args *args = ws_args; + + return args->check_func(args->data, &args->ret, pc); +} + +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) +{ + struct stackframe frame; + + if (t == current) { + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_stack_pointer; + frame.lr = (unsigned long)__builtin_return_address(0); + frame.pc = (unsigned long)check_task_calltrace; + } else { + frame.fp = thread_saved_fp(t); + frame.sp = thread_saved_sp(t); + frame.lr = 0; /* recovered from the stack */ + frame.pc = thread_saved_pc(t); + } + walk_stackframe(&frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + +static int do_check_calltrace(struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) +{ + int ret; + struct task_struct *g, *t; + unsigned int cpu; + + for_each_process_thread(g, t) { + if (klp_is_migration_thread(t->comm)) + continue; + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } + return 0; +} + +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + return do_check_calltrace(&args, klp_check_jump_func); +} + +static inline bool 
offset_in_range(unsigned long pc, unsigned long addr, + long range) +{ + long offset = addr - pc; + + return (offset >= -range && offset < range); +} + +long arm_insn_read(void *addr, u32 *insnp) +{ + long ret; + u32 val; + + ret = copy_from_kernel_nofault(&val, addr, ARM_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + long ret; + int i; + + for (i = 0; i < LJMP_INSN_SIZE; i++) { + ret = arm_insn_read((u32 *)old_func + i, &arch_data->old_insns[i]); + if (ret) + break; + } + return ret; +} + +static void klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + + if (len <= 0) + return; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) + __patch_text(dst + i, src[i]); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + __patch_text(dst, src[0]); +} + +static int do_patch(unsigned long pc, unsigned long new_addr) +{ + u32 insns[LJMP_INSN_SIZE]; + + if (!offset_in_range(pc, new_addr, SZ_32M)) { +#ifdef CONFIG_ARM_MODULE_PLTS + /* + * [0] LDR PC, [PC+8] + * [4] nop + * [8] new_addr_to_jump + */ + insns[0] = __opcode_to_mem_arm(0xe59ff000); + insns[1] = __opcode_to_mem_arm(0xe320f000); + insns[2] = new_addr; + + klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); +#else + /* + * When offset from 'new_addr' to 'pc' is out of SZ_32M range but + * CONFIG_ARM_MODULE_PLTS not enabled, we should stop patching. + */ + pr_err("new address out of range\n"); + return -EFAULT; +#endif + } else { + insns[0] = arm_gen_branch(pc, new_addr); + klp_patch_text((u32 *)pc, insns, 1); + } + return 0; +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + int ret; + + func_node = func->func_node; + list_add_rcu(&func->stack_node, &func_node->func_stack); + ret = do_patch((unsigned long)func->old_func, (unsigned long)func->new_func); + if (ret) + list_del_rcu(&func->stack_node); + return ret; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long pc; + + func_node = func->func_node; + pc = (unsigned long)func_node->old_func; + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); + } else { + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + + do_patch(pc, (unsigned long)next_func->new_func); + } +} diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c index da2ee8d6ef1a7bb352b3803ebf5828fa48419105..630388dbbf258859d2b6363130a3b35f03c41b72 100644 --- a/arch/arm/kernel/module-plts.c +++ b/arch/arm/kernel/module-plts.c @@ -43,16 +43,19 @@ static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt) memcpy(plt->lit, fixed_plts, sizeof(fixed_plts)); } -u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) +u32 get_module_plt(struct module *mod, Elf32_Shdr *sechdrs, + unsigned long loc, Elf32_Addr val) { struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ? &mod->arch.core : &mod->arch.init; + Elf32_Shdr *plt_shdr = sechdrs ? 
&sechdrs[pltsec->plt_shndx] : + pltsec->plt; struct plt_entries *plt; int idx; /* cache the address, ELF header is available only during module load */ if (!pltsec->plt_ent) - pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr; + pltsec->plt_ent = (struct plt_entries *)plt_shdr->sh_addr; plt = pltsec->plt_ent; prealloc_fixed(pltsec, plt); @@ -80,7 +83,7 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) } pltsec->plt_count++; - BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size); + BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > plt_shdr->sh_size); if (!idx) /* Populate a new set of entries */ @@ -213,21 +216,24 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned long init_plts = ARRAY_SIZE(fixed_plts); Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; Elf32_Sym *syms = NULL; + int i = 0; /* * To store the PLTs, we expand the .text section for core module code * and for initialization code. */ - for (s = sechdrs; s < sechdrs_end; ++s) { - if (strcmp(".plt", secstrings + s->sh_name) == 0) + for (s = sechdrs; s < sechdrs_end; ++s, ++i) { + if (strcmp(".plt", secstrings + s->sh_name) == 0) { mod->arch.core.plt = s; - else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) + mod->arch.core.plt_shndx = i; + } else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) { mod->arch.init.plt = s; - else if (s->sh_type == SHT_SYMTAB) + mod->arch.init.plt_shndx = i; + } else if (s->sh_type == SHT_SYMTAB) syms = (Elf32_Sym *)s->sh_addr; } - if (!mod->arch.core.plt || !mod->arch.init.plt) { + if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) { pr_err("%s: module PLT section(s) missing\n", mod->name); return -ENOEXEC; } diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index e74d84f58b77c31b644918d646f5baf67a746b7b..6c310d49f71c0ed2769744bc6808bf6dd2626839 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -182,7 +182,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000)) - offset = get_module_plt(module, loc, + offset = get_module_plt(module, sechdrs, loc, offset + loc + 8) - loc - 8; @@ -353,7 +353,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && (offset <= (s32)0xff000000 || offset >= (s32)0x01000000)) - offset = get_module_plt(module, loc, + offset = get_module_plt(module, sechdrs, loc, offset + loc + 4) - loc - 4; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 80e848335cb1013467d8d3079ed82c9aca773d21..a16daaf364e9b49984c345baa4bcddb46819b2cf 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -251,6 +251,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_LIVEPATCH_WO_FTRACE select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD select TRACE_IRQFLAGS_SUPPORT select TRACE_IRQFLAGS_NMI_SUPPORT @@ -411,6 +412,8 @@ config UNWIND_TABLES source "arch/arm64/Kconfig.platforms" +source "kernel/livepatch/Kconfig" + menu "Kernel Features" menu "ARM errata workarounds via the alternatives framework" diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 919fa9e7b5ded9d46d2f22d9460fb05c0f7e3aae..4e3f2b982e746db2806a7be5d7c7d4fbbb0d06cb 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -337,6 +337,18 @@ CONFIG_ARCH_XGENE=y # 
CONFIG_ARCH_ZYNQMP is not set # end of Platform selection +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +# CONFIG_LIVEPATCH_STACK is not set +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +# end of Enable Livepatch + # # Kernel Features # diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0fbfaad1f31ff3b5639a10c26ca2dd9338207671 --- /dev/null +++ b/arch/arm64/include/asm/livepatch.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2014-2019, Huawei. + * Author: Li Bin + * Author: Cheng Jian + * Copyright (C) 2023 Huawei. + * Author: Zheng Yejian + * + * livepatch.h - arm64-specific Kernel Live Patching Core + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef _ASM_ARM64_LIVEPATCH_H +#define _ASM_ARM64_LIVEPATCH_H + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + +#define LJMP_INSN_SIZE 4 + +struct arch_klp_data { + u32 old_insns[LJMP_INSN_SIZE]; +}; + +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + +struct klp_func; + +#define klp_smp_isb() isb() +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +bool arch_check_jump_insn(unsigned long func_addr); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data); +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + +#endif /* _ASM_ARM64_LIVEPATCH_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 59fa5d6e6f3e632aa4cf38ee78503b6cce35dbd9..730a576a8829184b62a087dbfbd6b78bc08aa608 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_CPU_PM) += sleep.o suspend.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..c2f8da8bfec906585c4a8cd554c48b34e738b225 --- /dev/null +++ b/arch/arm64/kernel/livepatch.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * livepatch.c - arm64-specific Kernel Live Patching Core + * + * Copyright (C) 2014 Li Bin + * Copyright (C) 2023 Zheng Yejian + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
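The arm64 header above fixes LJMP_INSN_SIZE at 4: when the replacement function is outside the ±128M immediate-branch range, the arm64 do_patch() added later in this patch emits MOVN X16 / MOVK X16, LSL #16 / MOVK X16, LSL #32 / BR X16. Below is a small userspace sketch (not kernel code) of why three 16-bit moves are enough: MOVN writes the bitwise NOT of its immediate, which sets bits 63:48 to all ones, already correct for kernel virtual addresses under the usual arm64 VA layout. The sample address is invented for illustration.

```c
/*
 * Emulate the effect of the MOVN/MOVK/MOVK/BR X16 trampoline on the
 * X16 register value (instruction encodings themselves are omitted).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t emulate_trampoline_moves(uint64_t new_addr)
{
	uint64_t x16;

	/* MOVN X16, #(~new_addr & 0xffff): X16 = ~imm16, top bits become 1s */
	x16 = ~(uint64_t)(~new_addr & 0xffff);
	/* MOVK X16, #((new_addr >> 16) & 0xffff), LSL #16 */
	x16 = (x16 & ~0xffff0000ULL) | (((new_addr >> 16) & 0xffff) << 16);
	/* MOVK X16, #((new_addr >> 32) & 0xffff), LSL #32 */
	x16 = (x16 & ~0xffff00000000ULL) | (((new_addr >> 32) & 0xffff) << 32);
	/* BR X16 then jumps to this value */
	return x16;
}

int main(void)
{
	uint64_t addr = 0xffff800010123456ULL;	/* typical kernel VA shape */

	assert(emulate_trampoline_moves(addr) == addr);
	printf("trampoline target reconstructed: %#llx\n",
	       (unsigned long long)emulate_trampoline_moves(addr));
	return 0;
}
```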
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CHECK_JUMP_RANGE LJMP_INSN_SIZE + +static inline bool offset_in_range(unsigned long pc, unsigned long addr, + long range) +{ + long offset = addr - pc; + + return (offset >= -range && offset < range); +} + +/* + * The instruction set on arm64 is A64. + * The instruction of BLR is 1101011000111111000000xxxxx00000. + * The instruction of BL is 100101xxxxxxxxxxxxxxxxxxxxxxxxxx. + * The instruction of BLRAX is 1101011x0011111100001xxxxxxxxxxx. + */ +#define is_jump_insn(insn) (((le32_to_cpu(insn) & 0xfffffc1f) == 0xd63f0000) || \ + ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \ + ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800)) + +bool arch_check_jump_insn(unsigned long func_addr) +{ + unsigned long i; + u32 *insn = (u32 *)func_addr; + + for (i = 0; i < CHECK_JUMP_RANGE; i++) { + if (is_jump_insn(*insn)) + return true; + insn++; + } + return false; +} + +static bool klp_check_jump_func(void *ws_args, unsigned long pc) +{ + struct walk_stackframe_args *args = ws_args; + + return args->check_func(args->data, &args->ret, pc); +} + +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) +{ + arch_stack_walk(fn, args, t, NULL); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + +static int do_check_calltrace(struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) +{ + int ret; + struct task_struct *g, *t; + unsigned int cpu; + + for_each_process_thread(g, t) { + if (klp_is_migration_thread(t->comm)) + continue; + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } + return 0; +} + +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + return do_check_calltrace(&args, klp_check_jump_func); +} + +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + long ret; + int i; + + for (i = 0; i < LJMP_INSN_SIZE; i++) { + ret = aarch64_insn_read(((u32 *)old_func) + i, + &arch_data->old_insns[i]); + if (ret) + break; + } + return ret; +} + +static int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) { + ret = aarch64_insn_patch_text_nosync(dst + i, src[i]); + if (ret) + return ret; + } + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. 
+ */ + barrier(); + return aarch64_insn_patch_text_nosync(dst, src[0]); +} + +static int do_patch(unsigned long pc, unsigned long new_addr) +{ + u32 insns[LJMP_INSN_SIZE]; + int ret; + + if (offset_in_range(pc, new_addr, SZ_128M)) { + insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + ret = klp_patch_text((u32 *)pc, insns, 1); + if (ret) { + pr_err("patch instruction small range failed, ret=%d\n", ret); + return -EPERM; + } + } else { + /* movn x16, #0x.... */ + /* movk x16, #0x...., lsl #16 */ + /* movk x16, #0x...., lsl #32 */ + /* br x16 */ + insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; + insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; + insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; + insns[3] = 0xd61f0200; + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; + } + } + return 0; +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + int ret; + + func_node = func->func_node; + list_add_rcu(&func->stack_node, &func_node->func_stack); + ret = do_patch((unsigned long)func->old_func, (unsigned long)func->new_func); + if (ret) + list_del_rcu(&func->stack_node); + return ret; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long pc; + int ret; + + func_node = func->func_node; + pc = (unsigned long)func_node->old_func; + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; + } + } else { + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + if (WARN_ON(!next_func)) + return; + do_patch(pc, (unsigned long)next_func->new_func); + } +} diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d5d5388973ac7612fd8bbc5467d37ecf805b3f46..165146badfe4df8c71cf351f545c665e8bf2b1e8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -25,7 +25,7 @@ config 64BIT config LIVEPATCH_64 def_bool PPC64 - depends on LIVEPATCH + depends on LIVEPATCH_FTRACE config MMU bool @@ -256,7 +256,7 @@ config PPC select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT && (!ARCH_USING_PATCHABLE_FUNCTION_ENTRY || (!CC_IS_GCC || GCC_VERSION >= 110100)) - select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) select HAVE_OPTPROBES diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ae29e4392664ad40236ade2614106fa2127d4a55..a663dc1a35389a960e79c9bd77a5e7fcf050d240 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -194,7 +194,7 @@ config S390 select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES select HAVE_KVM - select HAVE_LIVEPATCH + select HAVE_LIVEPATCH_FTRACE select HAVE_MEMBLOCK_PHYS_MAP select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 438cd92e60801bd3c12cd1f891c5a9009fe115f8..ea93ed91211dbcd918a57d9d12680ca1e71c1d5a 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -44,6 +44,7 @@ CONFIG_KEXEC_FILE=y CONFIG_KEXEC_SIG=y CONFIG_CRASH_DUMP=y 
CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_FTRACE=y CONFIG_MARCH_ZEC12=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 1b8150e50f6a65a9d0dadbbf6bb1ee7e2785f8d3..cab45afd11c00a8020b19ccd4ec90a9b4c767565 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -42,6 +42,7 @@ CONFIG_KEXEC_FILE=y CONFIG_KEXEC_SIG=y CONFIG_CRASH_DUMP=y CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_FTRACE=y CONFIG_MARCH_ZEC12=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 66bfabae8814919c61902da3cdc77a45d9efa775..4ff6c115d127c35736ab5769d01572b7b773a887 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -241,7 +241,8 @@ config X86 select HAVE_KRETPROBES select HAVE_RETHOOK select HAVE_KVM - select HAVE_LIVEPATCH if X86_64 + select HAVE_LIVEPATCH_FTRACE if X86_64 + select HAVE_LIVEPATCH_WO_FTRACE if X86_64 select HAVE_MIXED_BREAKPOINTS_REGS select HAVE_MOD_ARCH_SPECIFIC select HAVE_MOVE_PMD diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index b40100e166838208b2a77a09d011f190c52ceb53..649bb5ee82ddac1e4af55339f8b36b1792caa751 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -499,8 +499,18 @@ CONFIG_LEGACY_VSYSCALL_XONLY=y # CONFIG_CMDLINE_BOOL is not set CONFIG_MODIFY_LDT_SYSCALL=y # CONFIG_STRICT_SIGALTSTACK_SIZE is not set -CONFIG_HAVE_LIVEPATCH=y +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# CONFIG_LIVEPATCH=y +# CONFIG_LIVEPATCH_FTRACE is not set +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +# CONFIG_LIVEPATCH_STACK is not set +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +# end of Enable Livepatch # end of Processor type and features CONFIG_FUNCTION_PADDING_CFI=11 diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 1b29f58f730fde4d2510fa4c7ce3e82600fb40f1..039863fdecfbd6f4e9033e86043ce1cc3be41d59 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -180,6 +180,13 @@ static inline int insn_has_emulate_prefix(struct insn *insn) return !!insn->emulate_prefix_size; } +/* Ensure this instruction is decoded completely */ +static inline int insn_complete(struct insn *insn) +{ + return insn->opcode.got && insn->modrm.got && insn->sib.got && + insn->displacement.got && insn->immediate.got; +} + static inline insn_byte_t insn_vex_m_bits(struct insn *insn) { if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..359c06ad1cc2b5119d97c5765271e16e6cc88ab2 --- /dev/null +++ b/arch/x86/include/asm/livepatch.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * livepatch.h - x86-specific Kernel Live Patching Core + * + * Copyright (C) 2023 Huawei. 
+ */ + +#ifndef _ASM_X86_LIVEPATCH_H +#define _ASM_X86_LIVEPATCH_H + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + +#define JMP_E9_INSN_SIZE 5 +struct arch_klp_data { + unsigned char old_insns[JMP_E9_INSN_SIZE]; +}; + +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + +struct klp_func; + +#define klp_smp_isb() +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +bool arch_check_jump_insn(unsigned long func_addr); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data); +void arch_klp_code_modify_prepare(void); +void arch_klp_code_modify_post_process(void); + +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + +#endif /* _ASM_X86_LIVEPATCH_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3269a0e23d3ab86752380175352106085bde640a..6fafa4560c6d5d4c3528c84f5db5bd1b98334369 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-y += apic/ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..a877ffdf96ac51c215d23e43e250758ac2484304 --- /dev/null +++ b/arch/x86/kernel/livepatch.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * livepatch.c - x86-specific Kernel Live Patching Core + * + * Copyright (C) 2023 Huawei Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * The instruction set on x86 is CISC. + * The instructions of call in same segment are 11101000(direct), + * 11111111(register indirect) and 11111111(memory indirect). + * The instructions of call in other segment are 10011010(direct), + * 11111111(indirect). 
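JMP_E9_INSN_SIZE above is 5 because the x86 redirection primitive is a single JMP rel32: opcode 0xE9 followed by a little-endian signed displacement measured from the end of the instruction. klp_jmp_code() later in this file obtains those bytes from text_gen_insn(JMP32_INSN_OPCODE, ip, addr); the userspace sketch below (not kernel code) assembles the same five bytes by hand. The addresses are made up, and the printed byte order assumes a little-endian host, as on x86 itself.

```c
/* Build the 5-byte "JMP rel32" used to redirect a patched function. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JMP_E9_INSN_SIZE 5

static void gen_jmp_e9(uint8_t insn[JMP_E9_INSN_SIZE],
		       uint64_t ip, uint64_t addr)
{
	/* displacement is relative to the byte after the instruction */
	int32_t rel = (int32_t)(addr - (ip + JMP_E9_INSN_SIZE));

	insn[0] = 0xE9;				/* JMP rel32 opcode */
	memcpy(&insn[1], &rel, sizeof(rel));	/* little-endian immediate */
}

int main(void)
{
	uint8_t insn[JMP_E9_INSN_SIZE];
	int i;

	/* old function at 0x1000, replacement at 0x4000 (illustrative) */
	gen_jmp_e9(insn, 0x1000, 0x4000);
	for (i = 0; i < JMP_E9_INSN_SIZE; i++)
		printf("%02x ", insn[i]);
	printf("\n");	/* prints: e9 fb 2f 00 00 */
	return 0;
}
```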
+ */ +static bool is_jump_insn(u8 *insn) +{ + if ((insn[0] == 0xE8) || (insn[0] == 0x9a)) + return true; + else if ((insn[0] == 0xFF) && ((insn[1] & 0x30) == 0x10)) + return true; + return false; +} + +bool arch_check_jump_insn(unsigned long func_addr) +{ + int len = JMP_E9_INSN_SIZE; + struct insn insn; + u8 *addr = (u8 *)func_addr; + + do { + if (is_jump_insn(addr)) + return true; + insn_init(&insn, addr, MAX_INSN_SIZE, 1); + insn_get_length(&insn); + if (!insn.length || !insn_complete(&insn)) + return true; + len -= insn.length; + addr += insn.length; + } while (len > 0); + + return false; +} + +static void klp_print_stack_trace(void *trace_ptr, int trace_len) +{ + int i; +#ifdef CONFIG_ARCH_STACKWALK + unsigned long *trace = trace_ptr; +#else + struct stack_trace *trace = trace_ptr; +#endif + + pr_err("Call Trace:\n"); +#ifdef CONFIG_ARCH_STACKWALK + for (i = 0; i < trace_len; i++) { + pr_err("[<%pK>] %pS\n", + (void *)trace[i], + (void *)trace[i]); + } +#else + for (i = 0; i < trace->nr_entries; i++) { + pr_err("[<%pK>] %pS\n", + (void *)trace->entries[i], + (void *)trace->entries[i]); + } +#endif + +} + +#ifdef MAX_STACK_ENTRIES +#undef MAX_STACK_ENTRIES +#endif +#define MAX_STACK_ENTRIES 100 + +static int klp_check_stack(void *trace_ptr, int trace_len, + bool (*fn)(void *, int *, unsigned long), void *data) +{ +#ifdef CONFIG_ARCH_STACKWALK + unsigned long *trace = trace_ptr; +#else + struct stack_trace *trace = trace_ptr; +#endif + unsigned long address; + int i, ret; + +#ifdef CONFIG_ARCH_STACKWALK + for (i = 0; i < trace_len; i++) { + address = trace[i]; +#else + for (i = 0; i < trace->nr_entries; i++) { + address = trace->entries[i]; +#endif + if (!fn(data, &ret, address)) { +#ifdef CONFIG_ARCH_STACKWALK + klp_print_stack_trace(trace_ptr, trace_len); +#else + klp_print_stack_trace(trace_ptr, 0); +#endif + return ret; + } + } + + return 0; +} + +static int check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) +{ + int ret = 0; + static unsigned long trace_entries[MAX_STACK_ENTRIES]; +#ifdef CONFIG_ARCH_STACKWALK + int trace_len; +#else + struct stack_trace trace; +#endif + +#ifdef CONFIG_ARCH_STACKWALK + ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); + if (ret < 0) { + pr_err("%s:%d has an unreliable stack, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + trace_len = ret; + ret = klp_check_stack(trace_entries, trace_len, fn, data); +#else + trace.skip = 0; + trace.nr_entries = 0; + trace.max_entries = MAX_STACK_ENTRIES; + trace.entries = trace_entries; + ret = save_stack_trace_tsk_reliable(t, &trace); + if (ret) { + pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", + __func__, t->comm, t->pid, ret); + return ret; + } + ret = klp_check_stack(&trace, 0, fn, data); +#endif + if (ret) { + pr_err("%s:%d check stack failed, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + return 0; +} + +static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + int ret = 0; + struct task_struct *g, *t; + unsigned int cpu; + + for_each_process_thread(g, t) { + if (klp_is_migration_thread(t->comm)) + continue; + + ret = check_task_calltrace(t, fn, data); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), fn, data); + if (ret) + return ret; + } + return 0; +} + +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + return do_check_calltrace(check_func, data); +} + +void 
arch_klp_code_modify_prepare(void) + __acquires(&text_mutex) +{ + mutex_lock(&text_mutex); +} + +void arch_klp_code_modify_post_process(void) + __releases(&text_mutex) +{ + text_poke_sync(); + mutex_unlock(&text_mutex); +} + +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + long ret; + + /* Prevent text modification */ + mutex_lock(&text_mutex); + ret = copy_from_kernel_nofault(arch_data->old_insns, + old_func, JMP_E9_INSN_SIZE); + mutex_unlock(&text_mutex); + + return ret; +} + +static void klp_patch_text(void *dst, const void *src, int len) +{ + if (len <= 1) + return; + /* skip breakpoint at first */ + text_poke(dst + 1, src + 1, len - 1); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + /* update jmp opcode */ + text_poke(dst, src, 1); +} + +static void *klp_jmp_code(unsigned long ip, unsigned long addr) +{ + return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr); +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + unsigned long ip, new_addr; + unsigned char *new; + + func_node = func->func_node; + ip = (unsigned long)func->old_func; + list_add_rcu(&func->stack_node, &func_node->func_stack); + new_addr = (unsigned long)func->new_func; + /* replace the text with the new text */ + new = (unsigned char *)klp_jmp_code(ip, new_addr); + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); + return 0; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long ip, new_addr; + void *new; + + func_node = func->func_node; + ip = (unsigned long)func_node->old_func; + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + new = func_node->arch_data.old_insns; + } else { + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + + new_addr = (unsigned long)next_func->new_func; + new = klp_jmp_code(ip, new_addr); + } + + /* replace the text with the new text */ + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); +} diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 5f71a0cf4399a577e1235f6040a70fb26ee56346..9b75f1e02d6071751b7e3820aa89edd046fe9cd9 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -258,7 +258,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, return write_relocate_add(sechdrs, strtab, symindex, relsec, me, true); } -#ifdef CONFIG_LIVEPATCH +#ifdef CONFIG_LIVEPATCH_FTRACE void clear_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 55e371cc69fd5d00670a08983335e3bddfff9571..ac7ac472eb7deef5c5db1403efc71ca7c2d95d5e 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -715,13 +715,6 @@ int insn_get_length(struct insn *insn) return 0; } -/* Ensure this instruction is decoded completely */ -static inline int insn_complete(struct insn *insn) -{ - return insn->opcode.got && insn->modrm.got && insn->sib.got && - insn->displacement.got && insn->immediate.got; -} - /** * insn_decode() - Decode an x86 instruction * @insn: &struct insn to be initialized diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f0a949b7c9733ce3209fb3adc561ac2f4d93d3ca..39386db201a2891a05a02102941efcdd63e5f0c7 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -235,6 +235,9 @@ extern void 
static_key_disable(struct static_key *key); extern void static_key_enable_cpuslocked(struct static_key *key); extern void static_key_disable_cpuslocked(struct static_key *key); extern enum jump_label_type jump_label_init_type(struct jump_entry *entry); +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +extern int jump_label_register(struct module *mod); +#endif /* * We should be using ATOMIC_INIT() for initializing .enabled, but @@ -314,6 +317,13 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +static inline int jump_label_register(struct module *mod) +{ + return 0; +} +#endif + static inline void static_key_enable(struct static_key *key) { STATIC_KEY_CHECK_USE(key); diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9b9b38e8956352d6f102ca1e28db51a4d9095d5d..e91b2adc8f7e01fbe883ece535759f720062a1d2 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -17,11 +17,17 @@ #if IS_ENABLED(CONFIG_LIVEPATCH) +#include + /* task patch states */ #define KLP_UNDEFINED -1 #define KLP_UNPATCHED 0 #define KLP_PATCHED 1 +#define KLP_NORMAL_FORCE 0 +#define KLP_ENFORCEMENT 1 +#define KLP_STACK_OPTIMIZE 2 + /** * struct klp_func - function structure for live patching * @old_name: name of the function to be patched @@ -65,6 +71,7 @@ struct klp_func { * in kallsyms for the given object is used. */ unsigned long old_sympos; + int force; /* Only used in the solution without ftrace */ /* internal */ void *old_func; @@ -72,10 +79,19 @@ struct klp_func { struct list_head node; struct list_head stack_node; unsigned long old_size, new_size; - bool nop; + bool nop; /* Not used in the solution without ftrace */ bool patched; +#ifdef CONFIG_LIVEPATCH_FTRACE bool transition; +#endif + void *func_node; /* Only used in the solution without ftrace */ +}; + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +struct klp_hook { + void (*hook)(void); }; +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ struct klp_object; @@ -118,14 +134,18 @@ struct klp_object { /* external */ const char *name; struct klp_func *funcs; - struct klp_callbacks callbacks; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + struct klp_hook *hooks_load; + struct klp_hook *hooks_unload; +#endif + struct klp_callbacks callbacks; /* Not used in the solution without ftrace */ /* internal */ struct kobject kobj; struct list_head func_list; struct list_head node; struct module *mod; - bool dynamic; + bool dynamic; /* Not used in the solution without ftrace */ bool patched; }; @@ -159,15 +179,15 @@ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; - struct klp_state *states; - bool replace; + struct klp_state *states; /* Not used in the solution without ftrace */ + bool replace; /* Not used in the solution without ftrace */ /* internal */ struct list_head list; struct kobject kobj; struct list_head obj_list; bool enabled; - bool forced; + bool forced; /* Not used in the solution without ftrace */ struct work_struct free_work; struct completion finish; }; @@ -192,6 +212,7 @@ struct klp_patch { #define klp_for_each_func(obj, func) \ list_for_each_entry(func, &obj->func_list, node) +#ifdef CONFIG_LIVEPATCH_FTRACE int klp_enable_patch(struct klp_patch *); /* Called from the module loader during module coming/going states */ @@ -230,6 +251,70 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id); struct 
klp_state *klp_get_prev_state(unsigned long id); +#else /* !CONFIG_LIVEPATCH_FTRACE */ + +struct klp_func_node { + struct list_head node; + struct list_head func_stack; + void *old_func; + struct arch_klp_data arch_data; +}; + +static inline +int klp_compare_address(unsigned long pc, unsigned long func_addr, + const char *func_name, unsigned long check_size) +{ + if (pc >= func_addr && pc < func_addr + check_size) { + pr_warn("func %s is in use!\n", func_name); + /* Return -EAGAIN for next retry */ + return -EAGAIN; + } + return 0; +} + +typedef int (*klp_add_func_t)(struct list_head *func_list, + unsigned long func_addr, unsigned long func_size, + const char *func_name, int force); + +struct walk_stackframe_args { + void *data; + int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); +}; + +#ifndef klp_smp_isb +#define klp_smp_isb() +#endif + +#define KLP_MIGRATION_NAME_PREFIX "migration/" +static inline bool klp_is_migration_thread(const char *task_name) +{ + /* + * current on other CPU + * we call this in stop_machine, so the current + * of each CPUs is migration, just compare the + * task_comm here, because we can't get the + * cpu_curr(task_cpu(t))). This assumes that no + * other thread will pretend to be a stopper via + * task_comm. + */ + return !strncmp(task_name, KLP_MIGRATION_NAME_PREFIX, + sizeof(KLP_MIGRATION_NAME_PREFIX) - 1); +} + +int klp_register_patch(struct klp_patch *patch); +int klp_unregister_patch(struct klp_patch *patch); +static inline int klp_module_coming(struct module *mod) { return 0; } +static inline void klp_module_going(struct module *mod) {} +static inline bool klp_patch_pending(struct task_struct *task) { return false; } +static inline void klp_update_patch_state(struct task_struct *task) {} +static inline void klp_copy_process(struct task_struct *child) {} +static inline bool klp_have_reliable_stack(void) { return true; } +extern void module_enable_ro(const struct module *mod, bool after_init); +extern void module_disable_ro(const struct module *mod); + +#endif /* CONFIG_LIVEPATCH_FTRACE */ + int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, const char *shstrtab, const char *strtab, unsigned int symindex, unsigned int secindex, diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h index 013794fb5da081e741edad8684c4e689a60e8b2d..7fe69bb59a16eca80bb97a92482c76365d438ddf 100644 --- a/include/linux/livepatch_sched.h +++ b/include/linux/livepatch_sched.h @@ -5,7 +5,7 @@ #include #include -#ifdef CONFIG_LIVEPATCH +#ifdef CONFIG_LIVEPATCH_FTRACE void __klp_sched_try_switch(void); @@ -21,9 +21,9 @@ static __always_inline void klp_sched_try_switch(void) #endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ -#else /* !CONFIG_LIVEPATCH */ +#else /* !CONFIG_LIVEPATCH_FTRACE */ static inline void klp_sched_try_switch(void) {} static inline void __klp_sched_try_switch(void) {} -#endif /* CONFIG_LIVEPATCH */ +#endif /* CONFIG_LIVEPATCH_FTRACE */ #endif /* _LINUX_LIVEPATCH_SCHED_H_ */ diff --git a/include/linux/module.h b/include/linux/module.h index a98e188cf37b8182b8654b3be069f93ce1d97174..c6ee29331e87d078d64be29c3dd01d6ec27ac19f 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -383,6 +383,12 @@ struct mod_kallsyms { }; #ifdef CONFIG_LIVEPATCH +enum MODULE_KLP_REL_STATE { + MODULE_KLP_REL_NONE = 0, + MODULE_KLP_REL_UNDO, + MODULE_KLP_REL_DONE, +}; + /** * struct klp_modinfo - ELF information preserved from the livepatch module * @@ -551,6 +557,19 @@ struct module { /* ELF 
information */ struct klp_modinfo *klp_info; + /* + * livepatch should relocate the key of jump_label by + * using klp_apply_section_relocs. So it's necessary to + * do jump_label_apply_nops() and jump_label_add_module() + * later after livepatch relocation finised. + * + * for normal module : + * always MODULE_KLP_REL_DONE. + * for livepatch module : + * init as MODULE_KLP_REL_UNDO, + * set to MODULE_KLP_REL_DONE when relocate completed. + */ + enum MODULE_KLP_REL_STATE klp_rel_state; /* Only used in the solution without ftrace */ #endif #ifdef CONFIG_PRINTK_INDEX @@ -750,6 +769,20 @@ static inline bool is_livepatch_module(struct module *mod) #endif } +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +static inline void set_mod_klp_rel_state(struct module *mod, + enum MODULE_KLP_REL_STATE state) +{ + mod->klp_rel_state = state; +} + +static inline bool mod_klp_rel_completed(struct module *mod) +{ + return mod->klp_rel_state == MODULE_KLP_REL_NONE || + mod->klp_rel_state == MODULE_KLP_REL_DONE; +} +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + void set_module_sig_enforced(void); #else /* !CONFIG_MODULES... */ diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 001b2ce83832ed2de2e25b3427aaad5788b2ac3b..176fef9870b97dc53e532d288dad914d25911d18 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -80,7 +80,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, unsigned int symindex, unsigned int relsec, struct module *mod); -#ifdef CONFIG_LIVEPATCH +#ifdef CONFIG_LIVEPATCH_FTRACE /* * Some architectures (namely x86_64 and ppc64) perform sanity checks when * applying relocations. If a patched module gets unloaded and then later @@ -121,6 +121,8 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); +void flush_module_icache(const struct module *mod); + #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ !defined(CONFIG_KASAN_VMALLOC) #include diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 141e6b176a1b308c89cedf3481ba7592c6d974a8..a2c6241c09f29d1876e9236607ced5e38f911027 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -343,4 +343,10 @@ static inline int static_call_text_reserved(void *start, void *end) #endif /* CONFIG_HAVE_STATIC_CALL */ +#if defined(CONFIG_HAVE_STATIC_CALL_INLINE) && defined(CONFIG_LIVEPATCH_WO_FTRACE) +int klp_static_call_register(struct module *mod); +#else +static inline int klp_static_call_register(struct module *mod) { return 0; } +#endif + #endif /* _LINUX_STATIC_CALL_H */ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index d9c822bbffb8d3c8977d410e683bb8a7c578f3c4..97c06f095358fc11d566adbfb4c91912dbd73684 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -628,6 +628,11 @@ static int jump_label_add_module(struct module *mod) struct static_key *key = NULL; struct static_key_mod *jlm, *jlm2; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (unlikely(!mod_klp_rel_completed(mod))) + return 0; +#endif + /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) return 0; @@ -690,6 +695,11 @@ static void jump_label_del_module(struct module *mod) struct static_key *key = NULL; struct static_key_mod *jlm, **prev; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (unlikely(!mod_klp_rel_completed(mod))) + return; +#endif + for (iter = iter_start; iter < iter_stop; iter++) { if (jump_entry_key(iter) == key) continue; @@ -766,6 +776,18 @@ static 
struct notifier_block jump_label_module_nb = { .priority = 1, /* higher than tracepoints */ }; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +int jump_label_register(struct module *mod) +{ + int ret; + + ret = jump_label_module_notify(&jump_label_module_nb, + MODULE_STATE_COMING, mod); + + return notifier_to_errno(ret); +} +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + static __init int jump_label_init_module(void) { return register_module_notifier(&jump_label_module_nb); diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index 53d51ed619a3d53a583c86a0ac10e363877296c8..ad15685dfd53528ffa790682e4fbc23454a1b9a8 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -1,20 +1,90 @@ # SPDX-License-Identifier: GPL-2.0-only -config HAVE_LIVEPATCH +config HAVE_LIVEPATCH_FTRACE bool help - Arch supports kernel live patching + Arch supports kernel live patching based on ftrace + +config HAVE_LIVEPATCH_WO_FTRACE + bool + help + Arch supports kernel live patching without ftrace + +menu "Enable Livepatch" config LIVEPATCH bool "Kernel Live Patching" - depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS + depends on (HAVE_LIVEPATCH_FTRACE && (DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS)) || (HAVE_LIVEPATCH_WO_FTRACE && DEBUG_INFO) depends on MODULES depends on SYSFS depends on KALLSYMS_ALL - depends on HAVE_LIVEPATCH depends on !TRIM_UNUSED_KSYMS + default n help Say Y here if you want to support kernel live patching. This option has no runtime impact until a kernel "patch" module uses the interface provided by this option to register a patch, causing calls to patched functions to be redirected to new function code contained in the patch module. + +choice + prompt "live patching method" + depends on LIVEPATCH + help + Live patching implementation method configuration. + Choose an interested live patching solution which will + allow calls to patched functions to be redirected to new + function code contained in the patch module. + +config LIVEPATCH_FTRACE + bool "based on ftrace" + depends on HAVE_LIVEPATCH_FTRACE + depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS + help + Supports kernel live patching based on ftrace. + This is the original implementation of kernel live + patching which is just renamed to distinguish from + another live patching solution. + +config LIVEPATCH_WO_FTRACE + bool "without ftrace" + depends on HAVE_LIVEPATCH_WO_FTRACE + depends on DEBUG_INFO + select LIVEPATCH_STOP_MACHINE_CONSISTENCY + help + Supports kernel live patching without ftrace. + This solution will patch the first few instructions + of a function so that caller of it will jump to + another expected function. + Note that this patching solution would not handle conflict + with other patching technologies (i.e. ftrace, kprobe), + please avoid acting them on the same function! + +endchoice + +config LIVEPATCH_STOP_MACHINE_CONSISTENCY + bool "Stop machine consistency" + depends on LIVEPATCH_WO_FTRACE + help + Use stop machine consistency model + stop-machine consistency and kpatch's stack + trace checking. + +config LIVEPATCH_STACK + bool "Enforcing the patch stacking principle" + depends on LIVEPATCH_WO_FTRACE + default y + help + Say N here if you want to remove the patch stacking principle. + +config LIVEPATCH_RESTRICT_KPROBE + bool "Enforing check livepatch and kprobe restrict" + depends on LIVEPATCH_WO_FTRACE + depends on KPROBES + default y + help + Livepatch without ftrace and kprobe are conflicting. 
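The LIVEPATCH_STOP_MACHINE_CONSISTENCY option described above pairs stop_machine() with a kpatch-style stack check: while all CPUs are held, every task's saved stack PCs are compared against the code range being replaced, and the transition is retried if any PC still lies inside the old function. A minimal userspace sketch of that predicate follows (not kernel code); the klp_compare_address() body matches the helper added to include/linux/livepatch.h in this patch, but pr_warn() is replaced by fprintf(), and the PC values, function size, and loop driver are invented for illustration.

```c
/* Sketch of the per-PC check behind the stop-machine consistency model. */
#include <errno.h>
#include <stdio.h>

static int klp_compare_address(unsigned long pc, unsigned long func_addr,
			       const char *func_name, unsigned long check_size)
{
	if (pc >= func_addr && pc < func_addr + check_size) {
		fprintf(stderr, "func %s is in use!\n", func_name);
		return -EAGAIN;		/* caller retries the transition later */
	}
	return 0;
}

int main(void)
{
	/* pretend the patched function occupies [0x1000, 0x1040) */
	unsigned long old_func = 0x1000, size = 0x40;
	unsigned long stack_pcs[] = { 0x2230, 0x1010, 0x5ff8 };
	unsigned long i;
	int ret = 0;

	for (i = 0; i < sizeof(stack_pcs) / sizeof(stack_pcs[0]); i++) {
		ret = klp_compare_address(stack_pcs[i], old_func,
					  "demo_func", size);
		if (ret)
			break;
	}
	printf("check result: %d\n", ret);	/* -EAGAIN: 0x1010 is inside */
	return 0;
}
```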
+ We should not patch for the functions where registered with kprobe, + and vice versa. + Say Y here if you want to check those. + +endmenu diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile index cf03d4bdfc663f97078aaf09b69427b22538c7c4..facf512b237a6c8bcde6cb172068e8fa32c0a4fd 100644 --- a/kernel/livepatch/Makefile +++ b/kernel/livepatch/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_LIVEPATCH) += livepatch.o -livepatch-objs := core.o patch.o shadow.o state.o transition.o +obj-$(CONFIG_LIVEPATCH_FTRACE) += patch.o shadow.o state.o transition.o +livepatch-objs := core.o diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index ecbc9b6aba3a10bcadecac960bf6ee09f2419d20..f06175c2dd1c0b00d954c072100055f34534e3f0 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -4,6 +4,7 @@ * * Copyright (C) 2014 Seth Jennings * Copyright (C) 2014 SUSE + * Copyright (C) 2023 Huawei Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -22,9 +23,19 @@ #include #include #include "core.h" +#ifdef CONFIG_LIVEPATCH_FTRACE #include "patch.h" #include "state.h" #include "transition.h" +#else /* !CONFIG_LIVEPATCH_FTRACE */ +#include +#include +#include +#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE +#include +#endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */ +#include +#endif /* CONFIG_LIVEPATCH_FTRACE */ /* * klp_mutex is a coarse lock which serializes access to klp data. All @@ -51,6 +62,7 @@ static bool klp_is_module(struct klp_object *obj) return obj->name; } +#ifdef CONFIG_LIVEPATCH_FTRACE /* sets obj->mod if object is not vmlinux and module is found */ static void klp_find_object_module(struct klp_object *obj) { @@ -78,12 +90,16 @@ static void klp_find_object_module(struct klp_object *obj) rcu_read_unlock_sched(); } +#else /* !CONFIG_LIVEPATCH_FTRACE */ +static int klp_find_object_module(struct klp_object *obj); +#endif /* CONFIG_LIVEPATCH_FTRACE */ static bool klp_initialized(void) { return !!klp_root_kobj; } +#ifdef CONFIG_LIVEPATCH_FTRACE static struct klp_func *klp_find_func(struct klp_object *obj, struct klp_func *old_func) { @@ -117,6 +133,7 @@ static struct klp_object *klp_find_object(struct klp_patch *patch, return NULL; } +#endif /* CONFIG_LIVEPATCH_FTRACE */ struct klp_find_arg { const char *name; @@ -196,7 +213,11 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, int i, cnt, ret; char sym_objname[MODULE_NAME_LEN]; char sym_name[KSYM_NAME_LEN]; +#ifdef CONFIG_MODULES_USE_ELF_RELA Elf_Rela *relas; +#else + Elf_Rel *relas; +#endif Elf_Sym *sym; unsigned long sympos, addr; bool sym_vmlinux; @@ -214,9 +235,13 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, */ BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512); +#ifdef CONFIG_MODULES_USE_ELF_RELA relas = (Elf_Rela *) relasec->sh_addr; +#else + relas = (Elf_Rel *) relasec->sh_addr; +#endif /* For each rela in this klp relocation section */ - for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) { + for (i = 0; i < relasec->sh_size / sizeof(*relas); i++) { sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info); if (sym->st_shndx != SHN_LIVEPATCH) { pr_err("symbol %s is not marked as a livepatch symbol\n", @@ -260,6 +285,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, return 0; } +#ifdef CONFIG_LIVEPATCH_FTRACE void __weak clear_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, @@ -267,6 +293,7 @@ void __weak clear_relocate_add(Elf_Shdr *sechdrs, struct module *me) { 
} +#endif /* * At a high-level, there are two types of klp relocation sections: those which @@ -322,10 +349,16 @@ static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, if (ret) return ret; +#ifdef CONFIG_MODULES_USE_ELF_RELA return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod); +#else + return apply_relocate(sechdrs, strtab, symndx, secndx, pmod); +#endif } +#ifdef CONFIG_LIVEPATCH_FTRACE clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod); +#endif return 0; } @@ -350,6 +383,8 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, * /sys/kernel/livepatch///patched * /sys/kernel/livepatch/// */ +#ifdef CONFIG_LIVEPATCH_FTRACE + static int __klp_disable_patch(struct klp_patch *patch); static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, @@ -395,6 +430,33 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, return count; } +static inline void klp_module_enable_ro(const struct module *mod, bool after_init) {} +static inline void klp_module_disable_ro(const struct module *mod) {} + +#else /* !CONFIG_LIVEPATCH_FTRACE */ + +static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); +static inline int klp_load_hook(struct klp_object *obj); +static inline int klp_unload_hook(struct klp_object *obj); +static int check_address_conflict(struct klp_patch *patch); + +static void klp_module_enable_ro(const struct module *mod, bool after_init) +{ +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) + module_enable_ro(mod, after_init); +#endif +} + +static void klp_module_disable_ro(const struct module *mod) +{ +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) + module_disable_ro(mod); +#endif +} + +#endif /* CONFIG_LIVEPATCH_FTRACE */ + static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -404,6 +466,7 @@ static ssize_t enabled_show(struct kobject *kobj, return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled); } +#ifdef CONFIG_LIVEPATCH_FTRACE static ssize_t transition_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -442,18 +505,24 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, return count; } +#endif /* CONFIG_LIVEPATCH_FTRACE */ static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); +#ifdef CONFIG_LIVEPATCH_FTRACE static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); +#endif /* CONFIG_LIVEPATCH_FTRACE */ static struct attribute *klp_patch_attrs[] = { &enabled_kobj_attr.attr, +#ifdef CONFIG_LIVEPATCH_FTRACE &transition_kobj_attr.attr, &force_kobj_attr.attr, +#endif /* CONFIG_LIVEPATCH_FTRACE */ NULL }; ATTRIBUTE_GROUPS(klp_patch); +#ifdef CONFIG_LIVEPATCH_FTRACE static ssize_t patched_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -475,12 +544,14 @@ static void klp_free_object_dynamic(struct klp_object *obj) kfree(obj->name); kfree(obj); } +#endif /* CONFIG_LIVEPATCH_FTRACE */ static void klp_init_func_early(struct klp_object *obj, struct klp_func *func); static void klp_init_object_early(struct klp_patch *patch, struct klp_object *obj); +#ifdef CONFIG_LIVEPATCH_FTRACE static struct klp_object *klp_alloc_object_dynamic(const char *name, struct klp_patch *patch) { @@ -587,6 +658,7 @@ static int klp_add_nops(struct klp_patch *patch) return 0; } +#endif /* CONFIG_LIVEPATCH_FTRACE */ static void 
klp_kobj_release_patch(struct kobject *kobj) { @@ -604,28 +676,34 @@ static const struct kobj_type klp_ktype_patch = { static void klp_kobj_release_object(struct kobject *kobj) { +#ifdef CONFIG_LIVEPATCH_FTRACE struct klp_object *obj; obj = container_of(kobj, struct klp_object, kobj); if (obj->dynamic) klp_free_object_dynamic(obj); +#endif } static const struct kobj_type klp_ktype_object = { .release = klp_kobj_release_object, .sysfs_ops = &kobj_sysfs_ops, +#ifdef CONFIG_LIVEPATCH_FTRACE .default_groups = klp_object_groups, +#endif }; static void klp_kobj_release_func(struct kobject *kobj) { +#ifdef CONFIG_LIVEPATCH_FTRACE struct klp_func *func; func = container_of(kobj, struct klp_func, kobj); if (func->nop) klp_free_func_nop(func); +#endif } static const struct kobj_type klp_ktype_func = { @@ -638,14 +716,17 @@ static void __klp_free_funcs(struct klp_object *obj, bool nops_only) struct klp_func *func, *tmp_func; klp_for_each_func_safe(obj, func, tmp_func) { +#ifdef CONFIG_LIVEPATCH_FTRACE if (nops_only && !func->nop) continue; +#endif list_del(&func->node); kobject_put(&func->kobj); } } +#ifdef CONFIG_LIVEPATCH_FTRACE /* Clean up when a patched object is unloaded */ static void klp_free_object_loaded(struct klp_object *obj) { @@ -660,17 +741,24 @@ static void klp_free_object_loaded(struct klp_object *obj) func->new_func = NULL; } } +#endif /* CONFIG_LIVEPATCH_FTRACE */ static void __klp_free_objects(struct klp_patch *patch, bool nops_only) { struct klp_object *obj, *tmp_obj; klp_for_each_object_safe(patch, obj, tmp_obj) { +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (klp_is_module(obj) && obj->mod) { + module_put(obj->mod); + obj->mod = NULL; + } +#endif __klp_free_funcs(obj, nops_only); - +#ifdef CONFIG_LIVEPATCH_FTRACE if (nops_only && !obj->dynamic) continue; - +#endif list_del(&obj->node); kobject_put(&obj->kobj); } @@ -681,10 +769,12 @@ static void klp_free_objects(struct klp_patch *patch) __klp_free_objects(patch, false); } +#ifdef CONFIG_LIVEPATCH_FTRACE static void klp_free_objects_dynamic(struct klp_patch *patch) { __klp_free_objects(patch, true); } +#endif /* CONFIG_LIVEPATCH_FTRACE */ /* * This function implements the free operations that can be called safely @@ -720,9 +810,13 @@ static void klp_free_patch_finish(struct klp_patch *patch) kobject_put(&patch->kobj); wait_for_completion(&patch->finish); +#ifdef CONFIG_LIVEPATCH_FTRACE /* Put the module after the last access to struct klp_patch. */ if (!patch->forced) module_put(patch->mod); +#else + module_put(patch->mod); +#endif /* CONFIG_LIVEPATCH_FTRACE */ } /* @@ -738,6 +832,7 @@ static void klp_free_patch_work_fn(struct work_struct *work) klp_free_patch_finish(patch); } +#ifdef CONFIG_LIVEPATCH_FTRACE void klp_free_patch_async(struct klp_patch *patch) { klp_free_patch_start(patch); @@ -754,25 +849,33 @@ void klp_free_replaced_patches_async(struct klp_patch *new_patch) klp_free_patch_async(old_patch); } } +#endif /* CONFIG_LIVEPATCH_FTRACE */ static int klp_init_func(struct klp_object *obj, struct klp_func *func) { if (!func->old_name) return -EINVAL; +#ifdef CONFIG_LIVEPATCH_FTRACE /* * NOPs get the address later. The patched module must be loaded, * see klp_init_object_loaded(). 
*/ if (!func->new_func && !func->nop) return -EINVAL; +#else /* !CONFIG_LIVEPATCH_FTRACE */ + if (!func->new_func) + return -EINVAL; +#endif /* CONFIG_LIVEPATCH_FTRACE */ if (strlen(func->old_name) >= KSYM_NAME_LEN) return -EINVAL; INIT_LIST_HEAD(&func->stack_node); func->patched = false; +#ifdef CONFIG_LIVEPATCH_FTRACE func->transition = false; +#endif /* The format for the sysfs directory is where sympos * is the nth occurrence of this symbol in kallsyms for the patched @@ -814,11 +917,13 @@ static int klp_apply_object_relocs(struct klp_patch *patch, return klp_write_object_relocs(patch, obj, true); } +#ifdef CONFIG_LIVEPATCH_FTRACE static void klp_clear_object_relocs(struct klp_patch *patch, struct klp_object *obj) { klp_write_object_relocs(patch, obj, false); } +#endif /* CONFIG_LIVEPATCH_FTRACE */ /* parts of the initialization that is done only when the object is loaded */ static int klp_init_object_loaded(struct klp_patch *patch, @@ -827,6 +932,7 @@ static int klp_init_object_loaded(struct klp_patch *patch, struct klp_func *func; int ret; + klp_module_disable_ro(patch->mod); if (klp_is_module(obj)) { /* * Only write module-specific relocations here @@ -835,9 +941,12 @@ static int klp_init_object_loaded(struct klp_patch *patch, * itself. */ ret = klp_apply_object_relocs(patch, obj); - if (ret) + if (ret) { + klp_module_enable_ro(patch->mod, true); return ret; + } } + klp_module_enable_ro(patch->mod, true); klp_for_each_func(obj, func) { ret = klp_find_object_symbol(obj->name, func->old_name, @@ -848,15 +957,29 @@ static int klp_init_object_loaded(struct klp_patch *patch, ret = kallsyms_lookup_size_offset((unsigned long)func->old_func, &func->old_size, NULL); +#ifdef CONFIG_LIVEPATCH_FTRACE if (!ret) { pr_err("kallsyms size lookup failed for '%s'\n", func->old_name); return -ENOENT; } +#else /* !CONFIG_LIVEPATCH_FTRACE */ + if (!ret || ((long)func->old_size < 0)) { + pr_err("kallsyms size lookup failed for '%s'\n", + func->old_name); + return -ENOENT; + } + if (func->old_size < KLP_MAX_REPLACE_SIZE) { + pr_err("%s size less than limit (%lu < %zu)\n", func->old_name, + func->old_size, KLP_MAX_REPLACE_SIZE); + return -EINVAL; + } +#endif /* CONFIG_LIVEPATCH_FTRACE */ +#ifdef CONFIG_LIVEPATCH_FTRACE if (func->nop) func->new_func = func->old_func; - +#endif ret = kallsyms_lookup_size_offset((unsigned long)func->new_func, &func->new_size, NULL); if (!ret) { @@ -869,6 +992,7 @@ static int klp_init_object_loaded(struct klp_patch *patch, return 0; } +#ifdef CONFIG_LIVEPATCH_FTRACE static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) { struct klp_func *func; @@ -899,12 +1023,18 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) return ret; } +#else /* !CONFIG_LIVEPATCH_FTRACE */ +static int klp_init_object(struct klp_patch *patch, struct klp_object *obj); +#endif /* CONFIG_LIVEPATCH_FTRACE */ static void klp_init_func_early(struct klp_object *obj, struct klp_func *func) { kobject_init(&func->kobj, &klp_ktype_func); list_add_tail(&func->node, &obj->func_list); +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + func->func_node = NULL; +#endif } static void klp_init_object_early(struct klp_patch *patch, @@ -913,6 +1043,9 @@ static void klp_init_object_early(struct klp_patch *patch, INIT_LIST_HEAD(&obj->func_list); kobject_init(&obj->kobj, &klp_ktype_object); list_add_tail(&obj->node, &patch->obj_list); +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + obj->mod = NULL; +#endif } static void klp_init_patch_early(struct klp_patch *patch) @@ -924,7 +1057,9 @@ static void 
klp_init_patch_early(struct klp_patch *patch) INIT_LIST_HEAD(&patch->obj_list); kobject_init(&patch->kobj, &klp_ktype_patch); patch->enabled = false; +#ifdef CONFIG_LIVEPATCH_FTRACE patch->forced = false; +#endif INIT_WORK(&patch->free_work, klp_free_patch_work_fn); init_completion(&patch->finish); @@ -946,11 +1081,13 @@ static int klp_init_patch(struct klp_patch *patch) if (ret) return ret; +#ifdef CONFIG_LIVEPATCH_FTRACE if (patch->replace) { ret = klp_add_nops(patch); if (ret) return ret; } +#endif klp_for_each_object(patch, obj) { ret = klp_init_object(patch, obj); @@ -958,11 +1095,45 @@ static int klp_init_patch(struct klp_patch *patch) return ret; } +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + flush_module_icache(patch->mod); + set_mod_klp_rel_state(patch->mod, MODULE_KLP_REL_DONE); + klp_module_disable_ro(patch->mod); + ret = jump_label_register(patch->mod); + if (ret) { + klp_module_enable_ro(patch->mod, true); + pr_err("register jump label failed, ret=%d\n", ret); + return ret; + } + ret = klp_static_call_register(patch->mod); + if (ret) { + /* + * There is no need to explicitly clean up the already registered + * jump_labels here because they will be cleaned up on the path: + * load_module + * do_init_module + * fail_free_freeinit: <-- notify GOING here + */ + klp_module_enable_ro(patch->mod, true); + pr_err("register static call failed, ret=%d\n", ret); + return ret; + } + klp_module_enable_ro(patch->mod, true); + + ret = check_address_conflict(patch); + if (ret) + return ret; + + klp_for_each_object(patch, obj) + klp_load_hook(obj); +#endif + list_add_tail(&patch->list, &klp_patches); return 0; } +#ifdef CONFIG_LIVEPATCH_FTRACE static int __klp_disable_patch(struct klp_patch *patch) { struct klp_object *obj; @@ -1093,12 +1264,14 @@ int klp_enable_patch(struct klp_patch *patch) mutex_lock(&klp_mutex); +#ifdef CONFIG_LIVEPATCH_FTRACE if (!klp_is_patch_compatible(patch)) { pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n", patch->mod->name); mutex_unlock(&klp_mutex); return -EINVAL; } +#endif /* CONFIG_LIVEPATCH_FTRACE */ if (!try_module_get(patch->mod)) { mutex_unlock(&klp_mutex); @@ -1323,4 +1496,1066 @@ static int __init klp_init(void) return 0; } +#else /* !CONFIG_LIVEPATCH_FTRACE */ + +struct patch_data { + struct klp_patch *patch; + atomic_t cpu_count; +}; + +static bool klp_is_patch_registered(struct klp_patch *patch) +{ + struct klp_patch *mypatch; + + list_for_each_entry(mypatch, &klp_patches, list) + if (mypatch == patch) + return true; + + return false; +} + +static int check_address_conflict(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + int ret; + void *start; + void *end; + + /* + * The comment on jump_label_text_reserved() suggests that locking is + * required ("Caller must hold jump_label_mutex"). However, both + * jump_label_text_reserved() and static_call_text_reserved() only walk + * the call sites of every jump_label or static_call, and those sites + * do not change once the corresponding module has been loaded, so there + * is no need to take jump_label_lock and static_call_lock here.
+ */ + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + start = func->old_func; + end = start + KLP_MAX_REPLACE_SIZE - 1; + ret = jump_label_text_reserved(start, end); + if (ret) { + pr_err("'%s' has static key in first %zu bytes, ret=%d\n", + func->old_name, KLP_MAX_REPLACE_SIZE, ret); + return -EINVAL; + } + ret = static_call_text_reserved(start, end); + if (ret) { + pr_err("'%s' has static call in first %zu bytes, ret=%d\n", + func->old_name, KLP_MAX_REPLACE_SIZE, ret); + return -EINVAL; + } + } + } + return 0; +} + +static int state_show(struct seq_file *m, void *v) +{ + struct klp_patch *patch; + char *state; + int index = 0; + + seq_printf(m, "%-5s\t%-26s\t%-8s\n", "Index", "Patch", "State"); + seq_puts(m, "-----------------------------------------------\n"); + mutex_lock(&klp_mutex); + list_for_each_entry(patch, &klp_patches, list) { + if (patch->enabled) + state = "enabled"; + else + state = "disabled"; + + seq_printf(m, "%-5d\t%-26s\t%-8s\n", ++index, + patch->mod->name, state); + } + mutex_unlock(&klp_mutex); + seq_puts(m, "-----------------------------------------------\n"); + + return 0; +} + +static int klp_state_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, state_show, NULL); +} + +static const struct proc_ops proc_klpstate_operations = { + .proc_open = klp_state_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static inline int klp_load_hook(struct klp_object *obj) +{ + struct klp_hook *hook; + + if (!obj->hooks_load) + return 0; + + for (hook = obj->hooks_load; hook->hook; hook++) + (*hook->hook)(); + + return 0; +} + +static inline int klp_unload_hook(struct klp_object *obj) +{ + struct klp_hook *hook; + + if (!obj->hooks_unload) + return 0; + + for (hook = obj->hooks_unload; hook->hook; hook++) + (*hook->hook)(); + + return 0; +} + +static int klp_find_object_module(struct klp_object *obj) +{ + struct module *mod; + + if (!klp_is_module(obj)) + return 0; + + rcu_read_lock_sched(); + /* + * Take a reference on the module that contains the to-be-patched + * functions so that it cannot be removed while it is patched; the + * reference is dropped again when the patch is freed. + */ + mod = find_module(obj->name); + if (!mod) { + pr_err("module '%s' not loaded\n", obj->name); + rcu_read_unlock_sched(); + return -ENOPKG; /* the module to be patched is not loaded */ + } + + if (mod->state == MODULE_STATE_COMING || !try_module_get(mod)) { + rcu_read_unlock_sched(); + return -EINVAL; + } + + obj->mod = mod; + + rcu_read_unlock_sched(); + return 0; +} + +static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) +{ + struct klp_func *func; + int ret; + const char *name; + + if (klp_is_module(obj) && strnlen(obj->name, MODULE_NAME_LEN) >= MODULE_NAME_LEN) { + pr_err("obj name is too long\n"); + return -EINVAL; + } + klp_for_each_func(obj, func) { + if (!func->old_name) { + pr_err("old name is invalid\n"); + return -EINVAL; + } + /* + * NOPs get the address later. The patched module must be loaded, + * see klp_init_object_loaded(). + */ + if (!func->new_func && !func->nop) { + pr_err("new_func is invalid\n"); + return -EINVAL; + } + if (strlen(func->old_name) >= KSYM_NAME_LEN) { + pr_err("function old name is too long\n"); + return -EINVAL; + } + } + + obj->patched = false; + obj->mod = NULL; + + ret = klp_find_object_module(obj); + if (ret) + return ret; + + name = klp_is_module(obj) ?
obj->name : "vmlinux"; + ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name); + if (ret) + goto out; + + /* + * For livepatch without ftrace, we need to modify the first N + * instructions of the to-be-patched function, so we must check that + * the function is long enough to allow this modification. + * + * The check uses func->old_size internally, so klp_init_object_loaded() + * must be called first to fill in the klp_func struct. + */ + if (klp_is_object_loaded(obj)) { + ret = klp_init_object_loaded(patch, obj); + if (ret) + goto out; + } + + klp_for_each_func(obj, func) { + ret = klp_init_func(obj, func); + if (ret) + goto out; + } + + return 0; + +out: + if (klp_is_module(obj)) { + module_put(obj->mod); + obj->mod = NULL; + } + return ret; +} + +int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + return -EINVAL; +} + +bool __weak arch_check_jump_insn(unsigned long func_addr) +{ + return true; +} + +int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, + klp_add_func_t add_func, + struct list_head *func_list) +{ + int ret; + unsigned long func_addr = 0; + unsigned long func_size; + struct klp_func_node *func_node = NULL; + unsigned long old_func = (unsigned long)func->old_func; + + func_node = func->func_node; + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enabling, check the currently active functions. + */ + if (list_empty(&func_node->func_stack)) { + /* + * The function is not patched yet [the original one is active] + */ + func_addr = old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + /* + * When preemption is disabled and the replacement area + * does not contain a jump instruction, the migration + * thread is scheduled to run stop machine only after the + * execution of instructions to be replaced is complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + (func->force == KLP_NORMAL_FORCE) || + arch_check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + if (func_addr != old_func) { + ret = add_func(func_list, old_func, KLP_MAX_REPLACE_SIZE, + func->old_name, func->force); + if (ret) + return ret; + } + } + } else { +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement instructions. Therefore, + * when preemption is not enabled, atomic execution is performed + * and these instructions will not appear on the stack.
+ */ + if (list_is_singular(&func_node->func_stack)) { + func_addr = old_func; + func_size = func->old_size; + } else { + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + if (func_addr != old_func) { + ret = add_func(func_list, old_func, KLP_MAX_REPLACE_SIZE, + func->old_name, 0); + if (ret) + return ret; + } +#endif + + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + return 0; +} + +static inline unsigned long klp_size_to_check(unsigned long func_size, + int force) +{ + unsigned long size = func_size; + + if (force == KLP_STACK_OPTIMIZE && size > KLP_MAX_REPLACE_SIZE) + size = KLP_MAX_REPLACE_SIZE; + return size; +} + +struct actv_func { + struct list_head list; + unsigned long func_addr; + unsigned long func_size; + const char *func_name; + int force; +}; + +static bool check_func_list(void *data, int *ret, unsigned long pc) +{ + struct list_head *func_list = (struct list_head *)data; + struct actv_func *func = NULL; + + list_for_each_entry(func, func_list, list) { + *ret = klp_compare_address(pc, func->func_addr, func->func_name, + klp_size_to_check(func->func_size, func->force)); + if (*ret) + return false; + } + return true; +} + +static int add_func_to_list(struct list_head *func_list, unsigned long func_addr, + unsigned long func_size, const char *func_name, + int force) +{ + struct actv_func *func = kzalloc(sizeof(struct actv_func), GFP_ATOMIC); + + if (!func) + return -ENOMEM; + func->func_addr = func_addr; + func->func_size = func_size; + func->func_name = func_name; + func->force = force; + list_add_tail(&func->list, func_list); + return 0; +} + +static void free_func_list(struct list_head *func_list) +{ + struct actv_func *func = NULL; + struct actv_func *tmp = NULL; + + list_for_each_entry_safe(func, tmp, func_list, list) { + list_del(&func->list); + kfree(func); + } +} + +static int klp_check_activeness_func(struct klp_patch *patch, int enable, + struct list_head *func_list) +{ + int ret; + struct klp_object *obj = NULL; + struct klp_func *func = NULL; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = arch_klp_check_activeness_func(func, enable, + add_func_to_list, + func_list); + if (ret) + return ret; + } + } + return 0; +} + +static int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + LIST_HEAD(func_list); + + ret = klp_check_activeness_func(patch, enable, &func_list); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + + if (list_empty(&func_list)) + goto out; + + ret = arch_klp_check_calltrace(check_func_list, (void *)&func_list); + +out: + free_func_list(&func_list); + return ret; +} + +static LIST_HEAD(klp_func_list); + +/* + * The caller must ensure that the klp_mutex lock is held or is in the rcu read + * critical area. 
+ */ +static struct klp_func_node *klp_find_func_node(const void *old_func) +{ + struct klp_func_node *func_node; + + list_for_each_entry_rcu(func_node, &klp_func_list, node, + lockdep_is_held(&klp_mutex)) { + if (func_node->old_func == old_func) + return func_node; + } + + return NULL; +} + +static void klp_add_func_node(struct klp_func_node *func_node) +{ + list_add_rcu(&func_node->node, &klp_func_list); +} + +static void klp_del_func_node(struct klp_func_node *func_node) +{ + list_del_rcu(&func_node->node); +} + +void __weak *arch_klp_mem_alloc(size_t size) +{ + return kzalloc(size, GFP_ATOMIC); +} + +void __weak arch_klp_mem_free(void *mem) +{ + kfree(mem); +} + +long __weak arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + return -EINVAL; +} + +static struct klp_func_node *func_node_alloc(struct klp_func *func) +{ + long ret; + struct klp_func_node *func_node = NULL; + + func_node = klp_find_func_node(func->old_func); + if (func_node) /* The old_func has already been patched */ + return func_node; + func_node = arch_klp_mem_alloc(sizeof(struct klp_func_node)); + if (func_node) { + INIT_LIST_HEAD(&func_node->func_stack); + func_node->old_func = func->old_func; + /* + * The module which contains 'old_func' will not be removed because + * its reference count has been held during registration. + * However, we are not in stop_machine context here, so 'old_func' + * must not be modified while the old code is being saved. + */ + ret = arch_klp_save_old_code(&func_node->arch_data, func->old_func); + if (ret) { + arch_klp_mem_free(func_node); + pr_err("save old code failed, ret=%ld\n", ret); + return NULL; + } + klp_add_func_node(func_node); + } + return func_node; +} + +static void func_node_free(struct klp_func *func) +{ + struct klp_func_node *func_node; + + func_node = func->func_node; + if (func_node) { + func->func_node = NULL; + if (list_empty(&func_node->func_stack)) { + klp_del_func_node(func_node); + synchronize_rcu(); + arch_klp_mem_free(func_node); + } + } +} + +static void klp_mem_recycle(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + func_node_free(func); + } + } +} + +static int klp_mem_prepare(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + func->func_node = func_node_alloc(func); + if (func->func_node == NULL) { + klp_mem_recycle(patch); + pr_err("alloc func_node failed\n"); + return -ENOMEM; + } + } + } + return 0; +} + +#ifdef CONFIG_LIVEPATCH_RESTRICT_KPROBE +/* + * Check whether a function has kprobes registered on it before it is patched. + * We can't patch such a function until the kprobes are unregistered.
+ */ +static struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + struct kprobe *kp; + int i; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + for (i = 0; i < func->old_size; i++) { + kp = get_kprobe(func->old_func + i); + if (kp) { + pr_err("func %s has been probed, (un)patch failed\n", + func->old_name); + return kp; + } + } + } + } + + return NULL; +} +#else +static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch) +{ + return NULL; +} +#endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */ + +void __weak arch_klp_unpatch_func(struct klp_func *func) +{ +} + +int __weak arch_klp_patch_func(struct klp_func *func) +{ + return -EINVAL; +} + +static void klp_unpatch_func(struct klp_func *func) +{ + if (WARN_ON(!func->patched)) + return; + if (WARN_ON(!func->old_func)) + return; + if (WARN_ON(!func->func_node)) + return; + + arch_klp_unpatch_func(func); + + func->patched = false; +} + +static inline int klp_patch_func(struct klp_func *func) +{ + int ret = 0; + + if (func->patched) + return 0; + if (WARN_ON(!func->old_func)) + return -EINVAL; + if (WARN_ON(!func->func_node)) + return -EINVAL; + + ret = arch_klp_patch_func(func); + if (!ret) + func->patched = true; + + return ret; +} + +static void klp_unpatch_object(struct klp_object *obj) +{ + struct klp_func *func; + + klp_for_each_func(obj, func) { + if (func->patched) + klp_unpatch_func(func); + } + obj->patched = false; +} + +static int klp_patch_object(struct klp_object *obj) +{ + struct klp_func *func; + int ret; + + if (obj->patched) + return 0; + + klp_for_each_func(obj, func) { + ret = klp_patch_func(func); + if (ret) { + klp_unpatch_object(obj); + return ret; + } + } + obj->patched = true; + + return 0; +} + +static void klp_unpatch_objects(struct klp_patch *patch) +{ + struct klp_object *obj; + + klp_for_each_object(patch, obj) + if (obj->patched) + klp_unpatch_object(obj); +} + +void __weak arch_klp_code_modify_prepare(void) +{ +} + +void __weak arch_klp_code_modify_post_process(void) +{ +} + +static int klp_stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) +{ + int ret; + + /* + * CPU hotplug locking is a "percpu" rw semaphore, but its write lock + * and read lock are globally mutually exclusive: cpus_write_lock() on + * one CPU can block cpus_read_lock() on all other CPUs, and vice versa. + * + * Since CPU hotplug takes cpus_write_lock() before text_mutex, take + * cpus_read_lock() before text_mutex here to avoid deadlock.
+ */ + cpus_read_lock(); + arch_klp_code_modify_prepare(); + ret = stop_machine_cpuslocked(fn, data, cpus); + arch_klp_code_modify_post_process(); + cpus_read_unlock(); + return ret; +} + +static int disable_patch(struct klp_patch *patch) +{ + pr_notice("disabling patch '%s'\n", patch->mod->name); + + klp_unpatch_objects(patch); + patch->enabled = false; + module_put(patch->mod); + return 0; +} + +static int klp_try_disable_patch(void *data) +{ + int ret = 0; + struct patch_data *pd = (struct patch_data *)data; + + if (atomic_inc_return(&pd->cpu_count) == 1) { + struct klp_patch *patch = pd->patch; + + if (klp_check_patch_kprobed(patch)) { + atomic_inc(&pd->cpu_count); + return -EINVAL; + } + + ret = klp_check_calltrace(patch, 0); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + ret = disable_patch(patch); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + atomic_inc(&pd->cpu_count); + } else { + while (atomic_read(&pd->cpu_count) <= num_online_cpus()) + cpu_relax(); + + klp_smp_isb(); + } + + return ret; +} + +static int __klp_disable_patch(struct klp_patch *patch) +{ + int ret; + struct patch_data patch_data = { + .patch = patch, + .cpu_count = ATOMIC_INIT(0), + }; + + if (WARN_ON(!patch->enabled)) + return -EINVAL; + +#ifdef CONFIG_LIVEPATCH_STACK + /* enforce stacking: only the last enabled patch can be disabled */ + if (!list_is_last(&patch->list, &klp_patches) && + list_next_entry(patch, list)->enabled) { + pr_err("only the last enabled patch can be disabled\n"); + return -EBUSY; + } +#endif + + ret = klp_stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask); + if (ret) + return ret; + + klp_mem_recycle(patch); + return 0; +} + +/* + * This function is called from stop_machine() context. + */ +static int enable_patch(struct klp_patch *patch) +{ + struct klp_object *obj; + int ret; + + pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n"); + add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK); + + if (!patch->enabled) { + if (!try_module_get(patch->mod)) + return -ENODEV; + + patch->enabled = true; + + pr_notice("enabling patch '%s'\n", patch->mod->name); + } + + klp_for_each_object(patch, obj) { + if (!klp_is_object_loaded(obj)) + continue; + + ret = klp_patch_object(obj); + if (ret) { + pr_warn("failed to patch object '%s'\n", + klp_is_module(obj) ? 
obj->name : "vmlinux"); + goto disable; + } + } + + return 0; + +disable: + disable_patch(patch); + return ret; +} + +static int klp_try_enable_patch(void *data) +{ + int ret = 0; + struct patch_data *pd = (struct patch_data *)data; + + if (atomic_inc_return(&pd->cpu_count) == 1) { + struct klp_patch *patch = pd->patch; + + if (klp_check_patch_kprobed(patch)) { + atomic_inc(&pd->cpu_count); + return -EINVAL; + } + + ret = klp_check_calltrace(patch, 1); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + ret = enable_patch(patch); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + atomic_inc(&pd->cpu_count); + } else { + while (atomic_read(&pd->cpu_count) <= num_online_cpus()) + cpu_relax(); + + klp_smp_isb(); + } + + return ret; +} + +static int __klp_enable_patch(struct klp_patch *patch) +{ + int ret; + struct patch_data patch_data = { + .patch = patch, + .cpu_count = ATOMIC_INIT(0), + }; + + if (WARN_ON(patch->enabled)) + return -EINVAL; + +#ifdef CONFIG_LIVEPATCH_STACK + /* enforce stacking: only the first disabled patch can be enabled */ + if (patch->list.prev != &klp_patches && + !list_prev_entry(patch, list)->enabled) { + pr_err("only the first disabled patch can be enabled\n"); + return -EBUSY; + } +#endif + + ret = klp_mem_prepare(patch); + if (ret) + return ret; + + ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask); + if (ret) + goto err_out; + +#ifndef CONFIG_LIVEPATCH_STACK + /* move the enabled patch to the list tail */ + list_del(&patch->list); + list_add_tail(&patch->list, &klp_patches); +#endif + + return 0; + +err_out: + klp_mem_recycle(patch); + return ret; +} + + +static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct klp_patch *patch; + int ret; + bool enabled; + + ret = kstrtobool(buf, &enabled); + if (ret) + return ret; + + patch = container_of(kobj, struct klp_patch, kobj); + + mutex_lock(&klp_mutex); + + if (!klp_is_patch_registered(patch)) { + /* + * Module with the patch could either disappear meanwhile or is + * not properly initialized yet. + */ + ret = -EINVAL; + goto out; + } + + if (patch->enabled == enabled) { + /* already in requested state */ + ret = -EINVAL; + goto out; + } + + if (enabled) + ret = __klp_enable_patch(patch); + else + ret = __klp_disable_patch(patch); + +out: + mutex_unlock(&klp_mutex); + + if (ret) + return ret; + return count; +} + +/** + * klp_register_patch() - registers a patch + * @patch: Patch to be registered + * + * Initializes the data structure associated with the patch and + * creates the sysfs interface. 
+ * + * Return: 0 on success, otherwise error + */ +int klp_register_patch(struct klp_patch *patch) +{ + int ret; + struct klp_object *obj; + + if (!patch) { + pr_err("patch invalid\n"); + return -EINVAL; + } + if (!patch->mod) { + pr_err("patch->mod invalid\n"); + return -EINVAL; + } + if (!patch->objs) { + pr_err("patch->objs invalid\n"); + return -EINVAL; + } + + klp_for_each_object_static(patch, obj) { + if (!obj->funcs) { + pr_err("obj->funcs invalid\n"); + return -EINVAL; + } + } + + if (!is_livepatch_module(patch->mod)) { + pr_err("module %s is not marked as a livepatch module\n", + patch->mod->name); + return -EINVAL; + } + + if (!klp_initialized()) { + pr_err("kernel live patch not available\n"); + return -ENODEV; + } + + mutex_lock(&klp_mutex); + + if (klp_is_patch_registered(patch)) { + mutex_unlock(&klp_mutex); + return -EINVAL; + } + + klp_init_patch_early(patch); + + ret = klp_init_patch(patch); + if (ret) + goto err; + + mutex_unlock(&klp_mutex); + + return 0; + +err: + klp_free_patch_start(patch); + + mutex_unlock(&klp_mutex); + + kobject_put(&patch->kobj); + wait_for_completion(&patch->finish); + + return ret; +} +EXPORT_SYMBOL_GPL(klp_register_patch); + +/** + * klp_unregister_patch() - unregisters a patch + * @patch: Disabled patch to be unregistered + * + * Frees the data structures and removes the sysfs interface. + * + * Return: 0 on success, otherwise error + */ +int klp_unregister_patch(struct klp_patch *patch) +{ + int ret = 0; + struct klp_object *obj; + + mutex_lock(&klp_mutex); + + if (!klp_is_patch_registered(patch)) { + ret = -EINVAL; + goto out; + } + + if (patch->enabled) { + ret = -EBUSY; + goto out; + } + + klp_for_each_object(patch, obj) + klp_unload_hook(obj); + + klp_free_patch_start(patch); + + mutex_unlock(&klp_mutex); + + kobject_put(&patch->kobj); + wait_for_completion(&patch->finish); + + return 0; +out: + mutex_unlock(&klp_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(klp_unregister_patch); + +static int __init klp_init(void) +{ + struct proc_dir_entry *root_klp_dir, *res; + + root_klp_dir = proc_mkdir("livepatch", NULL); + if (!root_klp_dir) + goto error_out; + + res = proc_create("livepatch/state", 0, NULL, + &proc_klpstate_operations); + if (!res) + goto error_remove; + + klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj); + if (!klp_root_kobj) + goto error_remove_state; + + return 0; + +error_remove_state: + remove_proc_entry("livepatch/state", NULL); +error_remove: + remove_proc_entry("livepatch", NULL); +error_out: + return -ENOMEM; +} + +#endif /* CONFIG_LIVEPATCH_FTRACE */ + module_init(klp_init); diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h index 38209c7361b65b9549bff0ea93bf9c45cc29db45..20f91623912515430c608eef2332afaaf02d7263 100644 --- a/kernel/livepatch/core.h +++ b/kernel/livepatch/core.h @@ -13,16 +13,19 @@ extern struct list_head klp_patches; #define klp_for_each_patch(patch) \ list_for_each_entry(patch, &klp_patches, list) +#ifdef CONFIG_LIVEPATCH_FTRACE void klp_free_patch_async(struct klp_patch *patch); void klp_free_replaced_patches_async(struct klp_patch *new_patch); void klp_unpatch_replaced_patches(struct klp_patch *new_patch); void klp_discard_nops(struct klp_patch *new_patch); +#endif /* CONFIG_LIVEPATCH_FTRACE */ static inline bool klp_is_object_loaded(struct klp_object *obj) { return !obj->name || obj->mod; } +#ifdef CONFIG_LIVEPATCH_FTRACE static inline int klp_pre_patch_callback(struct klp_object *obj) { int ret = 0; @@ -55,5 +58,6 @@ static inline void klp_post_unpatch_callback(struct 
klp_object *obj) obj->callbacks.post_unpatch_enabled = false; } +#endif /* CONFIG_LIVEPATCH_FTRACE */ #endif /* _LIVEPATCH_CORE_H */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 98fedfdb8db52fc358c2d3838ce9fd8a35bea068..4f15920352290b08fbec7a08db8ef499f5d538b1 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1918,9 +1918,17 @@ static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned l static int check_modinfo_livepatch(struct module *mod, struct load_info *info) { +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (!get_modinfo(info, "livepatch")) { + set_mod_klp_rel_state(mod, MODULE_KLP_REL_NONE); + return 0; + } + set_mod_klp_rel_state(mod, MODULE_KLP_REL_UNDO); +#else /* !CONFIG_LIVEPATCH_WO_FTRACE */ if (!get_modinfo(info, "livepatch")) /* Nothing more to do */ return 0; +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ if (set_livepatch_module(mod)) return 0; @@ -2309,7 +2317,7 @@ static int check_export_symbol_versions(struct module *mod) return 0; } -static void flush_module_icache(const struct module *mod) +void flush_module_icache(const struct module *mod) { /* * Flush the instruction cache, since we've played with text. diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c index a2b656b4e3d2bec27c7cf5541b8a126904f36fc1..397e18f50517577c743281e279a675ddae7843f8 100644 --- a/kernel/module/strict_rwx.c +++ b/kernel/module/strict_rwx.c @@ -32,6 +32,23 @@ void module_enable_x(const struct module *mod) module_set_memory(mod, type, set_memory_x); } +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +void module_disable_ro(const struct module *mod) +{ + if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + return; +#ifdef CONFIG_STRICT_MODULE_RWX + if (!rodata_enabled) + return; +#endif + + module_set_memory(mod, MOD_TEXT, set_memory_rw); + module_set_memory(mod, MOD_INIT_TEXT, set_memory_rw); + module_set_memory(mod, MOD_RODATA, set_memory_rw); + module_set_memory(mod, MOD_INIT_RODATA, set_memory_rw); +} +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + void module_enable_ro(const struct module *mod, bool after_init) { if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c index 639397b5491ca0ff6eabf18d1431dd2ab9036686..6d0e1f185e02a11a0c95a7ce06781b3ccbfaeced 100644 --- a/kernel/static_call_inline.c +++ b/kernel/static_call_inline.c @@ -367,6 +367,11 @@ static int static_call_add_module(struct module *mod) struct static_call_site *stop = start + mod->num_static_call_sites; struct static_call_site *site; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (unlikely(!mod_klp_rel_completed(mod))) + return 0; +#endif + for (site = start; site != stop; site++) { unsigned long s_key = __static_call_key(site); unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS; @@ -409,6 +414,11 @@ static void static_call_del_module(struct module *mod) struct static_call_mod *site_mod, **prev; struct static_call_site *site; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (unlikely(!mod_klp_rel_completed(mod))) + return; +#endif + for (site = start; site < stop; site++) { key = static_call_key(site); if (key == prev_key) @@ -461,6 +471,16 @@ static struct notifier_block static_call_module_nb = { .notifier_call = static_call_module_notify, }; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +int klp_static_call_register(struct module *mod) +{ + int ret; + + ret = static_call_module_notify(&static_call_module_nb, MODULE_STATE_COMING, mod); + return notifier_to_errno(ret); +} +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + #else static inline int 
__static_call_mod_text_reserved(void *start, void *end) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index cee4d3f7582097b426730170f0caa6d93465e64e..7241c304cb5c7e4463a60195e172e2a8660c3467 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2875,7 +2875,7 @@ config TEST_LIVEPATCH tristate "Test livepatching" default n depends on DYNAMIC_DEBUG - depends on LIVEPATCH + depends on LIVEPATCH_FTRACE depends on m help Test kernel livepatching features for correctness. The tests will diff --git a/samples/livepatch/Makefile b/samples/livepatch/Makefile index 9f853eeb61404cf24981993a2f08162a773f3ecc..1e384d50c73f5a26fb7e2cda7d7ef0ff501662ef 100644 --- a/samples/livepatch/Makefile +++ b/samples/livepatch/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o +ifeq ($(CONFIG_LIVEPATCH_FTRACE), y) obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o +endif diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c index cd76d7ebe59859c9f6eeb5cb5b72390b072bc633..6649e7fb91496b904a90df8f0317e41dd0abab28 100644 --- a/samples/livepatch/livepatch-sample.c +++ b/samples/livepatch/livepatch-sample.c @@ -30,6 +30,31 @@ */ #include + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +void load_hook(void) +{ + pr_info("load_hook\n"); +} + +void unload_hook(void) +{ + pr_info("unload_hook\n"); +} + +static struct klp_hook hooks_load[] = { + { + .hook = load_hook + }, { } +}; + +static struct klp_hook hooks_unload[] = { + { + .hook = unload_hook + }, { } +}; +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + static int livepatch_cmdline_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%s\n", "this has been live patched"); @@ -47,6 +72,10 @@ static struct klp_object objs[] = { { /* name being NULL means vmlinux */ .funcs = funcs, +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + .hooks_load = hooks_load, + .hooks_unload = hooks_unload, +#endif }, { } }; @@ -57,11 +86,18 @@ static struct klp_patch patch = { static int livepatch_init(void) { +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + return klp_register_patch(&patch); +#else return klp_enable_patch(&patch); +#endif } static void livepatch_exit(void) { +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + WARN_ON(klp_unregister_patch(&patch)); +#endif } module_init(livepatch_init); diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h index 65c0d9ce1e295bbe4dc25a6c8c6f8aff09203c7e..c77831f518be3f5f1f81ac02051e9a79f788456b 100644 --- a/tools/arch/x86/include/asm/insn.h +++ b/tools/arch/x86/include/asm/insn.h @@ -180,6 +180,13 @@ static inline int insn_has_emulate_prefix(struct insn *insn) return !!insn->emulate_prefix_size; } +/* Ensure this instruction is decoded completely */ +static inline int insn_complete(struct insn *insn) +{ + return insn->opcode.got && insn->modrm.got && insn->sib.got && + insn->displacement.got && insn->immediate.got; +} + static inline insn_byte_t insn_vex_m_bits(struct insn *insn) { if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c index 8fd63a067308a83ea9fe44085db306c555a1288d..47a6f1e8972d943146236f212a8613023e79d123 100644 --- a/tools/arch/x86/lib/insn.c +++ b/tools/arch/x86/lib/insn.c 
@@ -715,13 +715,6 @@ int insn_get_length(struct insn *insn) return 0; } -/* Ensure this instruction is decoded completely */ -static inline int insn_complete(struct insn *insn) -{ - return insn->opcode.got && insn->modrm.got && insn->sib.got && - insn->displacement.got && insn->immediate.got; -} - /** * insn_decode() - Decode an x86 instruction * @insn: &struct insn to be initialized diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64 index 253821494884835ee4b06b7f8fceefd816f9ee28..279df18b29c4e670c9421d6aca4d5a8d660a6912 100644 --- a/tools/testing/selftests/bpf/config.aarch64 +++ b/tools/testing/selftests/bpf/config.aarch64 @@ -90,6 +90,7 @@ CONFIG_KRETPROBES=y CONFIG_KSM=y CONFIG_LATENCYTOP=y CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_FTRACE=y CONFIG_LOCK_STAT=y CONFIG_MACVLAN=y CONFIG_MACVTAP=y diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x index 2ba92167be35857114820025236c24fe99bd610b..90fc5ba62fe2279436becdbb091c1bdbc1b520b7 100644 --- a/tools/testing/selftests/bpf/config.s390x +++ b/tools/testing/selftests/bpf/config.s390x @@ -73,6 +73,7 @@ CONFIG_KRETPROBES=y CONFIG_KSM=y CONFIG_LATENCYTOP=y CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_FTRACE=y CONFIG_LOCK_STAT=y CONFIG_MACVLAN=y CONFIG_MACVTAP=y diff --git a/tools/testing/selftests/livepatch/README b/tools/testing/selftests/livepatch/README index 0942dd5826f87d546b804d78a0b82f14329f6240..40a2596e65c2b2513c754b0a867d42f758781849 100644 --- a/tools/testing/selftests/livepatch/README +++ b/tools/testing/selftests/livepatch/README @@ -16,6 +16,7 @@ Config Set these config options and their prerequisites: CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_FTRACE=y CONFIG_TEST_LIVEPATCH=m diff --git a/tools/testing/selftests/livepatch/config b/tools/testing/selftests/livepatch/config index ad23100cb27c84da75727efb714244487008c046..ee54bc7e631e22c5975ebe9f62278b50c55ed3ea 100644 --- a/tools/testing/selftests/livepatch/config +++ b/tools/testing/selftests/livepatch/config @@ -1,3 +1,4 @@ CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_FTRACE=y CONFIG_DYNAMIC_DEBUG=y CONFIG_TEST_LIVEPATCH=m
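
Illustrative usage (not part of the patch): a minimal sketch of a patch module written against the CONFIG_LIVEPATCH_WO_FTRACE registration interface added above, mirroring the samples/livepatch/livepatch-sample.c changes. The patched symbol (cmdline_proc_show) comes from the in-tree sample; the demo_* names and the hook bodies are hypothetical.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* replacement body for cmdline_proc_show() */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

/* runs once when the patch module is registered */
static void demo_load_hook(void)
{
	pr_info("demo load hook\n");
}

/* runs once when the (disabled) patch module is unregistered */
static void demo_unload_hook(void)
{
	pr_info("demo unload hook\n");
}

static struct klp_hook hooks_load[] = {
	{ .hook = demo_load_hook },
	{ }
};

static struct klp_hook hooks_unload[] = {
	{ .hook = demo_unload_hook },
	{ }
};

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	},
	{ }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
		.hooks_load = hooks_load,
		.hooks_unload = hooks_unload,
	},
	{ }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int demo_init(void)
{
	/* registration only; the patch stays disabled until enabled via sysfs */
	return klp_register_patch(&patch);
}

static void demo_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");

With this flow the patch is applied via stop_machine() rather than ftrace: writing 1 to the patch's sysfs 'enabled' attribute invokes __klp_enable_patch(), which verifies through klp_check_calltrace() that none of the affected code is live on any call stack before patching, and /proc/livepatch/state lists the registered patches and whether each one is enabled.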