diff --git a/arch/Kconfig b/arch/Kconfig
index 98116fbfcff667a7ab8cb4a7d112fef45ff52e85..a9d7c5099b96a7539fe5f95b1594a2c12eee4661 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1573,6 +1573,7 @@ config FAST_IRQ
 config DEBUG_FEATURE_BYPASS
 	bool "Bypass debug feature in fast syscall"
 	depends on FAST_SYSCALL || FAST_IRQ
+	depends on !LOCKDEP
 	default y
 	help
 	  This to bypass debug feature in fast syscall.
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 60300e133d5f9060966a814553bfa23b0e4be74c..4602c107c40a4cd651d50b9952b9c2bb8c43aa28 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -151,6 +151,62 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
 	exit_to_user_mode(regs);
 }
 
+#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ)
+/*
+ * Copy from exit_to_user_mode_prepare
+ */
+static __always_inline void fast_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	local_daif_mask();
+
+	flags = read_thread_flags();
+	if (unlikely(flags & _TIF_WORK_MASK))
+		do_notify_resume(regs, flags);
+
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+	lockdep_sys_exit();
+#endif
+}
+
+/* Copy from __exit_to_user_mode */
+static __always_inline void __fast_exit_to_user_mode(void)
+{
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+	trace_hardirqs_on_prepare();
+	lockdep_hardirqs_on_prepare();
+#endif
+	user_enter_irqoff();
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+	lockdep_hardirqs_on(CALLER_ADDR0);
+#endif
+}
+
+static __always_inline void fast_exit_to_user_mode(struct pt_regs *regs)
+{
+	fast_exit_to_user_mode_prepare(regs);
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+	mte_check_tfsr_exit();
+#endif
+	__fast_exit_to_user_mode();
+}
+
+/* Copy from __enter_from_user_mode */
+static __always_inline void fast_enter_from_user_mode(struct pt_regs *regs)
+{
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+	lockdep_hardirqs_off(CALLER_ADDR0);
+#endif
+	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	user_exit_irqoff();
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+	trace_hardirqs_off_finish();
+	mte_disable_tco_entry(current);
+#endif
+}
+#endif
+
 /*
  * Handle IRQ/context state management when entering an NMI from user/kernel
  * mode. Before this function is called it is not safe to call regular kernel
@@ -513,80 +569,12 @@ static __always_inline void __el1_pnmi(struct pt_regs *regs,
 }
 
 #ifdef CONFIG_FAST_IRQ
-static __always_inline void __el1_xint(struct pt_regs *regs,
-				       void (*handler)(struct pt_regs *))
-{
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	enter_from_kernel_mode(regs);
-#endif
-
-	xint_enter_rcu();
-	do_interrupt_handler(regs, handler);
-	xint_exit_rcu();
-
-	arm64_preempt_schedule_irq();
-
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	exit_to_kernel_mode(regs);
-#endif
-}
-
-static void noinstr el1_xint(struct pt_regs *regs, u64 nmi_flag,
-			     void (*handler)(struct pt_regs *),
-			     void (*nmi_handler)(struct pt_regs *))
-{
-	/* Is there a NMI to handle? */
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	if (system_uses_nmi() && (read_sysreg(isr_el1) & nmi_flag)) {
-		__el1_nmi(regs, nmi_handler);
-		return;
-	}
-#endif
-
-	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
-
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
-		__el1_pnmi(regs, handler);
-	else
-		__el1_xint(regs, handler);
-}
-
-asmlinkage void noinstr el1h_64_xint_handler(struct pt_regs *regs)
-{
-	el1_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
-}
-
-static __always_inline void xint_exit_to_user_mode_prepare(struct pt_regs *regs)
-{
-	unsigned long flags;
-
-	local_daif_mask();
-
-	flags = read_thread_flags();
-	if (unlikely(flags & _TIF_WORK_MASK))
-		do_notify_resume(regs, flags);
-
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	lockdep_sys_exit();
-#endif
-}
-
-static __always_inline void xint_exit_to_user_mode(struct pt_regs *regs)
-{
-	xint_exit_to_user_mode_prepare(regs);
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	mte_check_tfsr_exit();
-	__exit_to_user_mode();
-#endif
-}
-
 static void noinstr el0_xint(struct pt_regs *regs, u64 nmi_flag,
 			     void (*handler)(struct pt_regs *),
 			     void (*nmi_handler)(struct pt_regs *))
 {
+	fast_enter_from_user_mode(regs);
 #ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	enter_from_user_mode(regs);
-
 	/* Is there a NMI to handle? */
 	if (system_uses_nmi() && (read_sysreg(isr_el1) & nmi_flag)) {
 		/*
@@ -615,7 +603,7 @@ static void noinstr el0_xint(struct pt_regs *regs, u64 nmi_flag,
 	do_interrupt_handler(regs, handler);
 	xint_exit_rcu();
 
-	xint_exit_to_user_mode(regs);
+	fast_exit_to_user_mode(regs);
 }
 
 
@@ -828,46 +816,17 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
 }
 
 #ifdef CONFIG_FAST_SYSCALL
-/*
- * Copy from exit_to_user_mode_prepare
- */
-static __always_inline void xcall_exit_to_user_mode_prepare(struct pt_regs *regs)
-{
-	unsigned long flags;
-
-	local_daif_mask();
-
-	flags = read_thread_flags();
-	if (unlikely(flags & _TIF_WORK_MASK))
-		do_notify_resume(regs, flags);
-
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	lockdep_sys_exit();
-#endif
-}
-
-static __always_inline void xcall_exit_to_user_mode(struct pt_regs *regs)
-{
-	xcall_exit_to_user_mode_prepare(regs);
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	mte_check_tfsr_exit();
-	__exit_to_user_mode();
-#endif
-}
-
 /* Copy from el0_sync */
 static void noinstr el0_xcall(struct pt_regs *regs)
 {
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
-	enter_from_user_mode(regs);
-#endif
+	fast_enter_from_user_mode(regs);
 #ifndef CONFIG_SECURITY_FEATURE_BYPASS
 	cortex_a76_erratum_1463225_svc_handler();
 #endif
 	fp_user_discard();
 	local_daif_restore(DAIF_PROCCTX);
 	do_el0_svc(regs);
-	xcall_exit_to_user_mode(regs);
+	fast_exit_to_user_mode(regs);
 }
 
 asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e9fd3094623ffa8439bebbbe0456557d48f857df..da3809632f0f43d7b1a006133ff6de2a585f4961 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -660,7 +660,7 @@ SYM_CODE_END(__bad_stack)
 #endif
 
 #ifdef CONFIG_FAST_IRQ
-.macro check_xint_pre_kernel_entry el:req, ht:req
+.macro check_xint_pre_kernel_entry
 	stp	x0, x1, [sp, #0]
 	stp	x2, x3, [sp, #16]
 
@@ -691,20 +691,16 @@ SYM_CODE_END(__bad_stack)
 	ldp	x0, x1, [sp, #0]
 	ldp	x2, x3, [sp, #16]
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
-	kernel_entry \el, 64, xint
+	kernel_entry 0, 64, xint
 #else
-	kernel_entry \el, 64
+	kernel_entry 0, 64
 #endif
 	mov	x0, sp
-	bl	el\el\ht\()_64_xint_handler
+	bl	el0t_64_xint_handler
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
-	kernel_exit \el, xint
+	kernel_exit 0, xint
 #else
-	.if \el == 0
 	b	ret_to_user
-	.else
-	b	ret_to_kernel
-	.endif
 #endif
 
 .Lskip_xint\@:
@@ -726,11 +722,11 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 	.endif
 #endif
 #ifdef CONFIG_FAST_IRQ
-	.if \regsize == 64 && \label == irq && (( \el == 0 && \ht == t) || (\el == 1 && \ht == h))
+	.if \regsize == 64 && \label == irq && \el == 0 && \ht == t
 alternative_if_not ARM64_HAS_XINT
 	b	.Lskip_check_xint\@
 alternative_else_nop_endif
-	check_xint_pre_kernel_entry \el, \ht
+	check_xint_pre_kernel_entry
 .Lskip_check_xint\@:
 	.endif
 #endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6dc0ea5baf0666c2c02795d14081726a791731c8..cd8770b2f76cca6ed3bd5877ef15db0770c6400d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -669,11 +669,13 @@ void xint_enter_rcu(void)
 	preempt_count_add(HARDIRQ_OFFSET);
 #ifndef CONFIG_DEBUG_FEATURE_BYPASS
 	lockdep_hardirq_enter();
+#endif
 
 	if (tick_nohz_full_cpu(smp_processor_id()) ||
 	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
 		tick_irq_enter();
 
+#ifndef CONFIG_DEBUG_FEATURE_BYPASS
 	account_hardirq_enter(current);
 #endif
 }
@@ -695,9 +697,7 @@ static inline void __xint_exit_rcu(void)
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
 	tick_irq_exit();
-#endif
}
 
 void xint_exit_rcu(void)
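
A minimal usage sketch, not part of the patch: the consolidated fast_enter_from_user_mode()/fast_exit_to_user_mode() helpers are shared by the xcall and xint fast paths, and DEBUG_FEATURE_BYPASS now depends on !LOCKDEP because the bypass compiles out the lockdep_* entry/exit hooks. The sketch below shows how a fast-path EL0 handler is expected to pair the helpers; el0_fast_example() and handle_example() are hypothetical names for illustration, the real callers being el0_xcall() and el0_xint() in arch/arm64/kernel/entry-common.c.

/*
 * Hypothetical fast-path EL0 handler pairing the shared helpers. With
 * CONFIG_DEBUG_FEATURE_BYPASS=y the lockdep/tracing/MTE hooks inside the
 * helpers compile out, but the context-tracking transitions
 * (user_exit_irqoff()/user_enter_irqoff()) always run, so RCU still
 * observes the kernel entry and exit.
 */
static void noinstr el0_fast_example(struct pt_regs *regs)
{
	fast_enter_from_user_mode(regs);	/* leave user context, IRQs still masked */
	local_daif_restore(DAIF_PROCCTX);	/* unmask interrupts for the payload */
	handle_example(regs);			/* hypothetical payload */
	fast_exit_to_user_mode(regs);		/* mask DAIF, handle _TIF_WORK_MASK, re-enter user context */
}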