diff --git a/arch/Kconfig b/arch/Kconfig index a4ed5d338dadf8ad635a51f5d2e2881c4307935f..dd7b91f19edcfdff94d51adf6e785d52dee6abe2 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1574,12 +1574,12 @@ config FAST_IRQ framework for latency-sensitive interrupts. config DEBUG_FEATURE_BYPASS - bool "Bypass debug feature in fast syscall" - depends on FAST_SYSCALL || FAST_IRQ + bool "Bypass debug feature in fast syscall/irq and hardware xcall" + depends on FAST_SYSCALL || FAST_IRQ || ARCH_SUPPORTS_XCALL depends on !LOCKDEP default y help - This to bypass debug feature in fast syscall. + This option bypasses debug features in fast syscall/irq and hardware xcall. The svc exception handling process, which includes auxiliary functions for debug/trace and core functions like KPTI, has been identified as overly "lengthy". @@ -1587,12 +1587,12 @@ config DEBUG_FEATURE_BYPASS Disable this config to keep debug feature in fast syscall. config SECURITY_FEATURE_BYPASS - bool "Bypass security feature in fast syscall" - depends on FAST_SYSCALL || FAST_IRQ + bool "Bypass security feature in fast syscall/irq and hardware xcall" + depends on FAST_SYSCALL || FAST_IRQ || ARCH_SUPPORTS_XCALL default y help - This to bypass security feature in fast syscall. - The svc exception handling process, which includes auxiliary + This option bypasses security features in fast syscall/irq and hardware + xcall. The svc exception handling process, which includes auxiliary functions for debug/trace and core functions like KPTI, has been identified as overly "lengthy". In fast syscall we only considers necessary features. diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9da9d58f1c024086f5b26c02526d0404585f4a51..14b818fce7a1e4f29f0db184e0371b77725a1e60 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1717,6 +1717,18 @@ config ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG config ARCH_SUPPORTS_CRASH_DUMP def_bool y +config ARCH_SUPPORTS_XINT + bool "Hardware xint support" + default n + depends on ARM64_NMI + depends on ARM_GIC_V3 + depends on !COMPAT + +config ARCH_SUPPORTS_XCALL + bool "Hardware xcall support" + depends on !COMPAT + default n + config TRANS_TABLE def_bool y depends on HIBERNATION || KEXEC_CORE diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 435634a703c6a5d6d384b2075c999fef13413ba7..f97b2b400266b89b14a664b0e4727a156915d66f 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -269,6 +269,13 @@ #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) + +#define ACTLR_ELx_XCALL_SHIFT 20 +#define ACTLR_ELx_XCALL (BIT(ACTLR_ELx_XCALL_SHIFT)) + +#define ACTLR_ELx_XINT_SHIFT 21 +#define ACTLR_ELx_XINT (BIT(ACTLR_ELx_XINT_SHIFT)) + #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a1736e9044dad67b67158a0c2e4c8340a8ab9974..055eb16309e2294635119811cf13c8bc4692fc2a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2406,8 +2406,12 @@ static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __ } #endif -#ifdef CONFIG_FAST_IRQ +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) || defined(CONFIG_ARCH_SUPPORTS_XCALL) bool is_xint_support; +bool hw_xint_support; +#endif + +#ifdef CONFIG_FAST_IRQ static int __init xint_setup(char *str) { if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) @@ -2424,6 +2428,89 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u) { }
#endif +#if defined(CONFIG_ARCH_SUPPORTS_XINT) || defined(CONFIG_ARCH_SUPPORTS_XCALL) +static bool test_has_xfunc(bool is_xint) +{ + u64 new, old = read_sysreg(actlr_el1); + + if (is_xint) + write_sysreg(old | ACTLR_ELx_XINT, actlr_el1); + else + write_sysreg(old | ACTLR_ELx_XCALL, actlr_el1); + + isb(); + new = read_sysreg(actlr_el1); + if (is_xint && (new & ACTLR_ELx_XINT)) { + write_sysreg(old, actlr_el1); + hw_xint_support = true; + return true; + } + + if (!is_xint && (new & ACTLR_ELx_XCALL)) { + write_sysreg(old, actlr_el1); + return true; + } + + return false; +} + +static void enable_xfunc(bool is_xint) +{ + u64 actlr_el1, actlr_el2; + u64 el; + + el = read_sysreg(CurrentEL); + if (el == CurrentEL_EL2) { + actlr_el2 = read_sysreg(actlr_el2); + actlr_el2 |= (is_xint ? ACTLR_ELx_XINT : ACTLR_ELx_XCALL); + write_sysreg(actlr_el2, actlr_el2); + isb(); + actlr_el2 = read_sysreg(actlr_el2); + pr_info("actlr_el2: %llx, cpu:%d\n", actlr_el2, smp_processor_id()); + } + + actlr_el1 = read_sysreg(actlr_el1); + actlr_el1 |= (is_xint ? ACTLR_ELx_XINT : ACTLR_ELx_XCALL); + write_sysreg(actlr_el1, actlr_el1); + isb(); + actlr_el1 = read_sysreg(actlr_el1); + pr_info("actlr_el1: %llx, cpu:%d\n", actlr_el1, smp_processor_id()); +} +#endif + +#ifdef CONFIG_ARCH_SUPPORTS_XINT +static bool test_has_xint(const struct arm64_cpu_capabilities *entry, int scope) +{ + if (!IS_ENABLED(CONFIG_ARM64_NMI)) + pr_info("CONFIG_ARM64_NMI disabled, using XINTs for guests only\n"); +#ifdef CONFIG_ARM64_PSEUDO_NMI + else if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) { + pr_info("Pseudo NMI enabled, not using architected XINT\n"); + return false; + } +#endif + + return test_has_xfunc(true); +} + +static void xint_enable(const struct arm64_cpu_capabilities *__unused) +{ + enable_xfunc(true); +} +#endif + +#ifdef CONFIG_ARCH_SUPPORTS_XCALL +static bool test_has_xcall(const struct arm64_cpu_capabilities *entry, int scope) +{ + return test_has_xfunc(false); +} + +static void xcall_enable(const struct arm64_cpu_capabilities *__unused) +{ + enable_xfunc(false); +} +#endif + static const struct arm64_cpu_capabilities arm64_features[] = { { .capability = ARM64_ALWAYS_BOOT, @@ -2971,6 +3058,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_xint_support, }, +#endif +#ifdef CONFIG_ARCH_SUPPORTS_XINT + { + .desc = "Hardware xint Support", + .capability = ARM64_HAS_HW_XINT, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = test_has_xint, + .cpu_enable = xint_enable, + }, +#endif +#ifdef CONFIG_ARCH_SUPPORTS_XCALL + { + .desc = "Hardware xcall Support", + .capability = ARM64_HAS_HW_XCALL, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = test_has_xcall, + .cpu_enable = xcall_enable, + }, #endif {}, }; diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 4602c107c40a4cd651d50b9952b9c2bb8c43aa28..ea78eb729ff6ea22fd11a2d840c3bc4bc19b54d4 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -6,6 +6,9 @@ */ #include +#ifdef CONFIG_ARCH_SUPPORTS_XINT +#include +#endif #include #include #include @@ -151,7 +154,7 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs) exit_to_user_mode(regs); } -#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ) +#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XCALL) /* * Copy from exit_to_user_mode_prepare */ @@ -607,7 +610,7 @@ static void noinstr el0_xint(struct pt_regs *regs, 
u64 nmi_flag, } -asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_64_sw_xint_handler(struct pt_regs *regs) { el0_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq); } @@ -815,7 +818,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) exit_to_user_mode(regs); } -#ifdef CONFIG_FAST_SYSCALL +#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_ARCH_SUPPORTS_XCALL) /* Copy from el0_sync */ static void noinstr el0_xcall(struct pt_regs *regs) { @@ -966,6 +969,30 @@ asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs) __el0_error_handler_common(regs); } +#ifdef CONFIG_ARCH_SUPPORTS_XINT +asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs) +{ + u32 irqnr = read_sysreg_s(SYS_ICC_HPPIR1_EL1); + + if (gic_irqnr_is_special(irqnr)) + return; + + if (is_xint(irqnr)) + fast_handle_xint(regs, irqnr); + else + el0t_64_irq_handler(regs); +} +#else +#ifdef CONFIG_AARCH32_EL0 +asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} +#else /* CONFIG_AARCH32_EL0 */ +UNHANDLED(el0t, 32, irq) +#endif +#endif + #ifdef CONFIG_AARCH32_EL0 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) { @@ -1028,11 +1055,6 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) } } -asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs) -{ - __el0_irq_handler_common(regs); -} - asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs) { __el0_fiq_handler_common(regs); @@ -1044,7 +1066,6 @@ asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs) } #else /* CONFIG_AARCH32_EL0 */ UNHANDLED(el0t, 32, sync) -UNHANDLED(el0t, 32, irq) UNHANDLED(el0t, 32, fiq) UNHANDLED(el0t, 32, error) #endif /* CONFIG_AARCH32_EL0 */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index da3809632f0f43d7b1a006133ff6de2a585f4961..23c1b255e02eb0c3e47c6458e338cc44c6658818 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -564,8 +564,16 @@ SYM_CODE_START(vectors) kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0 kernel_ventry 0, t, 64, error // Error 64-bit EL0 +#ifdef CONFIG_ARCH_SUPPORTS_XCALL + kernel_ventry 0, t, 64, xcall // xcall synchronous 64-bit EL0 +#else kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0 +#endif +#ifdef CONFIG_ARCH_SUPPORTS_XINT + kernel_ventry 0, t, 64, xint // XINT 64-bit EL0 +#else kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0 +#endif kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0 kernel_ventry 0, t, 32, error // Error 32-bit EL0 SYM_CODE_END(vectors) @@ -696,7 +704,7 @@ SYM_CODE_END(__bad_stack) kernel_entry 0, 64 #endif mov x0, sp - bl el0t_64_xint_handler + bl el0t_64_sw_xint_handler #ifdef CONFIG_SECURITY_FEATURE_BYPASS kernel_exit 0, xint #else @@ -718,7 +726,19 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) b .Lret_to_kernel_entry\@ alternative_else_nop_endif check_xcall_pre_kernel_entry - .Lret_to_kernel_entry\@: +.Lret_to_kernel_entry\@: + .endif +#endif +#ifdef CONFIG_ARCH_SUPPORTS_XCALL + .if \el == 0 && \regsize == 64 && \label == xcall + alternative_if_not ARM64_HAS_HW_XCALL + b .Lskip_hw_xcall\@ + alternative_else_nop_endif + kernel_entry 0, 64, xcall + mov x0, sp + bl el0t_64_xcall_handler + kernel_exit 0, xcall +.Lskip_hw_xcall\@: .endif #endif #ifdef CONFIG_FAST_IRQ @@ -729,6 +749,18 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) check_xint_pre_kernel_entry .Lskip_check_xint\@: .endif +#endif +#ifdef CONFIG_ARCH_SUPPORTS_XINT + .if 
\el == 0 && \regsize == 64 && \label == xint + alternative_if_not ARM64_HAS_HW_XINT + b .Lskip_hw_xint\@ + alternative_else_nop_endif + kernel_entry 0, 64, xint + mov x0, sp + bl el0t_64_xint_handler + kernel_exit 0, xint +.Lskip_hw_xint\@: + .endif #endif kernel_entry \el, \regsize mov x0, sp @@ -759,8 +791,16 @@ SYM_CODE_END(el\el\ht\()_\regsize\()_\label) entry_handler 0, t, 64, fiq entry_handler 0, t, 64, error +#ifdef CONFIG_ARCH_SUPPORTS_XCALL + entry_handler 0, t, 64, xcall +#else entry_handler 0, t, 32, sync +#endif +#ifdef CONFIG_ARCH_SUPPORTS_XINT + entry_handler 0, t, 64, xint +#else entry_handler 0, t, 32, irq +#endif entry_handler 0, t, 32, fiq entry_handler 0, t, 32, error @@ -905,7 +945,20 @@ alternative_else_nop_endif .rept 4 tramp_ventry .Lvector_start\@, 64, \kpti, \bhb .endr - .rept 4 + +#ifdef CONFIG_ARCH_SUPPORTS_XCALL + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb +#else + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb +#endif + +#ifdef CONFIG_ARCH_SUPPORTS_XINT + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb +#else + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb +#endif + + .rept 2 tramp_ventry .Lvector_start\@, 32, \kpti, \bhb .endr .endm @@ -955,7 +1008,20 @@ SYM_CODE_END(tramp_exit) .rept 4 tramp_ventry .Lvector_start\@, 64, 0, \bhb .endr - .rept 4 + +#ifdef CONFIG_ARCH_SUPPORTS_XCALL + tramp_ventry .Lvector_start\@, 64, 0, \bhb +#else + tramp_ventry .Lvector_start\@, 32, 0, \bhb +#endif + +#ifdef CONFIG_ARCH_SUPPORTS_XINT + tramp_ventry .Lvector_start\@, 64, 0, \bhb +#else + tramp_ventry .Lvector_start\@, 32, 0, \bhb +#endif + + .rept 2 tramp_ventry .Lvector_start\@, 32, 0, \bhb .endr .endm diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index f2ddced689b5f37f4c8c7d86a12e14fa76f32261..3a6e38f45618ca636a878505aab6614d6d255739 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -110,8 +110,8 @@ WORKAROUND_HISI_HIP08_RU_PREFETCH WORKAROUND_HISILICON_1980005 HAS_XCALL HAS_XINT -KABI_RESERVE_3 -KABI_RESERVE_4 +HAS_HW_XCALL +HAS_HW_XINT KABI_RESERVE_5 KABI_RESERVE_6 KABI_RESERVE_7 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 0af1eed9ad351dbe62823f8ca0e5163f694e11d6..41861be3dcf5cf794c098a075e2548507a543102 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -35,7 +35,7 @@ #include "irq-gic-common.h" -#ifdef CONFIG_FAST_IRQ +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) #include "../../../kernel/irq/internals.h" #endif @@ -828,7 +828,7 @@ static bool gic_rpr_is_nmi_prio(void) return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)); } -static bool gic_irqnr_is_special(u32 irqnr) +bool gic_irqnr_is_special(u32 irqnr) { return irqnr >= 1020 && irqnr <= 1023; } @@ -993,7 +993,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs __gic_handle_irq_from_irqson(regs); } -#ifdef CONFIG_FAST_IRQ +#ifdef CONFIG_ARCH_SUPPORTS_XINT +DECLARE_BITMAP(irqnr_nmi_map, 1024); +#endif + +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) DECLARE_BITMAP(irqnr_xint_map, 1024); static bool can_set_xint(unsigned int hwirq) @@ -1002,12 +1006,18 @@ static bool can_set_xint(unsigned int hwirq) __get_intid_range(hwirq) == SPI_RANGE) return true; +#ifdef CONFIG_ARCH_SUPPORTS_XINT + if (hw_xint_support && __get_intid_range(hwirq) == PPI_RANGE) + return true; +#endif + return false; } static bool xint_transform(int irqno, enum xint_op op) { struct irq_data *data = irq_get_irq_data(irqno); + struct irq_desc *desc; int 
hwirq; while (data->parent_data) @@ -1018,14 +1028,29 @@ static bool xint_transform(int irqno, enum xint_op op) if (!can_set_xint(hwirq)) return false; + desc = irq_data_to_desc(data); + switch (op) { case IRQ_TO_XINT: set_bit(hwirq, irqnr_xint_map); xint_add_debugfs_entry(irqno); +#ifdef CONFIG_ARCH_SUPPORTS_XINT + if (has_v3_3_nmi() && hw_xint_support && !irq_is_nmi(desc)) { + gic_irq_enable_nmi(data); + set_bit(hwirq, irqnr_nmi_map); + } +#endif return true; case XINT_TO_IRQ: clear_bit(hwirq, irqnr_xint_map); xint_remove_debugfs_entry(irqno); +#ifdef CONFIG_ARCH_SUPPORTS_XINT + if (has_v3_3_nmi() && hw_xint_support && irq_is_nmi(desc) && + test_bit(hwirq, irqnr_nmi_map)) { + gic_irq_disable_nmi(data); + clear_bit(hwirq, irqnr_nmi_map); + } +#endif return false; case XINT_SET_CHECK: return test_bit(hwirq, irqnr_xint_map); @@ -1096,7 +1121,7 @@ static const struct proc_ops xint_proc_ops = { void register_irqchip_proc(struct irq_desc *desc, void *irqp) { - if (!is_xint_support) + if (!is_xint_support && !hw_xint_support) return; /* create /proc/irq//xint */ @@ -1105,12 +1130,63 @@ void register_irqchip_proc(struct irq_desc *desc, void *irqp) void unregister_irqchip_proc(struct irq_desc *desc) { - if (!is_xint_support) + if (!is_xint_support && !hw_xint_support) return; remove_proc_entry("xint", desc->dir); } -#endif /* CONFIG_FAST_IRQ */ +#endif + +#ifdef CONFIG_ARCH_SUPPORTS_XINT +bool is_xint(unsigned long hwirq) +{ + return test_bit(hwirq, irqnr_xint_map); +} + +static bool is_spi(unsigned long hwirq) +{ + if (__get_intid_range(hwirq) == SPI_RANGE || + __get_intid_range(hwirq) == ESPI_RANGE) + return true; + + return false; +} + +void fast_handle_xint(struct pt_regs *regs, u32 irqnr) +{ + struct pt_regs *old_regs; + struct irq_domain *domain; + struct irqaction *action; + struct irq_desc *desc; + struct irq_data *data; + + arch_nmi_enter(); + BUG_ON(in_nmi() == NMI_MASK); + __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); + old_regs = set_irq_regs(regs); + + domain = irq_get_default_host(); + data = radix_tree_lookup(&domain->revmap_tree, irqnr); + + desc = irq_data_to_desc(data); + action = desc->action; + + gic_read_nmiar(); + write_gicreg(irqnr, ICC_EOIR1_EL1); + isb(); + + if (is_spi(irqnr)) + action->handler(data->irq, action->dev_id); + else + action->handler(data->irq, raw_cpu_ptr(action->percpu_dev_id)); + gic_write_dir(irqnr); + + set_irq_regs(old_regs); + BUG_ON(!in_nmi()); + __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); + arch_nmi_exit(); +} +#endif static u32 gic_get_pribits(void) { @@ -2358,6 +2434,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base, goto out_free; } + irq_set_default_host(gic_data.domain); irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.has_rss = !!(typer & GICD_TYPER_RSS); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 548b8a5c46cfcd383a3b1304742d28506b267671..3c0f04b86c153dd761abb5c46ae5b596bd4eb4e7 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -721,8 +721,9 @@ static inline enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) } } -#ifdef CONFIG_FAST_IRQ +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) extern bool is_xint_support; +extern bool hw_xint_support; enum xint_op { XINT_TO_IRQ, @@ -733,6 +734,9 @@ enum xint_op { void register_irqchip_proc(struct irq_desc *desc, void *irqp); void unregister_irqchip_proc(struct irq_desc *desc); +bool gic_irqnr_is_special(u32 irqnr); +bool is_xint(unsigned 
long hwirq); +void fast_handle_xint(struct pt_regs *regs, u32 irqnr); #endif #endif diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index dc94c360b54bb0023bed8f8d64daaa046a4e8e66..2152125f8ae275ef7073a18120cf01b51650dc8b 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -242,7 +242,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) &dfs_irq_ops); } -#ifdef CONFIG_FAST_IRQ +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) static struct dentry *xint_dir; void xint_add_debugfs_entry(unsigned int irq) @@ -281,8 +281,8 @@ static int __init irq_debugfs_init(void) irq_dir = debugfs_create_dir("irqs", root_dir); -#ifdef CONFIG_FAST_IRQ - if (is_xint_support) +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) + if (is_xint_support || hw_xint_support) xint_dir = debugfs_create_dir("xints", root_dir); #endif diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 2340441d2a7227628e23f1dedca229ab19009052..46cef0c2cbb497467f77adc30f9c337d841925cd 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -278,6 +278,11 @@ static inline int irq_desc_is_chained(struct irq_desc *desc) return (desc->action && desc->action == &chained_action); } +static inline bool irq_is_nmi(struct irq_desc *desc) +{ + return desc->istate & IRQS_NMI; +} + #ifdef CONFIG_PM_SLEEP bool irq_pm_check_wakeup(struct irq_desc *desc); void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action); @@ -498,8 +503,9 @@ static inline void irq_remove_debugfs_entry(struct irq_desc *desc) kfree(desc->dev_name); } -#ifdef CONFIG_FAST_IRQ +#if defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XINT) extern bool is_xint_support; +extern bool hw_xint_support; void xint_add_debugfs_entry(unsigned int irq); void xint_remove_debugfs_entry(unsigned int irq); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 46094f0c9fcdadbc1be510b25df25baec36c0c5c..913c523d472d4c0b854f7fd5b41afe597085029f 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -960,11 +960,6 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; } -static bool irq_is_nmi(struct irq_desc *desc) -{ - return desc->istate & IRQS_NMI; -} - static unsigned int kstat_irqs(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 8a936c1ffad39076f28f6d11ccd48f3a6a649ca3..3bc7eec6e85976735ff155f1b9d067ca89553fe6 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -560,7 +560,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) /* The release function is promised process context */ might_sleep(); - if (!desc || desc->istate & IRQS_NMI) + if (!desc || irq_is_nmi(desc)) return -EINVAL; /* Complete initialisation of *notify */ @@ -898,7 +898,7 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on) return -EINVAL; /* Don't use NMIs as wake up interrupts please */ - if (desc->istate & IRQS_NMI) { + if (irq_is_nmi(desc)) { ret = -EINVAL; goto out_unlock; } @@ -1627,7 +1627,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ unsigned int oldtype; - if (desc->istate & IRQS_NMI) { + if (irq_is_nmi(desc)) { pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", new->name, irq, desc->irq_data.chip->name); ret = -EINVAL; @@ -2080,7 +2080,7 @@ const void *free_nmi(unsigned int irq, void *dev_id) unsigned long flags; const void *devname; - if (!desc || 
WARN_ON(!(desc->istate & IRQS_NMI))) + if (!desc || WARN_ON(!irq_is_nmi(desc))) return NULL; if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) @@ -2546,7 +2546,7 @@ void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) if (!desc || !irq_settings_is_per_cpu_devid(desc)) return; - if (WARN_ON(!(desc->istate & IRQS_NMI))) + if (WARN_ON(!irq_is_nmi(desc))) return; kfree(__free_percpu_irq(irq, dev_id)); @@ -2682,7 +2682,7 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler, return -EINVAL; /* The line cannot already be NMI */ - if (desc->istate & IRQS_NMI) + if (irq_is_nmi(desc)) return -EINVAL; action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); @@ -2743,7 +2743,7 @@ int prepare_percpu_nmi(unsigned int irq) if (!desc) return -EINVAL; - if (WARN(!(desc->istate & IRQS_NMI), + if (WARN(!irq_is_nmi(desc), KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) { ret = -EINVAL; @@ -2785,7 +2785,7 @@ void teardown_percpu_nmi(unsigned int irq) if (!desc) return; - if (WARN_ON(!(desc->istate & IRQS_NMI))) + if (WARN_ON(!irq_is_nmi(desc))) goto out; irq_nmi_teardown(desc); diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 5f2c66860ac64f244fef067b8306ac49cbbb6492..b07a2d732ffbcb4e24a1b6cb5c348a157bdc4c45 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -190,7 +190,7 @@ int irq_inject_interrupt(unsigned int irq) * - not NMI type * - activated */ - if ((desc->istate & IRQS_NMI) || !irqd_is_activated(&desc->irq_data)) + if (irq_is_nmi(desc) || !irqd_is_activated(&desc->irq_data)) err = -EINVAL; else err = check_irq_resend(desc, true);
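
Usage note: interrupts are switched onto the xint path through the per-IRQ proc entry created by register_irqchip_proc(), and the hardware xint support above reuses that interface. The sketch below is a minimal user-space illustration of toggling it; the IRQ number (45) and the "1"/"0" write values are assumptions based on the xint_proc_ops file referenced in this diff, not semantics the diff itself defines.

/*
 * Illustrative only: mark an IRQ as an xint via /proc/irq/<N>/xint.
 * Assumed semantics: writing "1" promotes the IRQ to xint (IRQ_TO_XINT),
 * writing "0" demotes it (XINT_TO_IRQ). IRQ 45 is a placeholder.
 */
#include <stdio.h>
#include <stdlib.h>

static int set_xint(unsigned int irq, int enable)
{
        char path[64];
        FILE *f;
        int ret = 0;

        snprintf(path, sizeof(path), "/proc/irq/%u/xint", irq);
        f = fopen(path, "w");
        if (!f)
                return -1;

        if (fprintf(f, "%d\n", enable ? 1 : 0) < 0)
                ret = -1;
        if (fclose(f))
                ret = -1;
        return ret;
}

int main(void)
{
        /* Promote IRQ 45 (illustrative) to the xint fast path. */
        return set_xint(45, 1) ? EXIT_FAILURE : EXIT_SUCCESS;
}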