From 860367c555928eee929d871463bc738ad97d585c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Fri, 8 Aug 2025 15:38:52 +0000 Subject: [PATCH 01/15] riscv: add SBI SSE extension definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #27060 cherry-picked from https://patch.msgid.link/20251105082639.342973-2-cleger@rivosinc.com ------------------ Add needed definitions for SBI Supervisor Software Events extension [1]. This extension enables the SBI to inject events into supervisor software much like ARM SDEI. [1] https://lists.riscv.org/g/tech-prs/message/515 Signed-off-by: Clément Léger Link: https://patch.msgid.link/20251105082639.342973-2-cleger@rivosinc.com Signed-off-by: Paul Walmsley Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- arch/riscv/include/asm/sbi.h | 61 ++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 0fab508a65b3..e70f4af72541 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -33,6 +33,7 @@ enum sbi_ext_id { SBI_EXT_PMU = 0x504D55, SBI_EXT_DBCN = 0x4442434E, SBI_EXT_STA = 0x535441, + SBI_EXT_SSE = 0x535345, /* Experimentals extensions must lie within this range */ SBI_EXT_EXPERIMENTAL_START = 0x08000000, @@ -258,6 +259,66 @@ enum sbi_ext_sta_fid { SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0, }; +enum sbi_ext_sse_fid { + SBI_SSE_EVENT_ATTR_READ = 0, + SBI_SSE_EVENT_ATTR_WRITE, + SBI_SSE_EVENT_REGISTER, + SBI_SSE_EVENT_UNREGISTER, + SBI_SSE_EVENT_ENABLE, + SBI_SSE_EVENT_DISABLE, + SBI_SSE_EVENT_COMPLETE, + SBI_SSE_EVENT_SIGNAL, + SBI_SSE_EVENT_HART_UNMASK, + SBI_SSE_EVENT_HART_MASK, +}; + +enum sbi_sse_state { + SBI_SSE_STATE_UNUSED = 0, + SBI_SSE_STATE_REGISTERED = 1, + SBI_SSE_STATE_ENABLED = 2, + SBI_SSE_STATE_RUNNING = 3, +}; + +/* SBI SSE Event Attributes. 
*/ +enum sbi_sse_attr_id { + SBI_SSE_ATTR_STATUS = 0x00000000, + SBI_SSE_ATTR_PRIO = 0x00000001, + SBI_SSE_ATTR_CONFIG = 0x00000002, + SBI_SSE_ATTR_PREFERRED_HART = 0x00000003, + SBI_SSE_ATTR_ENTRY_PC = 0x00000004, + SBI_SSE_ATTR_ENTRY_ARG = 0x00000005, + SBI_SSE_ATTR_INTERRUPTED_SEPC = 0x00000006, + SBI_SSE_ATTR_INTERRUPTED_FLAGS = 0x00000007, + SBI_SSE_ATTR_INTERRUPTED_A6 = 0x00000008, + SBI_SSE_ATTR_INTERRUPTED_A7 = 0x00000009, + + SBI_SSE_ATTR_MAX = 0x0000000A +}; + +#define SBI_SSE_ATTR_STATUS_STATE_OFFSET 0 +#define SBI_SSE_ATTR_STATUS_STATE_MASK 0x3 +#define SBI_SSE_ATTR_STATUS_PENDING_OFFSET 2 +#define SBI_SSE_ATTR_STATUS_INJECT_OFFSET 3 + +#define SBI_SSE_ATTR_CONFIG_ONESHOT BIT(0) + +#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP BIT(0) +#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE BIT(1) +#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV BIT(2) +#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP BIT(3) + +#define SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS 0x00000000 +#define SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP 0x00000001 +#define SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS 0x00008000 +#define SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW 0x00010000 +#define SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS 0x00100000 +#define SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS 0x00108000 +#define SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED 0xffff0000 +#define SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED 0xffff8000 + +#define SBI_SSE_EVENT_PLATFORM BIT(14) +#define SBI_SSE_EVENT_GLOBAL BIT(15) + struct sbi_sta_struct { __le32 sequence; __le32 flags; -- Gitee From 3b7516fe502c388a968e35f6aa60da3a3994e1fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 27 Aug 2025 09:19:07 +0800 Subject: [PATCH 02/15] riscv: add support for SBI Supervisor Software Events extension MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #27060 cherry-picked from https://patch.msgid.link/20251105082639.342973-3-cleger@rivosinc.com ------------------ The SBI SSE extension 
allows the supervisor software to be notified by the SBI of specific events that are not maskable. The context switch is handled partially by the firmware which will save registers a6 and a7. When entering the kernel we can rely on these 2 registers to set up the stack and save all the registers. Since SSE events can be delivered at any time to the kernel (including during exception handling), we need a way to locate the current_task for context tracking. On RISC-V, it is stored in the scratch register when in user space or in tp when in kernel space (in which case SSCRATCH is zero). But at the beginning of exception handling, SSCRATCH is used to swap tp and check the origin of the exception. If interrupted at that point, there is no way to reliably know where the current task_struct is located. Even checking the interruption location won't work as SSE events can be nested on top of each other, so the original interruption site might be lost at some point. In order to retrieve it reliably, store the current task in an additional __sse_entry_task per-CPU array. This array is then used to retrieve the current task based on the hart ID that is passed to the SSE event handler in a6. That being said, the way the current task struct is stored should probably be reworked to find a more reliable alternative. Since each event (and each CPU for local events) has its own context and events can preempt each other, allocate a stack (and a shadow stack if needed) for each of them (and for each CPU for local events). When completing the event, if we were coming from the kernel with interrupts disabled, simply return there. If coming from userspace or from the kernel with interrupts enabled, simulate an interrupt exception by setting IE_SIE in CSR_IP to allow delivery of signals to the user task. For instance, this can happen when a RAS event has been generated by a user application and a SIGBUS has been sent to a task. 
Signed-off-by: Clément Léger Link: https://patch.msgid.link/20251105082639.342973-3-cleger@rivosinc.com [pjw@kernel.org: cleaned up patch description and whitespace] Signed-off-by: Paul Walmsley [Lu Peng: fix up backport conficts] Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- arch/riscv/include/asm/asm.h | 13 +- arch/riscv/include/asm/sse.h | 47 ++++++ arch/riscv/include/asm/switch_to.h | 14 ++ arch/riscv/include/asm/thread_info.h | 1 + arch/riscv/kernel/Makefile | 1 + arch/riscv/kernel/asm-offsets.c | 15 ++ arch/riscv/kernel/sse.c | 154 ++++++++++++++++++++ arch/riscv/kernel/sse_entry.S | 204 +++++++++++++++++++++++++++ 8 files changed, 446 insertions(+), 3 deletions(-) create mode 100644 arch/riscv/include/asm/sse.h create mode 100644 arch/riscv/kernel/sse.c create mode 100644 arch/riscv/kernel/sse_entry.S diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index b5b84c6be01e..c0fd904e0a8f 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -89,16 +89,23 @@ #define PER_CPU_OFFSET_SHIFT 3 #endif -.macro asm_per_cpu dst sym tmp - REG_L \tmp, TASK_TI_CPU_NUM(tp) - slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT +.macro asm_per_cpu_with_cpu dst sym tmp cpu + slli \tmp, \cpu, PER_CPU_OFFSET_SHIFT la \dst, __per_cpu_offset add \dst, \dst, \tmp REG_L \tmp, 0(\dst) la \dst, \sym add \dst, \dst, \tmp .endm +.macro asm_per_cpu dst sym tmp + lw \tmp, TASK_TI_CPU_NUM(tp) + asm_per_cpu_with_cpu \dst \sym \tmp \tmp +.endm #else /* CONFIG_SMP */ +.macro asm_per_cpu_with_cpu dst sym tmp cpu + la \dst, \sym +.endm + .macro asm_per_cpu dst sym tmp la \dst, \sym .endm diff --git a/arch/riscv/include/asm/sse.h b/arch/riscv/include/asm/sse.h new file mode 100644 index 000000000000..8929a268462c --- /dev/null +++ b/arch/riscv/include/asm/sse.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Rivos Inc. 
+ */ +#ifndef __ASM_SSE_H +#define __ASM_SSE_H + +#include + +#ifdef CONFIG_RISCV_SSE + +struct sse_event_interrupted_state { + unsigned long a6; + unsigned long a7; +}; + +struct sse_event_arch_data { + void *stack; + void *shadow_stack; + unsigned long tmp; + struct sse_event_interrupted_state interrupted; + unsigned long interrupted_phys; + u32 evt_id; + unsigned int hart_id; + unsigned int cpu_id; +}; + +static inline bool sse_event_is_global(u32 evt) +{ + return !!(evt & SBI_SSE_EVENT_GLOBAL); +} + +void arch_sse_event_update_cpu(struct sse_event_arch_data *arch_evt, int cpu); +int arch_sse_init_event(struct sse_event_arch_data *arch_evt, u32 evt_id, + int cpu); +void arch_sse_free_event(struct sse_event_arch_data *arch_evt); +int arch_sse_register_event(struct sse_event_arch_data *arch_evt); + +void sse_handle_event(struct sse_event_arch_data *arch_evt, + struct pt_regs *regs); +asmlinkage void handle_sse(void); +asmlinkage void do_sse(struct sse_event_arch_data *arch_evt, + struct pt_regs *reg); + +#endif + +#endif diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 4bec66f30f27..20e6fffb0974 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -102,6 +102,19 @@ static inline void __switch_to_envcfg(struct task_struct *next) :: "r" (next->thread.envcfg) : "memory"); } +#ifdef CONFIG_RISCV_SSE +DECLARE_PER_CPU(struct task_struct *, __sse_entry_task); + +static inline void __switch_sse_entry_task(struct task_struct *next) +{ + __this_cpu_write(__sse_entry_task, next); +} +#else +static inline void __switch_sse_entry_task(struct task_struct *next) +{ +} +#endif + extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); @@ -114,6 +127,7 @@ do { \ if (has_vector()) \ __switch_to_vector(__prev, __next); \ __switch_to_envcfg(__next); \ + __switch_sse_entry_task(__next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git 
a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index a9d93a4ee09b..59357bb8ffb3 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -35,6 +35,7 @@ #define OVERFLOW_STACK_SIZE SZ_4K #define IRQ_STACK_SIZE THREAD_SIZE +#define SSE_STACK_SIZE THREAD_SIZE #ifndef __ASSEMBLY__ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 7e7a7a83c969..6f45a3890eb5 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -84,6 +84,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o obj-$(CONFIG_RISCV_SBI) += sbi.o +obj-$(CONFIG_RISCV_SSE) += sse.o sse_entry.o ifeq ($(CONFIG_RISCV_SBI), y) obj-$(CONFIG_SMP) += sbi-ipi.o obj-$(CONFIG_SMP) += cpu_ops_sbi.o diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 8c66184be58e..bfa99b5ac897 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include void asm_offsets(void); @@ -485,4 +487,17 @@ void asm_offsets(void) OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr); OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr); OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr); + +#ifdef CONFIG_RISCV_SSE + OFFSET(SSE_REG_EVT_STACK, sse_event_arch_data, stack); + OFFSET(SSE_REG_EVT_SHADOW_STACK, sse_event_arch_data, shadow_stack); + OFFSET(SSE_REG_EVT_TMP, sse_event_arch_data, tmp); + OFFSET(SSE_REG_HART_ID, sse_event_arch_data, hart_id); + OFFSET(SSE_REG_CPU_ID, sse_event_arch_data, cpu_id); + + DEFINE(SBI_EXT_SSE, SBI_EXT_SSE); + DEFINE(SBI_SSE_EVENT_COMPLETE, SBI_SSE_EVENT_COMPLETE); + #define ASM_MAX_CPUS NR_CPUS + DEFINE(ASM_NR_CPUS, ASM_MAX_CPUS); +#endif } diff --git a/arch/riscv/kernel/sse.c b/arch/riscv/kernel/sse.c new file mode 100644 index 000000000000..d2da7e23a74a --- 
/dev/null +++ b/arch/riscv/kernel/sse.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2024 Rivos Inc. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEFINE_PER_CPU(struct task_struct *, __sse_entry_task); + +void __weak sse_handle_event(struct sse_event_arch_data *arch_evt, struct pt_regs *regs) +{ +} + +void do_sse(struct sse_event_arch_data *arch_evt, struct pt_regs *regs) +{ + nmi_enter(); + + /* Retrieve missing GPRs from SBI */ + sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, arch_evt->evt_id, + SBI_SSE_ATTR_INTERRUPTED_A6, + (SBI_SSE_ATTR_INTERRUPTED_A7 - SBI_SSE_ATTR_INTERRUPTED_A6) + 1, + arch_evt->interrupted_phys, 0, 0); + + memcpy(®s->a6, &arch_evt->interrupted, sizeof(arch_evt->interrupted)); + + sse_handle_event(arch_evt, regs); + + /* + * The SSE delivery path does not uses the "standard" exception path + * (see sse_entry.S) and does not process any pending signal/softirqs + * due to being similar to a NMI. + * Some drivers (PMU, RAS) enqueue pending work that needs to be handled + * as soon as possible by bottom halves. For that purpose, set the SIP + * software interrupt pending bit which will force a software interrupt + * to be serviced once interrupts are reenabled in the interrupted + * context if they were masked or directly if unmasked. + */ + csr_set(CSR_IP, IE_SIE); + + nmi_exit(); +} + +static void *alloc_to_stack_pointer(void *alloc) +{ + return alloc ? 
alloc + SSE_STACK_SIZE : NULL; +} + +static void *stack_pointer_to_alloc(void *stack) +{ + return stack - SSE_STACK_SIZE; +} + +#ifdef CONFIG_VMAP_STACK +static void *sse_stack_alloc(unsigned int cpu) +{ + void *stack = arch_alloc_vmap_stack(SSE_STACK_SIZE, cpu_to_node(cpu)); + + return alloc_to_stack_pointer(stack); +} + +static void sse_stack_free(void *stack) +{ + vfree(stack_pointer_to_alloc(stack)); +} +#else /* CONFIG_VMAP_STACK */ +static void *sse_stack_alloc(unsigned int cpu) +{ + void *stack = kmalloc(SSE_STACK_SIZE, GFP_KERNEL); + + return alloc_to_stack_pointer(stack); +} + +static void sse_stack_free(void *stack) +{ + kfree(stack_pointer_to_alloc(stack)); +} +#endif /* CONFIG_VMAP_STACK */ + +static int sse_init_scs(int cpu, struct sse_event_arch_data *arch_evt) +{ + void *stack; + + if (!scs_is_enabled()) + return 0; + + stack = scs_alloc(cpu_to_node(cpu)); + if (!stack) + return -ENOMEM; + + arch_evt->shadow_stack = stack; + + return 0; +} + +void arch_sse_event_update_cpu(struct sse_event_arch_data *arch_evt, int cpu) +{ + arch_evt->cpu_id = cpu; + arch_evt->hart_id = cpuid_to_hartid_map(cpu); +} + +int arch_sse_init_event(struct sse_event_arch_data *arch_evt, u32 evt_id, int cpu) +{ + void *stack; + + arch_evt->evt_id = evt_id; + stack = sse_stack_alloc(cpu); + if (!stack) + return -ENOMEM; + + arch_evt->stack = stack; + + if (sse_init_scs(cpu, arch_evt)) { + sse_stack_free(arch_evt->stack); + return -ENOMEM; + } + + if (sse_event_is_global(evt_id)) { + arch_evt->interrupted_phys = + virt_to_phys(&arch_evt->interrupted); + } else { + arch_evt->interrupted_phys = + per_cpu_ptr_to_phys(&arch_evt->interrupted); + } + + arch_sse_event_update_cpu(arch_evt, cpu); + + return 0; +} + +void arch_sse_free_event(struct sse_event_arch_data *arch_evt) +{ + scs_free(arch_evt->shadow_stack); + sse_stack_free(arch_evt->stack); +} + +int arch_sse_register_event(struct sse_event_arch_data *arch_evt) +{ + struct sbiret sret; + + sret = sbi_ecall(SBI_EXT_SSE, 
SBI_SSE_EVENT_REGISTER, arch_evt->evt_id, + (unsigned long)handle_sse, (unsigned long)arch_evt, 0, + 0, 0); + + return sbi_err_map_linux_errno(sret.error); +} diff --git a/arch/riscv/kernel/sse_entry.S b/arch/riscv/kernel/sse_entry.S new file mode 100644 index 000000000000..3837a22b6e4f --- /dev/null +++ b/arch/riscv/kernel/sse_entry.S @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Rivos Inc. + */ + +#include +#include + +#include +#include +#include + +/* When entering handle_sse, the following registers are set: + * a6: contains the hartid + * a7: contains a sse_event_arch_data struct pointer + */ + +#ifdef CONFIG_SHADOW_CALL_STACK +/* gp is used as the shadow call stack pointer instead */ +.macro load_global_pointer +.endm + +/* Load the per-CPU SSE shadow call stack to gp. */ +.macro scs_load_sse_stack reg_evt + REG_L gp, SSE_REG_EVT_SHADOW_STACK(\reg_evt) +.endm +#else +/* load __global_pointer to gp */ +.macro load_global_pointer +.option push +.option norelax + la gp, __global_pointer$ +.option pop +.endm + +/* Don't load shadow stack if SCS is disabled */ +.macro scs_load_sse_stack reg_evt +.endm +#endif /* CONFIG_SHADOW_CALL_STACK */ + +SYM_CODE_START(handle_sse) + /* Save stack temporarily */ + REG_S sp, SSE_REG_EVT_TMP(a7) + /* Set entry stack */ + REG_L sp, SSE_REG_EVT_STACK(a7) + + addi sp, sp, -(PT_SIZE_ON_STACK) + REG_S ra, PT_RA(sp) + REG_S s0, PT_S0(sp) + REG_S s1, PT_S1(sp) + REG_S s2, PT_S2(sp) + REG_S s3, PT_S3(sp) + REG_S s4, PT_S4(sp) + REG_S s5, PT_S5(sp) + REG_S s6, PT_S6(sp) + REG_S s7, PT_S7(sp) + REG_S s8, PT_S8(sp) + REG_S s9, PT_S9(sp) + REG_S s10, PT_S10(sp) + REG_S s11, PT_S11(sp) + REG_S tp, PT_TP(sp) + REG_S t0, PT_T0(sp) + REG_S t1, PT_T1(sp) + REG_S t2, PT_T2(sp) + REG_S t3, PT_T3(sp) + REG_S t4, PT_T4(sp) + REG_S t5, PT_T5(sp) + REG_S t6, PT_T6(sp) + REG_S gp, PT_GP(sp) + REG_S a0, PT_A0(sp) + REG_S a1, PT_A1(sp) + REG_S a2, PT_A2(sp) + REG_S a3, PT_A3(sp) + REG_S a4, PT_A4(sp) + REG_S 
a5, PT_A5(sp) + + /* Retrieve entry sp */ + REG_L a4, SSE_REG_EVT_TMP(a7) + /* Save CSRs */ + csrr a0, CSR_EPC + csrr a1, CSR_SSTATUS + csrr a2, CSR_STVAL + csrr a3, CSR_SCAUSE + + REG_S a0, PT_EPC(sp) + REG_S a1, PT_STATUS(sp) + REG_S a2, PT_BADADDR(sp) + REG_S a3, PT_CAUSE(sp) + REG_S a4, PT_SP(sp) + + /* Disable user memory access and floating/vector computing */ + li t0, SR_SUM | SR_FS_VS + csrc CSR_STATUS, t0 + + load_global_pointer + scs_load_sse_stack a7 + + /* Restore current task struct from __sse_entry_task */ + li t1, ASM_NR_CPUS + mv t3, zero + +#ifdef CONFIG_SMP + REG_L t4, SSE_REG_HART_ID(a7) + REG_L t3, SSE_REG_CPU_ID(a7) + + bne t4, a6, .Lfind_hart_id_slowpath + +.Lcpu_id_found: +#endif + asm_per_cpu_with_cpu t2 __sse_entry_task t1 t3 + REG_L tp, 0(t2) + + mv a1, sp /* pt_regs on stack */ + + /* + * Save sscratch for restoration since we might have interrupted the + * kernel in early exception path and thus, we don't know the content of + * sscratch. + */ + csrr s4, CSR_SSCRATCH + /* In-kernel scratch is 0 */ + csrw CSR_SCRATCH, x0 + + mv a0, a7 + + call do_sse + + csrw CSR_SSCRATCH, s4 + + REG_L a0, PT_STATUS(sp) + REG_L a1, PT_EPC(sp) + REG_L a2, PT_BADADDR(sp) + REG_L a3, PT_CAUSE(sp) + csrw CSR_SSTATUS, a0 + csrw CSR_EPC, a1 + csrw CSR_STVAL, a2 + csrw CSR_SCAUSE, a3 + + REG_L ra, PT_RA(sp) + REG_L s0, PT_S0(sp) + REG_L s1, PT_S1(sp) + REG_L s2, PT_S2(sp) + REG_L s3, PT_S3(sp) + REG_L s4, PT_S4(sp) + REG_L s5, PT_S5(sp) + REG_L s6, PT_S6(sp) + REG_L s7, PT_S7(sp) + REG_L s8, PT_S8(sp) + REG_L s9, PT_S9(sp) + REG_L s10, PT_S10(sp) + REG_L s11, PT_S11(sp) + REG_L tp, PT_TP(sp) + REG_L t0, PT_T0(sp) + REG_L t1, PT_T1(sp) + REG_L t2, PT_T2(sp) + REG_L t3, PT_T3(sp) + REG_L t4, PT_T4(sp) + REG_L t5, PT_T5(sp) + REG_L t6, PT_T6(sp) + REG_L gp, PT_GP(sp) + REG_L a0, PT_A0(sp) + REG_L a1, PT_A1(sp) + REG_L a2, PT_A2(sp) + REG_L a3, PT_A3(sp) + REG_L a4, PT_A4(sp) + REG_L a5, PT_A5(sp) + + REG_L sp, PT_SP(sp) + + li a7, SBI_EXT_SSE + li a6, 
SBI_SSE_EVENT_COMPLETE + ecall + +#ifdef CONFIG_SMP +.Lfind_hart_id_slowpath: + +/* Slowpath to find the CPU id associated to the hart id */ +la t0, __cpuid_to_hartid_map + +.Lhart_id_loop: + REG_L t2, 0(t0) + beq t2, a6, .Lcpu_id_found + + /* Increment pointer and CPU number */ + addi t3, t3, 1 + addi t0, t0, RISCV_SZPTR + bltu t3, t1, .Lhart_id_loop + + /* + * This should never happen since we expect the hart_id to match one + * of our CPU, but better be safe than sorry + */ + la tp, init_task + la a0, sse_hart_id_panic_string + la t0, panic + jalr t0 + +#endif + +SYM_CODE_END(handle_sse) + +SYM_DATA_START_LOCAL(sse_hart_id_panic_string) + .ascii "Unable to match hart_id with cpu\0" +SYM_DATA_END(sse_hart_id_panic_string) -- Gitee From 29770f3bca58574ac6818ef50ececa3fc488c3f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 27 Aug 2025 09:40:00 +0800 Subject: [PATCH 03/15] drivers: firmware: add riscv SSE support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #27060 cherry-picked from https://patch.msgid.link/20251105082639.342973-4-cleger@rivosinc.com -------------- Add driver level interface to use RISC-V SSE arch support. This interface allows registering SSE handlers, and receive them. This will be used by PMU and GHES driver. 
Co-developed-by: Himanshu Chauhan Signed-off-by: Himanshu Chauhan Signed-off-by: Clément Léger Acked-by: Conor Dooley Link: https://patch.msgid.link/20251105082639.342973-4-cleger@rivosinc.com Signed-off-by: Paul Walmsley Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- MAINTAINERS | 15 + drivers/firmware/Kconfig | 1 + drivers/firmware/Makefile | 1 + drivers/firmware/riscv/Kconfig | 15 + drivers/firmware/riscv/Makefile | 3 + drivers/firmware/riscv/riscv_sse.c | 694 +++++++++++++++++++++++++++++ include/linux/riscv_sse.h | 58 +++ 7 files changed, 787 insertions(+) create mode 100644 drivers/firmware/riscv/Kconfig create mode 100644 drivers/firmware/riscv/Makefile create mode 100644 drivers/firmware/riscv/riscv_sse.c create mode 100644 include/linux/riscv_sse.h diff --git a/MAINTAINERS b/MAINTAINERS index bd9daeb31fe6..041c261ee1f0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18509,6 +18509,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git F: Documentation/devicetree/bindings/iommu/riscv,iommu.yaml F: drivers/iommu/riscv/ +RISC-V FIRMWARE DRIVERS +M: Conor Dooley +L: linux-riscv@lists.infradead.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git +F: drivers/firmware/riscv/* + RISC-V MICROCHIP FPGA SUPPORT M: Conor Dooley M: Daire McNamara @@ -18559,6 +18566,14 @@ F: drivers/perf/riscv_pmu.c F: drivers/perf/riscv_pmu_legacy.c F: drivers/perf/riscv_pmu_sbi.c +RISC-V SSE DRIVER +M: Clément Léger +R: Himanshu Chauhan +L: linux-riscv@lists.infradead.org +S: Maintained +F: drivers/firmware/riscv/riscv_sse.c +F: include/linux/riscv_sse.h + RISC-V THEAD SoC SUPPORT M: Jisheng Zhang M: Guo Ren diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 11e8d19658aa..51b23362bead 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -311,6 +311,7 @@ source "drivers/firmware/efi/Kconfig" source "drivers/firmware/imx/Kconfig" source "drivers/firmware/meson/Kconfig" source 
"drivers/firmware/psci/Kconfig" +source "drivers/firmware/riscv/Kconfig" source "drivers/firmware/smccc/Kconfig" source "drivers/firmware/tegra/Kconfig" source "drivers/firmware/xilinx/Kconfig" diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 28fcddcd688f..a7d598631cec 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-y += efi/ obj-y += imx/ obj-y += psci/ +obj-y += riscv/ obj-y += smccc/ obj-y += tegra/ obj-y += xilinx/ diff --git a/drivers/firmware/riscv/Kconfig b/drivers/firmware/riscv/Kconfig new file mode 100644 index 000000000000..8056ed3262d9 --- /dev/null +++ b/drivers/firmware/riscv/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "Risc-V Specific firmware drivers" +depends on RISCV + +config RISCV_SSE + bool "Enable SBI Supervisor Software Events support" + depends on RISCV_SBI + default y + help + The Supervisor Software Events support allow the SBI to deliver + NMI-like notifications to the supervisor mode software. When enable, + this option provides support to register callbacks on specific SSE + events. + +endmenu diff --git a/drivers/firmware/riscv/Makefile b/drivers/firmware/riscv/Makefile new file mode 100644 index 000000000000..4ccfcbbc28ea --- /dev/null +++ b/drivers/firmware/riscv/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_RISCV_SSE) += riscv_sse.o diff --git a/drivers/firmware/riscv/riscv_sse.c b/drivers/firmware/riscv/riscv_sse.c new file mode 100644 index 000000000000..edd107ade94a --- /dev/null +++ b/drivers/firmware/riscv/riscv_sse.c @@ -0,0 +1,694 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Rivos Inc. 
+ */ + +#define pr_fmt(fmt) "sse: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct sse_event { + struct list_head list; + u32 evt_id; + u32 priority; + sse_event_handler *handler; + void *handler_arg; + /* Only valid for global events */ + unsigned int cpu; + + union { + struct sse_registered_event *global; + struct sse_registered_event __percpu *local; + }; +}; + +static int sse_hp_state; +static bool sse_available __ro_after_init; +static DEFINE_SPINLOCK(events_list_lock); +static LIST_HEAD(events); +static DEFINE_MUTEX(sse_mutex); + +struct sse_registered_event { + struct sse_event_arch_data arch; + struct sse_event *event; + unsigned long attr; + bool is_enabled; +}; + +void sse_handle_event(struct sse_event_arch_data *arch_event, + struct pt_regs *regs) +{ + int ret; + struct sse_registered_event *reg_evt = + container_of(arch_event, struct sse_registered_event, arch); + struct sse_event *evt = reg_evt->event; + + ret = evt->handler(evt->evt_id, evt->handler_arg, regs); + if (ret) + pr_warn("event %x handler failed with error %d\n", evt->evt_id, ret); +} + +static struct sse_event *sse_event_get(u32 evt) +{ + struct sse_event *event = NULL; + + scoped_guard(spinlock, &events_list_lock) { + list_for_each_entry(event, &events, list) { + if (event->evt_id == evt) + return event; + } + } + + return NULL; +} + +static phys_addr_t sse_event_get_attr_phys(struct sse_registered_event *reg_evt) +{ + phys_addr_t phys; + void *addr = ®_evt->attr; + + if (sse_event_is_global(reg_evt->event->evt_id)) + phys = virt_to_phys(addr); + else + phys = per_cpu_ptr_to_phys(addr); + + return phys; +} + +static struct sse_registered_event *sse_get_reg_evt(struct sse_event *event) +{ + if (sse_event_is_global(event->evt_id)) + return event->global; + else + return per_cpu_ptr(event->local, smp_processor_id()); +} + +static int sse_sbi_event_func(struct sse_event *event, unsigned long func) +{ + struct 
sbiret ret; + u32 evt = event->evt_id; + struct sse_registered_event *reg_evt = sse_get_reg_evt(event); + + ret = sbi_ecall(SBI_EXT_SSE, func, evt, 0, 0, 0, 0, 0); + if (ret.error) { + pr_warn("Failed to execute func %lx, event %x, error %ld\n", + func, evt, ret.error); + return sbi_err_map_linux_errno(ret.error); + } + + if (func == SBI_SSE_EVENT_DISABLE) + reg_evt->is_enabled = false; + else if (func == SBI_SSE_EVENT_ENABLE) + reg_evt->is_enabled = true; + + return 0; +} + +int sse_event_disable_local(struct sse_event *event) +{ + return sse_sbi_event_func(event, SBI_SSE_EVENT_DISABLE); +} +EXPORT_SYMBOL_GPL(sse_event_disable_local); + +int sse_event_enable_local(struct sse_event *event) +{ + return sse_sbi_event_func(event, SBI_SSE_EVENT_ENABLE); +} +EXPORT_SYMBOL_GPL(sse_event_enable_local); + +static int sse_event_attr_get_no_lock(struct sse_registered_event *reg_evt, + unsigned long attr_id, unsigned long *val) +{ + struct sbiret sret; + u32 evt = reg_evt->event->evt_id; + unsigned long phys; + + phys = sse_event_get_attr_phys(reg_evt); + + sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, evt, attr_id, 1, + phys, 0, 0); + if (sret.error) { + pr_debug("Failed to get event %x attr %lx, error %ld\n", evt, + attr_id, sret.error); + return sbi_err_map_linux_errno(sret.error); + } + + *val = reg_evt->attr; + + return 0; +} + +static int sse_event_attr_set_nolock(struct sse_registered_event *reg_evt, + unsigned long attr_id, unsigned long val) +{ + struct sbiret sret; + u32 evt = reg_evt->event->evt_id; + unsigned long phys; + + reg_evt->attr = val; + phys = sse_event_get_attr_phys(reg_evt); + + sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_WRITE, evt, attr_id, 1, + phys, 0, 0); + if (sret.error) + pr_debug("Failed to set event %x attr %lx, error %ld\n", evt, + attr_id, sret.error); + + return sbi_err_map_linux_errno(sret.error); +} + +static void sse_global_event_update_cpu(struct sse_event *event, + unsigned int cpu) +{ + struct sse_registered_event 
*reg_evt = event->global; + + event->cpu = cpu; + arch_sse_event_update_cpu(®_evt->arch, cpu); +} + +static int sse_event_set_target_cpu_nolock(struct sse_event *event, + unsigned int cpu) +{ + unsigned int hart_id = cpuid_to_hartid_map(cpu); + struct sse_registered_event *reg_evt = event->global; + u32 evt = event->evt_id; + bool was_enabled; + int ret; + + if (!sse_event_is_global(evt)) + return -EINVAL; + + was_enabled = reg_evt->is_enabled; + if (was_enabled) + sse_event_disable_local(event); + + ret = sse_event_attr_set_nolock(reg_evt, SBI_SSE_ATTR_PREFERRED_HART, + hart_id); + if (ret == 0) + sse_global_event_update_cpu(event, cpu); + + if (was_enabled) + sse_event_enable_local(event); + + return 0; +} + +int sse_event_set_target_cpu(struct sse_event *event, unsigned int cpu) +{ + int ret; + + scoped_guard(mutex, &sse_mutex) { + scoped_guard(cpus_read_lock) { + if (!cpu_online(cpu)) + return -EINVAL; + + ret = sse_event_set_target_cpu_nolock(event, cpu); + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(sse_event_set_target_cpu); + +static int sse_event_init_registered(unsigned int cpu, + struct sse_registered_event *reg_evt, + struct sse_event *event) +{ + reg_evt->event = event; + + return arch_sse_init_event(®_evt->arch, event->evt_id, cpu); +} + +static void sse_event_free_registered(struct sse_registered_event *reg_evt) +{ + arch_sse_free_event(®_evt->arch); +} + +static int sse_event_alloc_global(struct sse_event *event) +{ + int err; + struct sse_registered_event *reg_evt; + + reg_evt = kzalloc(sizeof(*reg_evt), GFP_KERNEL); + if (!reg_evt) + return -ENOMEM; + + event->global = reg_evt; + err = sse_event_init_registered(smp_processor_id(), reg_evt, event); + if (err) + kfree(reg_evt); + + return err; +} + +static int sse_event_alloc_local(struct sse_event *event) +{ + int err; + unsigned int cpu, err_cpu; + struct sse_registered_event *reg_evt; + struct sse_registered_event __percpu *reg_evts; + + reg_evts = alloc_percpu(struct sse_registered_event); + if 
(!reg_evts) + return -ENOMEM; + + event->local = reg_evts; + + for_each_possible_cpu(cpu) { + reg_evt = per_cpu_ptr(reg_evts, cpu); + err = sse_event_init_registered(cpu, reg_evt, event); + if (err) { + err_cpu = cpu; + goto err_free_per_cpu; + } + } + + return 0; + +err_free_per_cpu: + for_each_possible_cpu(cpu) { + if (cpu == err_cpu) + break; + reg_evt = per_cpu_ptr(reg_evts, cpu); + sse_event_free_registered(reg_evt); + } + + free_percpu(reg_evts); + + return err; +} + +static struct sse_event *sse_event_alloc(u32 evt, u32 priority, + sse_event_handler *handler, void *arg) +{ + int err; + struct sse_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return ERR_PTR(-ENOMEM); + + event->evt_id = evt; + event->priority = priority; + event->handler_arg = arg; + event->handler = handler; + + if (sse_event_is_global(evt)) + err = sse_event_alloc_global(event); + else + err = sse_event_alloc_local(event); + + if (err) { + kfree(event); + return ERR_PTR(err); + } + + return event; +} + +static int sse_sbi_register_event(struct sse_event *event, + struct sse_registered_event *reg_evt) +{ + int ret; + + ret = sse_event_attr_set_nolock(reg_evt, SBI_SSE_ATTR_PRIO, + event->priority); + if (ret) + return ret; + + return arch_sse_register_event(®_evt->arch); +} + +static int sse_event_register_local(struct sse_event *event) +{ + int ret; + struct sse_registered_event *reg_evt; + + reg_evt = per_cpu_ptr(event->local, smp_processor_id()); + ret = sse_sbi_register_event(event, reg_evt); + if (ret) + pr_debug("Failed to register event %x: err %d\n", event->evt_id, + ret); + + return ret; +} + +static int sse_sbi_unregister_event(struct sse_event *event) +{ + return sse_sbi_event_func(event, SBI_SSE_EVENT_UNREGISTER); +} + +struct sse_per_cpu_evt { + struct sse_event *event; + unsigned long func; + cpumask_t error; +}; + +static void sse_event_per_cpu_func(void *info) +{ + int ret; + struct sse_per_cpu_evt *cpu_evt = info; + + if (cpu_evt->func == 
SBI_SSE_EVENT_REGISTER) + ret = sse_event_register_local(cpu_evt->event); + else + ret = sse_sbi_event_func(cpu_evt->event, cpu_evt->func); + + if (ret) + cpumask_set_cpu(smp_processor_id(), &cpu_evt->error); +} + +static void sse_event_free(struct sse_event *event) +{ + unsigned int cpu; + struct sse_registered_event *reg_evt; + + if (sse_event_is_global(event->evt_id)) { + sse_event_free_registered(event->global); + kfree(event->global); + } else { + for_each_possible_cpu(cpu) { + reg_evt = per_cpu_ptr(event->local, cpu); + sse_event_free_registered(reg_evt); + } + free_percpu(event->local); + } + + kfree(event); +} + +static void sse_on_each_cpu(struct sse_event *event, unsigned long func, + unsigned long revert_func) +{ + struct sse_per_cpu_evt cpu_evt; + + cpu_evt.event = event; + cpumask_clear(&cpu_evt.error); + cpu_evt.func = func; + on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1); + /* + * If there are some error reported by CPUs, revert event state on the + * other ones + */ + if (!cpumask_empty(&cpu_evt.error)) { + cpumask_t revert; + + cpumask_andnot(&revert, cpu_online_mask, &cpu_evt.error); + cpu_evt.func = revert_func; + on_each_cpu_mask(&revert, sse_event_per_cpu_func, &cpu_evt, 1); + } +} + +int sse_event_enable(struct sse_event *event) +{ + int ret = 0; + + scoped_guard(mutex, &sse_mutex) { + scoped_guard(cpus_read_lock) { + if (sse_event_is_global(event->evt_id)) { + ret = sse_event_enable_local(event); + } else { + sse_on_each_cpu(event, SBI_SSE_EVENT_ENABLE, + SBI_SSE_EVENT_DISABLE); + } + } + } + return ret; +} +EXPORT_SYMBOL_GPL(sse_event_enable); + +static int sse_events_mask(void) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_HART_MASK, 0, 0, 0, 0, 0, 0); + + return sbi_err_map_linux_errno(ret.error); +} + +static int sse_events_unmask(void) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_HART_UNMASK, 0, 0, 0, 0, 0, 0); + + return sbi_err_map_linux_errno(ret.error); +} + +static void 
sse_event_disable_nolock(struct sse_event *event) +{ + struct sse_per_cpu_evt cpu_evt; + + if (sse_event_is_global(event->evt_id)) { + sse_event_disable_local(event); + } else { + cpu_evt.event = event; + cpu_evt.func = SBI_SSE_EVENT_DISABLE; + on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1); + } +} + +void sse_event_disable(struct sse_event *event) +{ + scoped_guard(mutex, &sse_mutex) { + scoped_guard(cpus_read_lock) { + sse_event_disable_nolock(event); + } + } +} +EXPORT_SYMBOL_GPL(sse_event_disable); + +struct sse_event *sse_event_register(u32 evt, u32 priority, + sse_event_handler *handler, void *arg) +{ + struct sse_event *event; + int cpu; + int ret = 0; + + if (!sse_available) + return ERR_PTR(-EOPNOTSUPP); + + guard(mutex)(&sse_mutex); + if (sse_event_get(evt)) + return ERR_PTR(-EEXIST); + + event = sse_event_alloc(evt, priority, handler, arg); + if (IS_ERR(event)) + return event; + + scoped_guard(cpus_read_lock) { + if (sse_event_is_global(evt)) { + unsigned long preferred_hart; + + ret = sse_event_attr_get_no_lock(event->global, + SBI_SSE_ATTR_PREFERRED_HART, + &preferred_hart); + if (ret) + goto err_event_free; + + cpu = riscv_hartid_to_cpuid(preferred_hart); + sse_global_event_update_cpu(event, cpu); + + ret = sse_sbi_register_event(event, event->global); + if (ret) + goto err_event_free; + + } else { + sse_on_each_cpu(event, SBI_SSE_EVENT_REGISTER, + SBI_SSE_EVENT_DISABLE); + } + } + + scoped_guard(spinlock, &events_list_lock) + list_add(&event->list, &events); + + return event; + +err_event_free: + sse_event_free(event); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(sse_event_register); + +static void sse_event_unregister_nolock(struct sse_event *event) +{ + struct sse_per_cpu_evt cpu_evt; + + if (sse_event_is_global(event->evt_id)) { + sse_sbi_unregister_event(event); + } else { + cpu_evt.event = event; + cpu_evt.func = SBI_SSE_EVENT_UNREGISTER; + on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1); + } +} + +void sse_event_unregister(struct sse_event 
*event) +{ + scoped_guard(mutex, &sse_mutex) { + scoped_guard(cpus_read_lock) + sse_event_unregister_nolock(event); + + scoped_guard(spinlock, &events_list_lock) + list_del(&event->list); + + sse_event_free(event); + } +} +EXPORT_SYMBOL_GPL(sse_event_unregister); + +static int sse_cpu_online(unsigned int cpu) +{ + struct sse_event *event; + + scoped_guard(spinlock, &events_list_lock) { + list_for_each_entry(event, &events, list) { + if (sse_event_is_global(event->evt_id)) + continue; + + sse_event_register_local(event); + if (sse_get_reg_evt(event)) + sse_event_enable_local(event); + } + } + + /* Ready to handle events. Unmask SSE. */ + return sse_events_unmask(); +} + +static int sse_cpu_teardown(unsigned int cpu) +{ + int ret = 0; + unsigned int next_cpu; + struct sse_event *event; + + /* Mask the sse events */ + ret = sse_events_mask(); + if (ret) + return ret; + + scoped_guard(spinlock, &events_list_lock) { + list_for_each_entry(event, &events, list) { + if (!sse_event_is_global(event->evt_id)) { + if (event->global->is_enabled) + sse_event_disable_local(event); + + sse_sbi_unregister_event(event); + continue; + } + + if (event->cpu != smp_processor_id()) + continue; + + /* Update destination hart for global event */ + next_cpu = cpumask_any_but(cpu_online_mask, cpu); + ret = sse_event_set_target_cpu_nolock(event, next_cpu); + } + } + + return ret; +} + +static void sse_reset(void) +{ + struct sse_event *event; + + list_for_each_entry(event, &events, list) { + sse_event_disable_nolock(event); + sse_event_unregister_nolock(event); + } +} + +static int sse_pm_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + WARN_ON_ONCE(preemptible()); + + switch (action) { + case CPU_PM_ENTER: + sse_events_mask(); + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: + sse_events_unmask(); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block sse_pm_nb = { + .notifier_call = sse_pm_notifier, +}; + 
+/* + * Mask all CPUs and unregister all events on panic, reboot or kexec. + */ +static int sse_reboot_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + cpuhp_remove_state(sse_hp_state); + sse_reset(); + + return NOTIFY_OK; +} + +static struct notifier_block sse_reboot_nb = { + .notifier_call = sse_reboot_notifier, +}; + +static int __init sse_init(void) +{ + int ret; + + if (sbi_probe_extension(SBI_EXT_SSE) <= 0) { + pr_err("Missing SBI SSE extension\n"); + return -EOPNOTSUPP; + } + pr_info("SBI SSE extension detected\n"); + + ret = cpu_pm_register_notifier(&sse_pm_nb); + if (ret) { + pr_warn("Failed to register CPU PM notifier...\n"); + return ret; + } + + ret = register_reboot_notifier(&sse_reboot_nb); + if (ret) { + pr_warn("Failed to register reboot notifier...\n"); + goto remove_cpupm; + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sse:online", + sse_cpu_online, sse_cpu_teardown); + if (ret < 0) + goto remove_reboot; + + sse_hp_state = ret; + sse_available = true; + + return 0; + +remove_reboot: + unregister_reboot_notifier(&sse_reboot_nb); + +remove_cpupm: + cpu_pm_unregister_notifier(&sse_pm_nb); + + return ret; +} +arch_initcall(sse_init); diff --git a/include/linux/riscv_sse.h b/include/linux/riscv_sse.h new file mode 100644 index 000000000000..8ae113169a3c --- /dev/null +++ b/include/linux/riscv_sse.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Rivos Inc. 
+ */ + +#ifndef __LINUX_RISCV_SSE_H +#define __LINUX_RISCV_SSE_H + +#include +#include +#include +#include + +struct sse_event; +struct pt_regs; + +typedef int (sse_event_handler)(u32 event_num, void *arg, struct pt_regs *regs); + +#ifdef CONFIG_RISCV_SSE + +struct sse_event *sse_event_register(u32 event_num, u32 priority, + sse_event_handler *handler, void *arg); + +void sse_event_unregister(struct sse_event *evt); + +int sse_event_set_target_cpu(struct sse_event *sse_evt, unsigned int cpu); + +int sse_event_enable(struct sse_event *sse_evt); + +void sse_event_disable(struct sse_event *sse_evt); + +int sse_event_enable_local(struct sse_event *sse_evt); +int sse_event_disable_local(struct sse_event *sse_evt); + +#else +static inline struct sse_event *sse_event_register(u32 event_num, u32 priority, + sse_event_handler *handler, + void *arg) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void sse_event_unregister(struct sse_event *evt) {} + +static inline int sse_event_set_target_cpu(struct sse_event *sse_evt, + unsigned int cpu) +{ + return -EOPNOTSUPP; +} + +static inline int sse_event_enable(struct sse_event *sse_evt) +{ + return -EOPNOTSUPP; +} + +static inline void sse_event_disable(struct sse_event *sse_evt) {} +#endif +#endif /* __LINUX_RISCV_SSE_H */ -- Gitee From 3803d5706579c28913c949da7cb54086fb8d8605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 27 Aug 2025 09:47:32 +0800 Subject: [PATCH 04/15] selftests/riscv: add SSE test module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #27060 cherry-picked from https://patch.msgid.link/20251105082639.342973-6-cleger@rivosinc.com ---------------- This module, once loaded, will execute a series of tests using the SSE framework. The provided script will check for any error reported by the test module. 
Signed-off-by: Clément Léger Link: https://patch.msgid.link/20251105082639.342973-6-cleger@rivosinc.com Signed-off-by: Paul Walmsley Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- tools/testing/selftests/riscv/Makefile | 2 +- tools/testing/selftests/riscv/sse/Makefile | 5 + .../selftests/riscv/sse/module/Makefile | 16 + .../riscv/sse/module/riscv_sse_test.c | 513 ++++++++++++++++++ .../selftests/riscv/sse/run_sse_test.sh | 44 ++ 5 files changed, 579 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/riscv/sse/Makefile create mode 100644 tools/testing/selftests/riscv/sse/module/Makefile create mode 100644 tools/testing/selftests/riscv/sse/module/riscv_sse_test.c create mode 100644 tools/testing/selftests/riscv/sse/run_sse_test.sh diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile index 32f315254333..449b880541e3 100644 --- a/tools/testing/selftests/riscv/Makefile +++ b/tools/testing/selftests/riscv/Makefile @@ -5,7 +5,7 @@ ARCH ?= $(shell uname -m 2>/dev/null || echo not) ifneq (,$(filter $(ARCH),riscv)) -RISCV_SUBTARGETS ?= abi hwprobe mm vector +RISCV_SUBTARGETS ?= abi hwprobe mm vector sse else RISCV_SUBTARGETS := endif diff --git a/tools/testing/selftests/riscv/sse/Makefile b/tools/testing/selftests/riscv/sse/Makefile new file mode 100644 index 000000000000..67eaee06f213 --- /dev/null +++ b/tools/testing/selftests/riscv/sse/Makefile @@ -0,0 +1,5 @@ +TEST_GEN_MODS_DIR := module + +TEST_FILES := run_sse_test.sh + +include ../../lib.mk diff --git a/tools/testing/selftests/riscv/sse/module/Makefile b/tools/testing/selftests/riscv/sse/module/Makefile new file mode 100644 index 000000000000..02018f083456 --- /dev/null +++ b/tools/testing/selftests/riscv/sse/module/Makefile @@ -0,0 +1,16 @@ +TESTMODS_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) +KDIR ?= /lib/modules/$(shell uname -r)/build + +obj-m += riscv_sse_test.o + +# Ensure that KDIR exists, otherwise skip the compilation 
+modules: +ifneq ("$(wildcard $(KDIR))", "") + $(Q)$(MAKE) -C $(KDIR) modules KBUILD_EXTMOD=$(TESTMODS_DIR) +endif + +# Ensure that KDIR exists, otherwise skip the clean target +clean: +ifneq ("$(wildcard $(KDIR))", "") + $(Q)$(MAKE) -C $(KDIR) clean KBUILD_EXTMOD=$(TESTMODS_DIR) +endif diff --git a/tools/testing/selftests/riscv/sse/module/riscv_sse_test.c b/tools/testing/selftests/riscv/sse/module/riscv_sse_test.c new file mode 100644 index 000000000000..65df41a2d40a --- /dev/null +++ b/tools/testing/selftests/riscv/sse/module/riscv_sse_test.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Rivos Inc. + */ + +#define pr_fmt(fmt) "riscv_sse_test: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define RUN_LOOP_COUNT 1000 +#define SSE_FAILED_PREFIX "FAILED: " +#define sse_err(...) pr_err(SSE_FAILED_PREFIX __VA_ARGS__) + +struct sse_event_desc { + u32 evt_id; + const char *name; + bool can_inject; +}; + +static struct sse_event_desc sse_event_descs[] = { + { + .evt_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, + .name = "local_high_prio_ras", + }, + { + .evt_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP, + .name = "local_double_trap", + }, + { + .evt_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, + .name = "global_high_prio_ras", + }, + { + .evt_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, + .name = "local_pmu_overflow", + }, + { + .evt_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, + .name = "local_low_prio_ras", + }, + { + .evt_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, + .name = "global_low_prio_ras", + }, + { + .evt_id = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED, + .name = "local_software_injected", + }, + { + .evt_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED, + .name = "global_software_injected", + } +}; + +static struct sse_event_desc *sse_get_evt_desc(u32 evt) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sse_event_descs); i++) { + if (sse_event_descs[i].evt_id == evt) + return 
&sse_event_descs[i]; + } + + return NULL; +} + +static const char *sse_evt_name(u32 evt) +{ + struct sse_event_desc *desc = sse_get_evt_desc(evt); + + return desc != NULL ? desc->name : NULL; +} + +static bool sse_test_can_inject_event(u32 evt) +{ + struct sse_event_desc *desc = sse_get_evt_desc(evt); + + return desc != NULL ? desc->can_inject : false; +} + +static struct sbiret sbi_sse_ecall(int fid, unsigned long arg0, unsigned long arg1) +{ + return sbi_ecall(SBI_EXT_SSE, fid, arg0, arg1, 0, 0, 0, 0); +} + +static int sse_event_attr_get(u32 evt, unsigned long attr_id, + unsigned long *val) +{ + struct sbiret sret; + unsigned long *attr_buf, phys; + + attr_buf = kmalloc(sizeof(unsigned long), GFP_KERNEL); + if (!attr_buf) + return -ENOMEM; + + phys = virt_to_phys(attr_buf); + + sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, evt, attr_id, 1, + phys, 0, 0); + if (sret.error) + return sbi_err_map_linux_errno(sret.error); + + *val = *attr_buf; + + return 0; +} + +static int sse_test_signal(u32 evt, unsigned int cpu) +{ + unsigned int hart_id = cpuid_to_hartid_map(cpu); + struct sbiret ret; + + ret = sbi_sse_ecall(SBI_SSE_EVENT_SIGNAL, evt, hart_id); + if (ret.error) { + sse_err("Failed to signal event %x, error %ld\n", evt, ret.error); + return sbi_err_map_linux_errno(ret.error); + } + + return 0; +} + +static int sse_test_inject_event(struct sse_event *event, u32 evt, unsigned int cpu) +{ + int res; + unsigned long status; + + if (sse_event_is_global(evt)) { + /* + * Due to the fact the completion might happen faster than + * the call to SBI_SSE_COMPLETE in the handler, if the event was + * running on another CPU, we need to wait for the event status + * to be !RUNNING. 
+ */ + do { + res = sse_event_attr_get(evt, SBI_SSE_ATTR_STATUS, &status); + if (res) { + sse_err("Failed to get status for evt %x, error %d\n", evt, res); + return res; + } + status = status & SBI_SSE_ATTR_STATUS_STATE_MASK; + } while (status == SBI_SSE_STATE_RUNNING); + + res = sse_event_set_target_cpu(event, cpu); + if (res) { + sse_err("Failed to set cpu for evt %x, error %d\n", evt, res); + return res; + } + } + + return sse_test_signal(evt, cpu); +} + +struct fast_test_arg { + u32 evt; + int cpu; + bool completion; +}; + +static int sse_test_handler(u32 evt, void *arg, struct pt_regs *regs) +{ + int ret = 0; + struct fast_test_arg *targ = arg; + u32 test_evt = READ_ONCE(targ->evt); + int cpu = READ_ONCE(targ->cpu); + + if (evt != test_evt) { + sse_err("Received SSE event id %x instead of %x\n", test_evt, evt); + ret = -EINVAL; + } + + if (cpu != smp_processor_id()) { + sse_err("Received SSE event %d on CPU %d instead of %d\n", evt, smp_processor_id(), + cpu); + ret = -EINVAL; + } + + WRITE_ONCE(targ->completion, true); + + return ret; +} + +static void sse_run_fast_test(struct fast_test_arg *test_arg, struct sse_event *event, u32 evt) +{ + unsigned long timeout; + int ret, cpu; + + for_each_online_cpu(cpu) { + WRITE_ONCE(test_arg->completion, false); + WRITE_ONCE(test_arg->cpu, cpu); + /* Test arg is used on another CPU */ + smp_wmb(); + + ret = sse_test_inject_event(event, evt, cpu); + if (ret) { + sse_err("event %s injection failed, err %d\n", sse_evt_name(evt), ret); + return; + } + + timeout = jiffies + HZ / 100; + /* We can not use completions since they are not NMI safe */ + while (!READ_ONCE(test_arg->completion) && + time_before(jiffies, timeout)) { + cpu_relax(); + } + if (!time_before(jiffies, timeout)) { + sse_err("Failed to wait for event %s completion on CPU %d\n", + sse_evt_name(evt), cpu); + return; + } + } +} + +static void sse_test_injection_fast(void) +{ + int i, ret = 0, j; + u32 evt; + struct fast_test_arg test_arg; + struct sse_event *event; + 
pr_info("Starting SSE test (fast)\n"); + + for (i = 0; i < ARRAY_SIZE(sse_event_descs); i++) { + evt = sse_event_descs[i].evt_id; + WRITE_ONCE(test_arg.evt, evt); + + if (!sse_event_descs[i].can_inject) + continue; + + event = sse_event_register(evt, 0, sse_test_handler, + (void *)&test_arg); + if (IS_ERR(event)) { + sse_err("Failed to register event %s, err %ld\n", sse_evt_name(evt), + PTR_ERR(event)); + goto out; + } + + ret = sse_event_enable(event); + if (ret) { + sse_err("Failed to enable event %s, err %d\n", sse_evt_name(evt), ret); + goto err_unregister; + } + + pr_info("Starting testing event %s\n", sse_evt_name(evt)); + + for (j = 0; j < RUN_LOOP_COUNT; j++) + sse_run_fast_test(&test_arg, event, evt); + + pr_info("Finished testing event %s\n", sse_evt_name(evt)); + + sse_event_disable(event); +err_unregister: + sse_event_unregister(event); + } +out: + pr_info("Finished SSE test (fast)\n"); +} + +struct priority_test_arg { + unsigned long evt; + struct sse_event *event; + bool called; + u32 prio; + struct priority_test_arg *next_evt_arg; + void (*check_func)(struct priority_test_arg *arg); +}; + +static int sse_hi_priority_test_handler(u32 evt, void *arg, + struct pt_regs *regs) +{ + struct priority_test_arg *targ = arg; + struct priority_test_arg *next = READ_ONCE(targ->next_evt_arg); + + WRITE_ONCE(targ->called, 1); + + if (next) { + sse_test_signal(next->evt, smp_processor_id()); + if (!READ_ONCE(next->called)) { + sse_err("Higher priority event %s was not handled %s\n", + sse_evt_name(next->evt), sse_evt_name(evt)); + } + } + + return 0; +} + +static int sse_low_priority_test_handler(u32 evt, void *arg, struct pt_regs *regs) +{ + struct priority_test_arg *targ = arg; + struct priority_test_arg *next = READ_ONCE(targ->next_evt_arg); + + WRITE_ONCE(targ->called, 1); + + if (next) { + sse_test_signal(next->evt, smp_processor_id()); + if (READ_ONCE(next->called)) { + sse_err("Lower priority event %s was handle before %s\n", + sse_evt_name(next->evt), 
sse_evt_name(evt)); + } + } + + return 0; +} + +static void sse_test_injection_priority_arg(struct priority_test_arg *args, unsigned int args_size, + sse_event_handler handler, const char *test_name) +{ + unsigned int i; + int ret; + struct sse_event *event; + struct priority_test_arg *arg, *first_arg = NULL, *prev_arg = NULL; + + pr_info("Starting SSE priority test (%s)\n", test_name); + for (i = 0; i < args_size; i++) { + arg = &args[i]; + + if (!sse_test_can_inject_event(arg->evt)) + continue; + + WRITE_ONCE(arg->called, false); + WRITE_ONCE(arg->next_evt_arg, NULL); + if (prev_arg) + WRITE_ONCE(prev_arg->next_evt_arg, arg); + + prev_arg = arg; + + if (!first_arg) + first_arg = arg; + + event = sse_event_register(arg->evt, arg->prio, handler, (void *)arg); + if (IS_ERR(event)) { + sse_err("Failed to register event %s, err %ld\n", sse_evt_name(arg->evt), + PTR_ERR(event)); + goto release_events; + } + arg->event = event; + + if (sse_event_is_global(arg->evt)) { + /* Target event at current CPU */ + ret = sse_event_set_target_cpu(event, smp_processor_id()); + if (ret) { + sse_err("Failed to set event %s target CPU, err %d\n", + sse_evt_name(arg->evt), ret); + goto release_events; + } + } + + ret = sse_event_enable(event); + if (ret) { + sse_err("Failed to enable event %s, err %d\n", sse_evt_name(arg->evt), ret); + goto release_events; + } + } + + if (!first_arg) { + sse_err("No injectable event available\n"); + return; + } + + /* Inject first event, handler should trigger the others in chain. */ + ret = sse_test_inject_event(first_arg->event, first_arg->evt, smp_processor_id()); + if (ret) { + sse_err("SSE event %s injection failed\n", sse_evt_name(first_arg->evt)); + goto release_events; + } + + /* + * Events are injected directly on the current CPU after calling sse_test_inject_event() + * so that execution is preempted right away, no need to wait for timeout. 
+ */ + arg = first_arg; + while (arg) { + if (!READ_ONCE(arg->called)) { + sse_err("Event %s handler was not called\n", + sse_evt_name(arg->evt)); + ret = -EINVAL; + } + + + event = arg->event; + arg = READ_ONCE(arg->next_evt_arg); + } + +release_events: + + arg = first_arg; + while (arg) { + event = arg->event; + if (!event) + break; + + sse_event_disable(event); + sse_event_unregister(event); + arg = READ_ONCE(arg->next_evt_arg); + } + + pr_info("Finished SSE priority test (%s)\n", test_name); +} + +static void sse_test_injection_priority(void) +{ + struct priority_test_arg default_hi_prio_args[] = { + { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED }, + { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED }, + { .evt = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW }, + { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP }, + { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS }, + }; + + struct priority_test_arg default_low_prio_args[] = { + { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP }, + { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW }, + { .evt = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS }, + { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED }, + { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED }, + + }; + struct priority_test_arg set_prio_args[] = { + { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED, .prio = 5 }, + { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED, .prio = 10 }, + { .evt = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, .prio = 15 }, + { .evt = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, .prio = 20 }, + { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, .prio = 25 }, + { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, .prio = 30 }, + { .evt = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP, .prio = 35 }, + { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, .prio = 40 }, + }; + + 
struct priority_test_arg same_prio_args[] = { + { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, .prio = 0 }, + { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, .prio = 10 }, + { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED, .prio = 10 }, + { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED, .prio = 10 }, + { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, .prio = 20 }, + }; + + sse_test_injection_priority_arg(default_hi_prio_args, ARRAY_SIZE(default_hi_prio_args), + sse_hi_priority_test_handler, "high"); + + sse_test_injection_priority_arg(default_low_prio_args, ARRAY_SIZE(default_low_prio_args), + sse_low_priority_test_handler, "low"); + + sse_test_injection_priority_arg(set_prio_args, ARRAY_SIZE(set_prio_args), + sse_low_priority_test_handler, "set"); + + sse_test_injection_priority_arg(same_prio_args, ARRAY_SIZE(same_prio_args), + sse_low_priority_test_handler, "same_prio_args"); +} + + +static bool sse_get_inject_status(u32 evt) +{ + int ret; + unsigned long val; + + /* Check if injection is supported */ + ret = sse_event_attr_get(evt, SBI_SSE_ATTR_STATUS, &val); + if (ret) + return false; + + return !!(val & BIT(SBI_SSE_ATTR_STATUS_INJECT_OFFSET)); +} + +static void sse_init_events(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sse_event_descs); i++) { + struct sse_event_desc *desc = &sse_event_descs[i]; + + desc->can_inject = sse_get_inject_status(desc->evt_id); + if (!desc->can_inject) + pr_info("Can not inject event %s, tests using this event will be skipped\n", + desc->name); + } +} + +static int __init sse_test_init(void) +{ + sse_init_events(); + + sse_test_injection_fast(); + sse_test_injection_priority(); + + return 0; +} + +static void __exit sse_test_exit(void) +{ +} + +module_init(sse_test_init); +module_exit(sse_test_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Clément Léger "); +MODULE_DESCRIPTION("Test module for SSE"); diff --git a/tools/testing/selftests/riscv/sse/run_sse_test.sh b/tools/testing/selftests/riscv/sse/run_sse_test.sh new file mode 100644 
index 000000000000..888bc4a99cb3 --- /dev/null +++ b/tools/testing/selftests/riscv/sse/run_sse_test.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2025 Rivos Inc. + +MODULE_NAME=riscv_sse_test +DRIVER="./module/${MODULE_NAME}.ko" + +check_test_failed_prefix() { + if dmesg | grep -q "${MODULE_NAME}: FAILED:";then + echo "${MODULE_NAME} failed, please check dmesg" + exit 1 + fi +} + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +check_test_requirements() +{ + uid=$(id -u) + if [ $uid -ne 0 ]; then + echo "$0: Must be run as root" + exit $ksft_skip + fi + + if ! which insmod > /dev/null 2>&1; then + echo "$0: You need insmod installed" + exit $ksft_skip + fi + + if [ ! -f $DRIVER ]; then + echo "$0: You need to compile ${MODULE_NAME} module" + exit $ksft_skip + fi +} + +check_test_requirements + +insmod $DRIVER > /dev/null 2>&1 +rmmod $MODULE_NAME +check_test_failed_prefix + +exit 0 -- Gitee From 0401150e37dd648dacba46772d496f07b5b2d3c6 Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 09:51:23 +0800 Subject: [PATCH 05/15] riscv: Define ioremap_cache for RISC-V ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-2-hchauhan@ventanamicro.com/ ---------------- bert and einj drivers use ioremap_cache for mapping entries but ioremap_cache is not defined for RISC-V. Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- arch/riscv/include/asm/io.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 42497d487a17..662f3dd15ac5 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -30,6 +30,9 @@ #define PCI_IOBASE ((void __iomem *)PCI_IO_START) #endif /* CONFIG_MMU */ +#define ioremap_cache(addr, size) \ + ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) + /* * Emulation routines for the port-mapped IO space used by some PCI drivers. 
* These are defined as being "fully synchronous", but also "not guaranteed to -- Gitee From b7c9dc5124e8f588fdc3d09e1c96d5214ddc3a7b Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 09:52:42 +0800 Subject: [PATCH 06/15] riscv: Define arch_apei_get_mem_attribute for RISC-V ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-3-hchauhan@ventanamicro.com/ ---------------- ghes_map function uses arch_apei_get_mem_attribute to get the protection bits for a given physical address. These protection bits are then used to map the physical address. Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- arch/riscv/include/asm/acpi.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h index ffe4c9c0a5b7..2c813c89975e 100644 --- a/arch/riscv/include/asm/acpi.h +++ b/arch/riscv/include/asm/acpi.h @@ -27,6 +27,26 @@ extern int acpi_disabled; extern int acpi_noirq; extern int acpi_pci_disabled; +#ifdef CONFIG_ACPI_APEI +/* + * acpi_disable_cmcff is used in drivers/acpi/apei/hest.c for disabling + * IA-32 Architecture Corrected Machine Check (CMC) Firmware-First mode + * with a kernel command line parameter "acpi=nocmcff". But we don't + * have this IA-32 specific feature on RISC-V, this definition is only + * for compatibility. + */ +#define acpi_disable_cmcff 1 +static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) +{ + /* + * Until we have a way to look for EFI memory attributes. 
+ */ + return PAGE_KERNEL; +} +#else /* CONFIG_ACPI_APEI */ +#define acpi_disable_cmcff 0 +#endif /* !CONFIG_ACPI_APEI */ + static inline void disable_acpi(void) { acpi_disabled = 1; -- Gitee From 7551c384c131ce53c1b59ed48f42962579350363 Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 09:54:29 +0800 Subject: [PATCH 07/15] acpi: Introduce SSE in HEST notification types ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-4-hchauhan@ventanamicro.com/ ---------------- Introduce a new HEST notification type for RISC-V SSE events. The GHES entry's notification structure contains the notification to be used for a given error source. For error sources delivering events over SSE, it should contain the new SSE notification type. Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- include/acpi/actbl1.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index e490b4c37a48..32ef0bc220f1 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -1529,7 +1529,8 @@ enum acpi_hest_notify_types { ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */ ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */ ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, /* ACPI 6.2 */ - ACPI_HEST_NOTIFY_RESERVED = 12 /* 12 and greater are reserved */ + ACPI_HEST_NOTIFY_SSE = 12, /* RISCV SSE */ + ACPI_HEST_NOTIFY_RESERVED = 13 /* 13 and greater are reserved */ }; /* Values for config_write_enable bitfield above */ -- Gitee From ae816604681774d93971f5c7497244b64cabb38b Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 09:55:49 +0800 Subject: [PATCH 08/15] riscv: Add fixmap indices for GHES IRQ and SSE contexts ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-5-hchauhan@ventanamicro.com/ ---------------- GHES error handling requires fixmap entries for IRQ notifications. 
Add fixmap indices for IRQ, SSE Low and High priority notifications. Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- arch/riscv/include/asm/fixmap.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 0a55099bb734..fa3a0ec0c55c 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h @@ -38,6 +38,14 @@ enum fixed_addresses { FIX_TEXT_POKE0, FIX_EARLYCON_MEM_BASE, +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, +#ifdef CONFIG_RISCV_SSE + FIX_APEI_GHES_SSE_LOW_PRIORITY, + FIX_APEI_GHES_SSE_HIGH_PRIORITY, +#endif /* CONFIG_RISCV_SSE */ +#endif /* CONFIG_ACPI_APEI_GHES */ __end_of_permanent_fixed_addresses, /* * Temporary boot-time mappings, used by early_ioremap(), -- Gitee From 64983d5d0daaf7c4cb246e1434b562407979e20c Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 09:57:35 +0800 Subject: [PATCH 09/15] riscv: conditionally compile GHES NMI spool function ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-6-hchauhan@ventanamicro.com/ ---------------- Compile ghes_in_nmi_spool_from_list only when NMI and SEA is enabled. Otherwise compilation fails with "defined but not used" error. 
Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- drivers/acpi/apei/ghes.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index cf978aa40af8..c3abf34f82db 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1323,6 +1323,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, return rc; } +#if defined(CONFIG_HAVE_ACPI_APEI_NMI) || defined(CONFIG_ACPI_APEI_SEA) static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, enum fixed_addresses fixmap_idx) { @@ -1341,6 +1342,7 @@ static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, return ret; } +#endif #ifdef CONFIG_ACPI_APEI_SEA static LIST_HEAD(ghes_sea); -- Gitee From dfaa151d8cf7ef9ac02fbf9950a632db3a66ad02 Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 10:01:42 +0800 Subject: [PATCH 10/15] riscv: Add functions to register ghes having SSE notification ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-7-hchauhan@ventanamicro.com/ ---------------- Add functions to register the ghes entries which have SSE as notification type. The vector inside the ghes is the SSE event ID that should be registered. 
Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- drivers/firmware/riscv/riscv_sse.c | 146 +++++++++++++++++++++++++++++ include/linux/riscv_sse.h | 17 ++++ 2 files changed, 163 insertions(+) diff --git a/drivers/firmware/riscv/riscv_sse.c b/drivers/firmware/riscv/riscv_sse.c index edd107ade94a..672b9970ad5d 100644 --- a/drivers/firmware/riscv/riscv_sse.c +++ b/drivers/firmware/riscv/riscv_sse.c @@ -5,6 +5,8 @@ #define pr_fmt(fmt) "sse: " fmt +#include +#include #include #include #include @@ -692,3 +694,147 @@ static int __init sse_init(void) return ret; } arch_initcall(sse_init); + +struct sse_ghes_callback { + struct list_head head; + struct ghes *ghes; + sse_event_handler *callback; +}; + +struct sse_ghes_event_data { + struct list_head head; + u32 event_num; + struct list_head callback_list; + struct sse_event *event; +}; + +static DEFINE_SPINLOCK(sse_ghes_event_list_lock); +static LIST_HEAD(sse_ghes_event_list); + +static int sse_ghes_handler(u32 event_num, void *arg, struct pt_regs *regs) +{ + struct sse_ghes_event_data *ev_data = arg; + struct sse_ghes_callback *cb = NULL; + + list_for_each_entry(cb, &ev_data->callback_list, head) { + if (cb && cb->ghes && cb->callback) + cb->callback(ev_data->event_num, cb->ghes, regs); + } + + return 0; +} + +int sse_register_ghes(struct ghes *ghes, sse_event_handler *lo_cb, + sse_event_handler *hi_cb) +{ + struct sse_ghes_event_data *ev_data, *evd; + struct sse_ghes_callback *cb; + u32 ev_num; + int err; + + if (!sse_available) + return -EOPNOTSUPP; + if (!ghes || !lo_cb || !hi_cb) + return -EINVAL; + + ev_num = ghes->generic->notify.vector; + + ev_data = NULL; + spin_lock(&sse_ghes_event_list_lock); + list_for_each_entry(evd, &sse_ghes_event_list, head) { + if (evd->event_num == ev_num) { + ev_data = evd; + break; + } + } + spin_unlock(&sse_ghes_event_list_lock); + + if (!ev_data) { + ev_data = kzalloc(sizeof(*ev_data), GFP_KERNEL); + if (!ev_data) + return -ENOMEM; + + 
INIT_LIST_HEAD(&ev_data->head); + ev_data->event_num = ev_num; + + INIT_LIST_HEAD(&ev_data->callback_list); + + ev_data->event = sse_event_register(ev_num, ev_num, + sse_ghes_handler, ev_data); + if (IS_ERR(ev_data->event)) { + pr_err("%s: Couldn't register event 0x%x\n", __func__, ev_num); + kfree(ev_data); + return -ENOMEM; + } + + err = sse_event_enable(ev_data->event); + if (err) { + pr_err("%s: Couldn't enable event 0x%x\n", __func__, ev_num); + sse_event_unregister(ev_data->event); + kfree(ev_data); + return err; + } + + spin_lock(&sse_ghes_event_list_lock); + list_add_tail(&ev_data->head, &sse_ghes_event_list); + spin_unlock(&sse_ghes_event_list_lock); + } + + list_for_each_entry(cb, &ev_data->callback_list, head) { + if (cb->ghes == ghes) + return -EALREADY; + } + + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return -ENOMEM; + INIT_LIST_HEAD(&cb->head); + cb->ghes = ghes; + cb->callback = lo_cb; + list_add_tail(&cb->head, &ev_data->callback_list); + + return 0; +} + +int sse_unregister_ghes(struct ghes *ghes) +{ + struct sse_ghes_event_data *ev_data, *tmp; + struct sse_ghes_callback *cb; + int free_ev_data = 0; + + if (!ghes) + return -EINVAL; + + spin_lock(&sse_ghes_event_list_lock); + + list_for_each_entry_safe(ev_data, tmp, &sse_ghes_event_list, head) { + list_for_each_entry(cb, &ev_data->callback_list, head) { + if (cb->ghes != ghes) + continue; + + list_del(&cb->head); + kfree(cb); + break; + } + + if (list_empty(&ev_data->callback_list)) + free_ev_data = 1; + + if (free_ev_data) { + spin_unlock(&sse_ghes_event_list_lock); + + sse_event_disable(ev_data->event); + sse_event_unregister(ev_data->event); + ev_data->event = NULL; + + spin_lock(&sse_ghes_event_list_lock); + + list_del(&ev_data->head); + kfree(ev_data); + } + } + + spin_unlock(&sse_ghes_event_list_lock); + + return 0; +} diff --git a/include/linux/riscv_sse.h b/include/linux/riscv_sse.h index 8ae113169a3c..f4a0a7add790 100644 --- a/include/linux/riscv_sse.h +++ 
b/include/linux/riscv_sse.h @@ -14,6 +14,8 @@ struct sse_event; struct pt_regs; +struct ghes; + typedef int (sse_event_handler)(u32 event_num, void *arg, struct pt_regs *regs); #ifdef CONFIG_RISCV_SSE @@ -32,6 +34,9 @@ void sse_event_disable(struct sse_event *sse_evt); int sse_event_enable_local(struct sse_event *sse_evt); int sse_event_disable_local(struct sse_event *sse_evt); +int sse_register_ghes(struct ghes *ghes, sse_event_handler *lo_cb, + sse_event_handler *hi_cb); +int sse_unregister_ghes(struct ghes *ghes); #else static inline struct sse_event *sse_event_register(u32 event_num, u32 priority, sse_event_handler *handler, @@ -54,5 +59,17 @@ static inline int sse_event_enable(struct sse_event *sse_evt) } static inline void sse_event_disable(struct sse_event *sse_evt) {} + +static inline int sse_register_ghes(struct ghes *ghes, sse_event_handler *lo_cb, + sse_event_handler *hi_cb) +{ + return -EOPNOTSUPP; +} + +static inline int sse_unregister_ghes(struct ghes *ghes) +{ + return -EOPNOTSUPP; +} + #endif #endif /* __LINUX_RISCV_SSE_H */ -- Gitee From 6b45b014ec6cecd27bf8f12483af3f63edf0ca3a Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 10:02:55 +0800 Subject: [PATCH 11/15] riscv: Add RISC-V entries in processor type and ISA strings ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-8-hchauhan@ventanamicro.com/ ---------------- - Add RISCV in processor type - Add RISCV32/64 in ISA Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- drivers/firmware/efi/cper.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 2ff95d995f67..81cb85b5a9fa 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -110,6 +110,7 @@ static const char * const proc_type_strs[] = { "IA32/X64", "IA64", "ARM", + "RISCV", }; static const char * const proc_isa_strs[] = { @@ -118,6 +119,8 @@ static const char * const 
proc_isa_strs[] = { "X64", "ARM A32/T32", "ARM A64", + "RISCV32", + "RISCV64", }; const char * const cper_proc_error_type_strs[] = { -- Gitee From 38eb79e7c053bbb8e6a8877d2be246eea29edd43 Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 10:08:02 +0800 Subject: [PATCH 12/15] riscv: Introduce HEST SSE notification handlers ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-9-hchauhan@ventanamicro.com/ ---------------- - Functions to register a ghes entry with SSE - Add Handlers for low/high priority events - Call ghes common handler to handle an error event Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- drivers/acpi/apei/ghes.c | 90 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index c3abf34f82db..b41823998b9c 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -17,6 +17,7 @@ * Author: Huang Ying */ +#include #include #include #include @@ -97,6 +98,11 @@ #define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses #endif +#ifndef CONFIG_RISCV_SSE +#define FIX_APEI_GHES_SSE_LOW_PRIORITY __end_of_fixed_addresses +#define FIX_APEI_GHES_SSE_HIGH_PRIORITY __end_of_fixed_addresses +#endif + static ATOMIC_NOTIFIER_HEAD(ghes_report_chain); static inline bool is_hest_type_generic_v2(struct ghes *ghes) @@ -1497,6 +1503,63 @@ static int apei_sdei_unregister_ghes(struct ghes *ghes) return sdei_unregister_ghes(ghes); } +#if defined(CONFIG_ACPI_APEI_SSE) +/* SSE Handlers */ +static int __ghes_sse_callback(struct ghes *ghes, + enum fixed_addresses fixmap_idx) +{ + if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) { + irq_work_queue(&ghes_proc_irq_work); + + return 0; + } + + return -ENOENT; +} + +/* Low priority */ +static int ghes_sse_lo_callback(u32 event_num, void *arg, struct pt_regs *regs) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sse_lo); + struct 
ghes *ghes = arg; + int err; + + raw_spin_lock(&ghes_notify_lock_sse_lo); + err = __ghes_sse_callback(ghes, FIX_APEI_GHES_SSE_LOW_PRIORITY); + raw_spin_unlock(&ghes_notify_lock_sse_lo); + + return err; +} + +/* High priority */ +static int ghes_sse_hi_callback(u32 event_num, void *arg, struct pt_regs *regs) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sse_hi); + struct ghes *ghes = arg; + int err; + + raw_spin_lock(&ghes_notify_lock_sse_hi); + err = __ghes_sse_callback(ghes, FIX_APEI_GHES_SSE_HIGH_PRIORITY); + raw_spin_unlock(&ghes_notify_lock_sse_hi); + + return err; +} + +static int apei_sse_register_ghes(struct ghes *ghes) +{ + return sse_register_ghes(ghes, ghes_sse_lo_callback, + ghes_sse_hi_callback); +} + +static int apei_sse_unregister_ghes(struct ghes *ghes) +{ + return sse_unregister_ghes(ghes); +} +#else /* CONFIG_ACPI_APEI_SSE */ +static int apei_sse_register_ghes(struct ghes *ghes) { return -EOPNOTSUPP; } +static int apei_sse_unregister_ghes(struct ghes *ghes) { return -EOPNOTSUPP; } +#endif + static int ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; @@ -1543,6 +1606,14 @@ static int ghes_probe(struct platform_device *ghes_dev) pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", generic->header.source_id); goto err; + case ACPI_HEST_NOTIFY_SSE: + if (!IS_ENABLED(CONFIG_ACPI_APEI_SSE)) { + pr_warn(GHES_PFX "Generic hardware error source: %d notified via SSE is not supported\n", + generic->header.source_id); + rc = -EOPNOTSUPP; + goto err; + } + break; default: pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", generic->notify.type, generic->header.source_id); @@ -1606,6 +1677,17 @@ static int ghes_probe(struct platform_device *ghes_dev) if (rc) goto err; break; + + case ACPI_HEST_NOTIFY_SSE: + rc = apei_sse_register_ghes(ghes); + if (rc) { + pr_err(GHES_PFX "Failed to register for SSE notification on vector %d\n", + 
generic->notify.vector); + goto err; + } + pr_info(GHES_PFX "Registered SSE notification on vector %d\n", + generic->notify.vector); + break; default: BUG(); } @@ -1635,7 +1717,6 @@ static int ghes_probe(struct platform_device *ghes_dev) static int ghes_remove(struct platform_device *ghes_dev) { - int rc; struct ghes *ghes; struct acpi_hest_generic *generic; @@ -1669,9 +1750,10 @@ static int ghes_remove(struct platform_device *ghes_dev) ghes_nmi_remove(ghes); break; case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: - rc = apei_sdei_unregister_ghes(ghes); - if (rc) - return rc; + apei_sdei_unregister_ghes(ghes); + break; + case ACPI_HEST_NOTIFY_SSE: + apei_sse_unregister_ghes(ghes); break; default: BUG(); -- Gitee From 2641fc6b9904ae00ab2a5318048edfce5b13309d Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 10:09:21 +0800 Subject: [PATCH 13/15] riscv: Add config option to enable APEI SSE handler ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-10-hchauhan@ventanamicro.com/ ---------------- APEI SSE handlers can be enabled/disabled with this config option. When enabled, the SSE registration is done for GHES entries having notification type set to SSE. When disabled, the registration functions return a not-supported error. 
Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- drivers/acpi/apei/Kconfig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 070c07d68dfb..897ff8ce7637 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -46,6 +46,11 @@ config ACPI_APEI_SEA depends on ARM64 && ACPI_APEI_GHES default y +config ACPI_APEI_SSE + bool + depends on RISCV && RISCV_SSE && ACPI_APEI_GHES + default y + config ACPI_APEI_MEMORY_FAILURE bool "APEI memory error recovering support" depends on ACPI_APEI && MEMORY_FAILURE -- Gitee From e6e5efc20ca0596eeffb5ab0230f5e697a52bb11 Mon Sep 17 00:00:00 2001 From: Himanshu Chauhan Date: Wed, 27 Aug 2025 10:12:16 +0800 Subject: [PATCH 14/15] riscv: Enable APEI and NMI safe cmpxchg options required for RAS ANBZ: #27060 cherry-picked from https://lore.kernel.org/all/20250227123628.2931490-11-hchauhan@ventanamicro.com/ ---------------- Enable the APEI option so that APEI GHES options are visible. Enable SAFE_CMPXCHG option required for GHES error handling. 
Signed-off-by: Himanshu Chauhan Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- arch/riscv/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 39e53afab8cf..e1b2c975f7bd 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -47,6 +47,7 @@ config RISCV select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if 64BIT && MMU select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX @@ -141,6 +142,7 @@ config RISCV select HAVE_MOVE_PMD select HAVE_MOVE_PUD select HAVE_PCI + select HAVE_ACPI_APEI if ACPI select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP -- Gitee From 0ed93973d7f47e4b93627107f2d7af32cb0f0e1d Mon Sep 17 00:00:00 2001 From: Lu Peng Date: Thu, 13 Nov 2025 15:58:19 +0800 Subject: [PATCH 15/15] anolis: riscv: Enable APEI GHES driver and SSE driver in anolis defconfig ANBZ: #27060 ------------ The APEI GHES driver is very important for error handling on ACPI based platforms so enable it in defconfig. It depends on the SSE driver, so also enable SSE driver in defconfig. 
Signed-off-by: Lu Peng Signed-off-by: liuqingtao --- anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI | 1 + anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI_GHES | 2 +- .../L1-RECOMMEND/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | 1 + anolis/configs/L1-RECOMMEND/riscv/CONFIG_HAVE_ACPI_APEI | 1 + anolis/configs/L1-RECOMMEND/riscv/CONFIG_RISCV_SSE | 1 + .../configs/L2-OPTIONAL/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | 1 - anolis/configs/L2-OPTIONAL/riscv/CONFIG_HAVE_ACPI_APEI | 1 - 7 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI create mode 100644 anolis/configs/L1-RECOMMEND/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG create mode 100644 anolis/configs/L1-RECOMMEND/riscv/CONFIG_HAVE_ACPI_APEI create mode 100644 anolis/configs/L1-RECOMMEND/riscv/CONFIG_RISCV_SSE delete mode 100644 anolis/configs/L2-OPTIONAL/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG delete mode 100644 anolis/configs/L2-OPTIONAL/riscv/CONFIG_HAVE_ACPI_APEI diff --git a/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI new file mode 100644 index 000000000000..42bf989960ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI @@ -0,0 +1 @@ +CONFIG_ACPI_APEI=y \ No newline at end of file diff --git a/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI_GHES b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI_GHES index 1c74c4cf2516..cf59acd80b0d 100644 --- a/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI_GHES +++ b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ACPI_APEI_GHES @@ -1 +1 @@ -# CONFIG_ACPI_APEI_GHES is not set +CONFIG_ACPI_APEI_GHES=y \ No newline at end of file diff --git a/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG new file mode 100644 index 000000000000..b6a5f67dd75c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -0,0 +1 @@ 
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y \ No newline at end of file diff --git a/anolis/configs/L1-RECOMMEND/riscv/CONFIG_HAVE_ACPI_APEI b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_HAVE_ACPI_APEI new file mode 100644 index 000000000000..42bf989960ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_HAVE_ACPI_APEI @@ -0,0 +1 @@ +CONFIG_HAVE_ACPI_APEI=y \ No newline at end of file diff --git a/anolis/configs/L1-RECOMMEND/riscv/CONFIG_RISCV_SSE b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_RISCV_SSE new file mode 100644 index 000000000000..8bbcd54f2025 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/riscv/CONFIG_RISCV_SSE @@ -0,0 +1 @@ +CONFIG_RISCV_SSE=y diff --git a/anolis/configs/L2-OPTIONAL/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG b/anolis/configs/L2-OPTIONAL/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG deleted file mode 100644 index 9093c39d3cdd..000000000000 --- a/anolis/configs/L2-OPTIONAL/riscv/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG is not set diff --git a/anolis/configs/L2-OPTIONAL/riscv/CONFIG_HAVE_ACPI_APEI b/anolis/configs/L2-OPTIONAL/riscv/CONFIG_HAVE_ACPI_APEI deleted file mode 100644 index b73d529766bd..000000000000 --- a/anolis/configs/L2-OPTIONAL/riscv/CONFIG_HAVE_ACPI_APEI +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_HAVE_ACPI_APEI is not set -- Gitee