diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 6d16939ac10c032be13b7710dbccdd970f67d5e1..15b44e998c6e7c80dc966d03270147a047a8450a 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -7976,6 +7976,17 @@ error/annotated fault.
 
 See KVM_EXIT_MEMORY_FAULT for more information.
 
+7.35 KVM_CAP_RISCV_MP_STATE_RESET
+---------------------------------
+
+:Architectures: riscv
+:Type: VM
+:Parameters: None
+:Returns: 0 on success, -EINVAL if arg[0] is not zero
+
+When this capability is enabled, KVM resets the VCPU when setting
+MP_STATE_INIT_RECEIVED through IOCTL.  The original MP_STATE is preserved.
+
 8. Other capabilities.
 ======================
 
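For context, the capability is VM-scoped and is consumed by KVM_SET_MP_STATE.
A minimal userspace sketch of the intended flow (illustration only, not part
of the patch; vm_fd and vcpu_fd are assumed to refer to an already-created VM
and VCPU):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int reset_vcpu_via_mp_state(int vm_fd, int vcpu_fd)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_RISCV_MP_STATE_RESET,  /* no flags, no args */
        };
        struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_INIT_RECEIVED };

        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
                return -1;
        /* With the cap enabled, this performs an in-kernel VCPU reset. */
        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}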
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 1f37b600ca47925ee45d67e5b4822ff358843119..3b643b9efc07b9ad7f35c304b81ff6ac71eb21d1 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
 	/* CPU AIA CSR context of Guest VCPU */
 	struct kvm_vcpu_aia_csr guest_csr;
 
-	/* CPU AIA CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_aia_csr guest_reset_csr;
-
 	/* Guest physical address of IMSIC for this VCPU */
 	gpa_t imsic_addr;
 
diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h
deleted file mode 100644
index 6dd1a4809ec181bff5373fd7d651090729ea7a41..0000000000000000000000000000000000000000
--- a/arch/riscv/include/asm/kvm_aia_aplic.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2021 Western Digital Corporation or its affiliates.
- * Copyright (C) 2022 Ventana Micro Systems Inc.
- */
-#ifndef __KVM_RISCV_AIA_IMSIC_H
-#define __KVM_RISCV_AIA_IMSIC_H
-
-#include <linux/bitops.h>
-
-#define APLIC_MAX_IDC BIT(14)
-#define APLIC_MAX_SOURCE 1024
-
-#define APLIC_DOMAINCFG 0x0000
-#define APLIC_DOMAINCFG_RDONLY 0x80000000
-#define APLIC_DOMAINCFG_IE BIT(8)
-#define APLIC_DOMAINCFG_DM BIT(2)
-#define APLIC_DOMAINCFG_BE BIT(0)
-
-#define APLIC_SOURCECFG_BASE 0x0004
-#define APLIC_SOURCECFG_D BIT(10)
-#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
-#define APLIC_SOURCECFG_SM_MASK 0x00000007
-#define APLIC_SOURCECFG_SM_INACTIVE 0x0
-#define APLIC_SOURCECFG_SM_DETACH 0x1
-#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
-#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
-#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
-#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
-
-#define APLIC_IRQBITS_PER_REG 32
-
-#define APLIC_SETIP_BASE 0x1c00
-#define APLIC_SETIPNUM 0x1cdc
-
-#define APLIC_CLRIP_BASE 0x1d00
-#define APLIC_CLRIPNUM 0x1ddc
-
-#define APLIC_SETIE_BASE 0x1e00
-#define APLIC_SETIENUM 0x1edc
-
-#define APLIC_CLRIE_BASE 0x1f00
-#define APLIC_CLRIENUM 0x1fdc
-
-#define APLIC_SETIPNUM_LE 0x2000
-#define APLIC_SETIPNUM_BE 0x2004
-
-#define APLIC_GENMSI 0x3000
-
-#define APLIC_TARGET_BASE 0x3004
-#define APLIC_TARGET_HART_IDX_SHIFT 18
-#define APLIC_TARGET_HART_IDX_MASK 0x3fff
-#define APLIC_TARGET_GUEST_IDX_SHIFT 12
-#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
-#define APLIC_TARGET_IPRIO_MASK 0xff
-#define APLIC_TARGET_EIID_MASK 0x7ff
-
-#endif
diff --git a/arch/riscv/include/asm/kvm_aia_imsic.h b/arch/riscv/include/asm/kvm_aia_imsic.h
deleted file mode 100644
index da5881d2bde09f928351f7fe9b964dc7b220739a..0000000000000000000000000000000000000000
--- a/arch/riscv/include/asm/kvm_aia_imsic.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2021 Western Digital Corporation or its affiliates.
- * Copyright (C) 2022 Ventana Micro Systems Inc.
- */
-#ifndef __KVM_RISCV_AIA_IMSIC_H
-#define __KVM_RISCV_AIA_IMSIC_H
-
-#include <linux/types.h>
-#include <asm/csr.h>
-
-#define IMSIC_MMIO_PAGE_SHIFT 12
-#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
-#define IMSIC_MMIO_PAGE_LE 0x00
-#define IMSIC_MMIO_PAGE_BE 0x04
-
-#define IMSIC_MIN_ID 63
-#define IMSIC_MAX_ID 2048
-
-#define IMSIC_EIDELIVERY 0x70
-
-#define IMSIC_EITHRESHOLD 0x72
-
-#define IMSIC_EIP0 0x80
-#define IMSIC_EIP63 0xbf
-#define IMSIC_EIPx_BITS 32
-
-#define IMSIC_EIE0 0xc0
-#define IMSIC_EIE63 0xff
-#define IMSIC_EIEx_BITS 32
-
-#define IMSIC_FIRST IMSIC_EIDELIVERY
-#define IMSIC_LAST IMSIC_EIE63
-
-#define IMSIC_MMIO_SETIPNUM_LE 0x00
-#define IMSIC_MMIO_SETIPNUM_BE 0x04
-
-#endif
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index f56f0838abc0a69a71d0d26485c51e2cbff004ca..bfbf9e74ad75257ebe25e91654c38d905092e51a 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -76,6 +76,11 @@ struct kvm_vcpu_stat {
 	u64 csr_exit_kernel;
 	u64 signal_exits;
 	u64 exits;
+	u64 instr_illegal_exits;
+	u64 load_misaligned_exits;
+	u64 store_misaligned_exits;
+	u64 load_access_exits;
+	u64 store_access_exits;
 };
 
 struct kvm_arch_memory_slot {
@@ -103,6 +108,9 @@ struct kvm_arch {
 
 	/* AIA Guest/VM context */
 	struct kvm_aia aia;
+
+	/* KVM_CAP_RISCV_MP_STATE_RESET */
+	bool mp_state_reset;
 };
 
 struct kvm_cpu_trap {
@@ -176,6 +184,12 @@ struct kvm_vcpu_smstateen_csr {
 	unsigned long sstateen0;
 };
 
+struct kvm_vcpu_reset_state {
+	spinlock_t lock;
+	unsigned long pc;
+	unsigned long a1;
+};
+
 struct kvm_vcpu_arch {
 	/* VCPU ran at least once */
 	bool ran_atleast_once;
@@ -210,11 +224,8 @@ struct kvm_vcpu_arch {
 	/* CPU Smstateen CSR context of Guest VCPU */
 	struct kvm_vcpu_smstateen_csr smstateen_csr;
 
-	/* CPU context upon Guest VCPU reset */
-	struct kvm_cpu_context guest_reset_context;
-
-	/* CPU CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_csr guest_reset_csr;
+	/* CPU reset state of Guest VCPU */
+	struct kvm_vcpu_reset_state reset_state;
 
 	/*
 	 * VCPU interrupts
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index b96705258cf9641fbc43810cda7c01f970db1f58..439ab2b3534f8ed1f5e0578b96e51ecda140fb58 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -55,6 +55,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 				     struct kvm_run *run,
 				     u32 type, u64 flags);
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1);
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
 				   const struct kvm_one_reg *reg);
@@ -85,6 +88,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
index 27f5bccdd8b02faf9e97cd225b89f3ee84f56065..57a798a4cb0d7d57de99462c7d12e5d5dc7f1306 100644
--- a/arch/riscv/include/asm/kvm_vcpu_vector.h
+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -33,8 +33,7 @@ void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
 					 unsigned long *isa);
 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
 
 #else
@@ -62,8 +61,7 @@ static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
 {
 }
 
-static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-						       struct kvm_cpu_context *cntx)
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 825e2fa66dfb906ab753b1562212a17195611358..85cf54a2d71701de4271c90d18cd022336dd3c22 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -203,6 +203,7 @@ enum KVM_RISCV_SBI_EXT_ID {
 	KVM_RISCV_SBI_EXT_VENDOR,
 	KVM_RISCV_SBI_EXT_DBCN,
 	KVM_RISCV_SBI_EXT_STA,
+	KVM_RISCV_SBI_EXT_SUSP,
 	KVM_RISCV_SBI_EXT_MAX,
 };
 
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index ecf008b5b2a1514100a5ebcb3d24a0aecc40d6a0..756bf0b68c3c1f49ffe72841c98db8df25240913 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -134,6 +134,12 @@ secondary_start_sbi:
 	csrw CSR_IE, zero
 	csrw CSR_IP, zero
 
+#ifndef CONFIG_RISCV_M_MODE
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
+#endif
+
 	/* Load the global pointer */
 	.option push
 	.option norelax
@@ -231,6 +237,10 @@ SYM_CODE_START(_start_kernel)
 	 * to hand it to us.
 	 */
 	csrr a0, CSR_MHARTID
+#else
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
 #endif /* CONFIG_RISCV_M_MODE */
 
 	/* Load the global pointer */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index c9646521f1132e5a9ea8b1e1c327823224927bbc..27375eecdb974189993efacee73fd6c08f9d6298 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -27,6 +27,7 @@ kvm-y += vcpu_sbi_base.o
 kvm-y += vcpu_sbi_replace.o
 kvm-y += vcpu_sbi_hsm.o
 kvm-y += vcpu_sbi_sta.o
+kvm-y += vcpu_sbi_system.o
 kvm-y += vcpu_timer.o
 kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
 kvm-y += aia.o
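The head.S hunks above write 0x2 to scounteren on both boot paths. For
reference, illustration only (bit positions per the RISC-V privileged spec;
the macro names are hypothetical): that value enables only the TM bit, so
user mode can read the time CSR while cycle and instret remain trapped.

#define SCOUNTEREN_CY (1UL << 0) /* cycle   */
#define SCOUNTEREN_TM (1UL << 1) /* time    */
#define SCOUNTEREN_IR (1UL << 2) /* instret */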
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index a944294f6f23a70335070dc877588321429da0de..596209f1a6ff2fb548caf7194a22206df5a9fdf4 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -10,12 +10,12 @@
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
+#include <linux/irqchip/riscv-imsic.h>
 #include <linux/irqdomain.h>
 #include <linux/kvm_host.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <asm/cpufeature.h>
-#include <asm/kvm_aia_imsic.h>
 
 struct aia_hgei_control {
 	raw_spinlock_t lock;
@@ -394,6 +394,8 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
 {
 	int ret = -ENOENT;
 	unsigned long flags;
+	const struct imsic_global_config *gc;
+	const struct imsic_local_config *lc;
 	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
 
 	if (!kvm_riscv_aia_available() || !hgctrl)
@@ -409,11 +411,14 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
 
 	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
 
-	/* TODO: To be updated later by AIA IMSIC HW guest file support */
-	if (hgei_va)
-		*hgei_va = NULL;
-	if (hgei_pa)
-		*hgei_pa = 0;
+	gc = imsic_get_global_config();
+	lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
+	if (lc && ret > 0) {
+		if (hgei_va)
+			*hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
+		if (hgei_pa)
+			*hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
+	}
 
 	return ret;
 }
@@ -600,9 +605,11 @@ void kvm_riscv_aia_disable(void)
 int kvm_riscv_aia_init(void)
 {
 	int rc;
+	const struct imsic_global_config *gc;
 
 	if (!riscv_isa_extension_available(NULL, SxAIA))
 		return -ENODEV;
+	gc = imsic_get_global_config();
 
 	/* Figure-out number of bits in HGEIE */
 	csr_write(CSR_HGEIE, -1UL);
@@ -614,17 +621,17 @@ int kvm_riscv_aia_init(void)
 	/*
 	 * Number of usable HGEI lines should be minimum of per-HART
 	 * IMSIC guest files and number of bits in HGEIE
-	 *
-	 * TODO: To be updated later by AIA IMSIC HW guest file support
 	 */
-	kvm_riscv_aia_nr_hgei = 0;
+	if (gc)
+		kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
+					    BIT(gc->guest_index_bits) - 1);
+	else
+		kvm_riscv_aia_nr_hgei = 0;
 
-	/*
-	 * Find number of guest MSI IDs
-	 *
-	 * TODO: To be updated later by AIA IMSIC HW guest file support
-	 */
+	/* Find number of guest MSI IDs */
 	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
+	if (gc && kvm_riscv_aia_nr_hgei)
+		kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;
 
 	/* Initialize guest external interrupt line management */
 	rc = aia_hgei_init();
diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
index 9d5b04c971c4d96229ac7faa83ae40e284b700cb..f59d1c0c8c43a70a20a1250d641b3758e632878d 100644
--- a/arch/riscv/kvm/aia_aplic.c
+++ b/arch/riscv/kvm/aia_aplic.c
@@ -7,12 +7,12 @@
  *	Anup Patel <apatel@ventanamicro.com>
  */
 
+#include <linux/irqchip/riscv-aplic.h>
 #include <linux/kvm_host.h>
 #include <linux/math.h>
 #include <linux/spinlock.h>
 #include <linux/swab.h>
 #include <kvm/iodev.h>
-#include <asm/kvm_aia_aplic.h>
 
 struct aplic_irq {
 	raw_spinlock_t lock;
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 5cd407c6a8e4f82389ad3d0a63081d6dcfec7688..43e472ff3e1a29c9e27710394be2131b60e4b554 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -8,9 +8,9 @@
  */
 
 #include <linux/bits.h>
+#include <linux/irqchip/riscv-imsic.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
-#include <asm/kvm_aia_imsic.h>
 
 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
 {
@@ -526,12 +526,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
-	struct kvm_vcpu_aia_csr *reset_csr =
-		&vcpu->arch.aia_context.guest_reset_csr;
 
 	if (!kvm_riscv_aia_available())
 		return;
-	memcpy(csr, reset_csr, sizeof(*csr));
+	memset(csr, 0, sizeof(*csr));
 
 	/* Proceed only if AIA was initialized successfully */
 	if (!kvm_riscv_aia_initialized(vcpu->kvm))
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index c1585444f856ea7bf729e5b98cd02387c8820eff..29ef9c2133a93317f0f9af284cbbdcbc3b140fa4 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -9,13 +9,13 @@
 
 #include <linux/atomic.h>
 #include <linux/bitmap.h>
+#include <linux/irqchip/riscv-imsic.h>
 #include <linux/kvm_host.h>
 #include <linux/math.h>
 #include <linux/spinlock.h>
 #include <linux/swab.h>
 #include <kvm/iodev.h>
 #include <asm/cpufeature.h>
-#include <asm/kvm_aia_imsic.h>
 
 #define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64))
 
@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
 
 	if (imsic->vsfile_cpu >= 0) {
 		writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
-		kvm_vcpu_kick(vcpu);
 	} else {
 		eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
 		set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
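The sizing logic in kvm_riscv_aia_init() above can be read as follows. A
standalone sketch (illustration only; the helper name is hypothetical): each
hart exposes 2^guest_index_bits interrupt files, file 0 is the hart's own
S-mode file, so at most 2^guest_index_bits - 1 guest files exist per hart,
further capped by the number of usable bits in HGEIE.

static unsigned long usable_guest_files(unsigned long hgeie_bits,
                                        unsigned long guest_index_bits)
{
        unsigned long per_hart = (1UL << guest_index_bits) - 1;

        return hgeie_bits < per_hart ? hgeie_bits : per_hart;
}

Guest file N of a given hart then lives N pages above that hart's own IMSIC
page, which is the msi_va/msi_pa arithmetic in kvm_riscv_aia_alloc_hgei().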
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 0c0aee5b7632682dea8d4688b60f6103efedcf01..8182a58392f62312b6da0dba53faec7425a36c31 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -34,7 +34,12 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, csr_exit_user),
 	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
 	STATS_DESC_COUNTER(VCPU, signal_exits),
-	STATS_DESC_COUNTER(VCPU, exits)
+	STATS_DESC_COUNTER(VCPU, exits),
+	STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
+	STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
+	STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
+	STATS_DESC_COUNTER(VCPU, load_access_exits),
+	STATS_DESC_COUNTER(VCPU, store_access_exits),
 };
 
 const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -46,12 +51,33 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 	sizeof(kvm_vcpu_stats_desc),
 };
 
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
+					 bool kvm_sbi_reset)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+	void *vector_datap = cntx->vector.datap;
+
+	memset(cntx, 0, sizeof(*cntx));
+	memset(csr, 0, sizeof(*csr));
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
+	/* Restore datap as it's not a part of the guest context. */
+	cntx->vector.datap = vector_datap;
+
+	if (kvm_sbi_reset)
+		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
+
+	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+	cntx->sstatus = SR_SPP | SR_SPIE;
+
+	cntx->hstatus |= HSTATUS_VTW;
+	cntx->hstatus |= HSTATUS_SPVP;
+	cntx->hstatus |= HSTATUS_SPV;
+}
+
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
+{
 	bool loaded;
 
 	/**
@@ -66,9 +92,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_exit_cpu = -1;
 
-	memcpy(csr, reset_csr, sizeof(*csr));
-
-	memcpy(cntx, reset_cntx, sizeof(*cntx));
+	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
 
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
@@ -103,8 +127,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	struct kvm_cpu_context *cntx;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
 	spin_lock_init(&vcpu->arch.mp_state_lock);
 
@@ -124,20 +146,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
-	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
-	cntx = &vcpu->arch.guest_reset_context;
-	cntx->sstatus = SR_SPP | SR_SPIE;
-	cntx->hstatus = 0;
-	cntx->hstatus |= HSTATUS_VTW;
-	cntx->hstatus |= HSTATUS_SPVP;
-	cntx->hstatus |= HSTATUS_SPV;
+	spin_lock_init(&vcpu->arch.reset_state.lock);
 
-	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+	if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
 		return -ENOMEM;
 
-	/* By default, make CY, TM, and IR counters accessible in VU mode */
-	reset_csr->scounteren = 0x7;
-
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);
 
@@ -156,7 +169,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_sbi_init(vcpu);
 
 	/* Reset VCPU */
-	kvm_riscv_reset_vcpu(vcpu);
+	kvm_riscv_reset_vcpu(vcpu, false);
 
 	return 0;
 }
@@ -489,6 +502,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	case KVM_MP_STATE_STOPPED:
 		__kvm_riscv_vcpu_power_off(vcpu);
 		break;
+	case KVM_MP_STATE_INIT_RECEIVED:
+		if (vcpu->kvm->arch.mp_state_reset)
+			kvm_riscv_reset_vcpu(vcpu, false);
+		else
+			ret = -EINVAL;
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -628,7 +647,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
-		kvm_riscv_reset_vcpu(vcpu);
+		kvm_riscv_reset_vcpu(vcpu, true);
 
 	if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
 		kvm_riscv_gstage_update_hgatp(vcpu);
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 2415722c01b8e9194a8a2c87d86d70e97a9ae6f4..e992b2870cc024cf1f7836472c795e9ee91fae24 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -165,6 +165,17 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
 	vcpu->arch.guest_context.sstatus |= SR_SPP;
 }
 
+static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap)
+{
+	int ret = -EFAULT;
+
+	if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
+		kvm_riscv_vcpu_trap_redirect(vcpu, trap);
+		ret = 1;
+	}
+	return ret;
+}
+
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
@@ -183,12 +194,32 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	switch (trap->scause) {
 	case EXC_INST_ILLEGAL:
+		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
+		vcpu->stat.instr_illegal_exits++;
+		ret = vcpu_redirect(vcpu, trap);
+		break;
 	case EXC_LOAD_MISALIGNED:
+		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
+		vcpu->stat.load_misaligned_exits++;
+		ret = vcpu_redirect(vcpu, trap);
+		break;
 	case EXC_STORE_MISALIGNED:
-		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
-			kvm_riscv_vcpu_trap_redirect(vcpu, trap);
-			ret = 1;
-		}
+		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
+		vcpu->stat.store_misaligned_exits++;
+		ret = vcpu_redirect(vcpu, trap);
+		break;
+	case EXC_LOAD_ACCESS:
+		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
+		vcpu->stat.load_access_exits++;
+		ret = vcpu_redirect(vcpu, trap);
+		break;
+	case EXC_STORE_ACCESS:
+		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
+		vcpu->stat.store_access_exits++;
+		ret = vcpu_redirect(vcpu, trap);
+		break;
+	case EXC_INST_ACCESS:
+		ret = vcpu_redirect(vcpu, trap);
 		break;
 	case EXC_VIRTUAL_INST_FAULT:
 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 1fe70d2bc2ba5aeb3129d47efc071b670e9270cb..6e09b518a5d1a8515a4a3511bf7cfc93634d6d78 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -70,6 +70,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
 		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
 		.ext_ptr = &vcpu_sbi_ext_dbcn,
 	},
+	{
+		.ext_idx = KVM_RISCV_SBI_EXT_SUSP,
+		.ext_ptr = &vcpu_sbi_ext_susp,
+	},
 	{
 		.ext_idx = KVM_RISCV_SBI_EXT_STA,
 		.ext_ptr = &vcpu_sbi_ext_sta,
@@ -152,6 +156,34 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
 
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1)
+{
+	spin_lock(&vcpu->arch.reset_state.lock);
+	vcpu->arch.reset_state.pc = pc;
+	vcpu->arch.reset_state.a1 = a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+}
+
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
+
+	cntx->a0 = vcpu->vcpu_id;
+
+	spin_lock(&vcpu->arch.reset_state.lock);
+	cntx->sepc = reset_state->pc;
+	cntx->a1 = reset_state->a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	cntx->sstatus &= ~SR_SIE;
+	csr->vsatp = 0;
+}
+
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
@@ -486,19 +518,22 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
 	const struct kvm_riscv_sbi_extension_entry *entry;
 	const struct kvm_vcpu_sbi_extension *ext;
-	int i;
+	int idx, i;
 
 	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
 		entry = &sbi_ext[i];
 		ext = entry->ext_ptr;
+		idx = entry->ext_idx;
+
+		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
+			continue;
 
 		if (ext->probe && !ext->probe(vcpu)) {
-			scontext->ext_status[entry->ext_idx] =
-				KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
+			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
 			continue;
 		}
 
-		scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
+		scontext->ext_status[idx] = ext->default_disabled ?
 			KVM_RISCV_SBI_EXT_STATUS_DISABLED :
 			KVM_RISCV_SBI_EXT_STATUS_ENABLED;
 	}
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index 7e349b4ee926cbf2e94a871858a836a89d342422..f958f9741a803a39f7b6c5475090f82e6a4a7da8 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -14,7 +14,6 @@
 
 static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *reset_cntx;
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 	struct kvm_vcpu *target_vcpu;
 	unsigned long target_vcpuid = cp->a0;
@@ -31,14 +30,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	reset_cntx = &target_vcpu->arch.guest_reset_context;
-	/* start address */
-	reset_cntx->sepc = cp->a1;
-	/* target vcpu id to start */
-	reset_cntx->a0 = target_vcpuid;
-	/* private data passed from kernel */
-	reset_cntx->a1 = cp->a2;
-	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+	kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);
 
 	__kvm_riscv_vcpu_power_on(target_vcpu);
 
@@ -106,7 +98,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		}
 		return 0;
 	case SBI_EXT_HSM_HART_SUSPEND:
-		switch (cp->a0) {
+		switch (lower_32_bits(cp->a0)) {
 		case SBI_HSM_SUSPEND_RET_DEFAULT:
 			kvm_riscv_vcpu_wfi(vcpu);
 			break;
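For reference, the call that lands in kvm_sbi_hsm_vcpu_start() above looks
like this from the guest side. A sketch following the SBI HSM chapter
(illustration only, not code from this patch): a0-a2 carry the hartid, start
address, and opaque data, and the started hart begins at start_addr with
a0 = hartid and a1 = opaque, exactly the state staged by
kvm_riscv_vcpu_sbi_request_reset().

static long sbi_hart_start(unsigned long hartid, unsigned long start_addr,
                           unsigned long opaque)
{
        register unsigned long a0 asm("a0") = hartid;
        register unsigned long a1 asm("a1") = start_addr;
        register unsigned long a2 asm("a2") = opaque;
        register unsigned long a6 asm("a6") = 0;          /* FID: HART_START */
        register unsigned long a7 asm("a7") = 0x48534d;   /* EID: "HSM" */

        asm volatile("ecall"
                     : "+r"(a0), "+r"(a1)
                     : "r"(a2), "r"(a6), "r"(a7)
                     : "memory");

        return (long)a0;   /* SBI error code */
}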
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/sbi.h>
+
+static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+				    struct kvm_vcpu_sbi_return *retdata)
+{
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+	unsigned long funcid = cp->a6;
+	unsigned long hva, i;
+	struct kvm_vcpu *tmp;
+
+	switch (funcid) {
+	case SBI_EXT_SUSP_SYSTEM_SUSPEND:
+		if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+			retdata->err_val = SBI_ERR_INVALID_PARAM;
+			return 0;
+		}
+
+		if (!(cp->sstatus & SR_SPP)) {
+			retdata->err_val = SBI_ERR_FAILURE;
+			return 0;
+		}
+
+		hva = kvm_vcpu_gfn_to_hva_prot(vcpu, cp->a1 >> PAGE_SHIFT, NULL);
+		if (kvm_is_error_hva(hva)) {
+			retdata->err_val = SBI_ERR_INVALID_ADDRESS;
+			return 0;
+		}
+
+		kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+			if (tmp == vcpu)
+				continue;
+			if (!kvm_riscv_vcpu_stopped(tmp)) {
+				retdata->err_val = SBI_ERR_DENIED;
+				return 0;
+			}
+		}
+
+		kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
+
+		/* userspace provides the suspend implementation */
+		kvm_riscv_vcpu_sbi_forward(vcpu, run);
+		retdata->uexit = true;
+		break;
+	default:
+		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+		break;
+	}
+
+	return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp = {
+	.extid_start = SBI_EXT_SUSP,
+	.extid_end = SBI_EXT_SUSP,
+	.default_disabled = true,
+	.handler = kvm_sbi_ext_susp_handler,
+};
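The guest-side counterpart of the handler above, sketched per the SBI SUSP
chapter (illustration only, not code from this patch): sleep_type 0 is
SUSPEND_TO_RAM, and on resume the hart restarts at resume_addr in S-mode
with a0 = hartid and a1 = opaque, matching what
kvm_riscv_vcpu_sbi_request_reset() stages before the exit to userspace.

static long sbi_system_suspend(unsigned long resume_addr, unsigned long opaque)
{
        register unsigned long a0 asm("a0") = 0;          /* SUSPEND_TO_RAM */
        register unsigned long a1 asm("a1") = resume_addr;
        register unsigned long a2 asm("a2") = opaque;
        register unsigned long a6 asm("a6") = 0;          /* FID: SYSTEM_SUSPEND */
        register unsigned long a7 asm("a7") = 0x53555350; /* EID: "SUSP" */

        asm volatile("ecall"
                     : "+r"(a0), "+r"(a1)
                     : "r"(a2), "r"(a6), "r"(a7)
                     : "memory");

        return (long)a0;   /* SBI error code */
}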
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index d92d1348045c8cfc60ddd7b6d524f8db535e4619..05f3cc2d8e311ac62a56186b3599b5d389bf6f83 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -22,6 +22,9 @@ void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 
 	cntx->sstatus &= ~SR_VS;
+
+	cntx->vector.vlenb = riscv_v_vsize / 32;
+
 	if (riscv_isa_extension_available(isa, v)) {
 		cntx->sstatus |= SR_VS_INITIAL;
 		WARN_ON(!cntx->vector.datap);
@@ -70,13 +73,11 @@ void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
 		__kvm_riscv_vector_restore(cntx);
 }
 
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx)
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
-	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
-	if (!cntx->vector.datap)
+	vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+	if (!vcpu->arch.guest_context.vector.datap)
 		return -ENOMEM;
-	cntx->vector.vlenb = riscv_v_vsize / 32;
 
 	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
 	if (!vcpu->arch.host_context.vector.datap)
@@ -87,7 +88,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
 {
-	kfree(vcpu->arch.guest_reset_context.vector.datap);
+	kfree(vcpu->arch.guest_context.vector.datap);
 	kfree(vcpu->arch.host_context.vector.datap);
 }
 #endif
@@ -181,6 +182,8 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
 		struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 		unsigned long reg_val;
 
+		if (reg_size != sizeof(reg_val))
+			return -EINVAL;
 		if (copy_from_user(&reg_val, uaddr, reg_size))
 			return -EFAULT;
 		if (reg_val != cntx->vector.vlenb)
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 7e2b50c692c1bb583fbfb8fc187d2107f1615f78..d86024055ff6e51277ea7f320aa856308531db8c 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -209,6 +209,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	return r;
 }
 
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+	switch (cap->cap) {
+	case KVM_CAP_RISCV_MP_STATE_RESET:
+		if (cap->flags)
+			return -EINVAL;
+		kvm->arch.mp_state_reset = true;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
 	return -EINVAL;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index afc7d89aa359ad33bb245f2ad1a5850ceb66aa3c..8f28b9ca4a3c9514aaae4ed5fb9d41ae1937a586 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1230,6 +1230,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_GUEST_MEMFD 234
 #define KVM_CAP_VM_TYPES 235
 
+#define KVM_CAP_RISCV_MP_STATE_RESET 242
+
 #define KVM_CAP_HYGON_COCO_EXT 501
 /* support userspace to request firmware to build CSV3 guest's memory space */
 #define KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM (1 << 0)
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index 2630a16fe31db921b62e9d972d9b7a2c7dc6fddc..6c81ed23df8b95c7a5e5e7b11785b346747817e2 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -116,6 +116,7 @@ bool filter_reg(__u64 reg)
 	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM:
 	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU:
 	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN:
+	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SUSP:
 	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA:
 	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL:
 	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR:
@@ -552,10 +553,11 @@ static const char *sbi_ext_single_id_to_str(__u64 reg_off)
 		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST),
 		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM),
 		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU),
+		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
+		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SUSP),
 		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA),
 		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
 		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
-		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
 	};
 
 	if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name))
@@ -966,6 +968,7 @@ KVM_SBI_EXT_SUBLIST_CONFIG(base, BASE);
 KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA);
 KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU);
 KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN);
+KVM_SBI_EXT_SIMPLE_CONFIG(susp, SUSP);
 
 KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA);
 KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F);
@@ -1038,6 +1041,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
 	&config_sbi_sta,
 	&config_sbi_pmu,
 	&config_sbi_dbcn,
+	&config_sbi_susp,
 	&config_aia,
 	&config_fp_f,
 	&config_fp_d,