diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index a6c73443c88d9d809816f5dead8b9515f6fc139f..470b7da2ae41565629c02cc7d0204ce159c996a4 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -7890,6 +7890,24 @@ This capability is aimed to mitigate the threat that malicious VMs can
 cause CPU stuck (due to event windows don't open up) and make the CPU
 unavailable to host or other VMs.
 
+7.37 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
+-------------------------------------
+
+:Architectures: arm64
+:Target: VM
+:Parameters: None
+:Returns: 0 on success, -EBUSY if vCPUs have been created before enabling this
+          capability.
+
+This capability changes the behavior of the registers that identify a PE
+implementation of the Arm architecture: MIDR_EL1, REVIDR_EL1, and AIDR_EL1.
+By default, these registers are visible to userspace but treated as invariant.
+
+When this capability is enabled, KVM allows userspace to change the
+aforementioned registers before the first KVM_RUN. These registers are VM
+scoped, meaning that the same set of values is presented on all vCPUs in a
+given VM.
+
 7.38 KVM_CAP_ARM_RME
 --------------------
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 8455df351ef89a30acb8e6f0e9d8942a14598818..01dabe4c41fac76eebc760cd8f69bc50c649c449 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -124,7 +124,7 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
 		MIDR_REV(MIDR_HISI_LINXICORE9100, 1, 0),
 		{ /* sentinel */ }
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), idc_support_list))
+	if (is_midr_in_range_list(idc_support_list))
 		ctr |= BIT(CTR_EL0_IDC_SHIFT);
 #endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 387a4af3f4e457eaf4169d2c079771de6c6371b7..ab24bc0fc04872ff3860d00f1c39d91deec815dd 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -255,6 +255,16 @@
 
 #define read_cpuid(reg)			read_sysreg_s(SYS_ ## reg)
 
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant.  Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+	return read_cpuid(MIDR_EL1);
+}
+
 /*
  * Represent a range of MIDR values for a given CPU model and a
  * range of variant/revision values.
@@ -290,30 +300,14 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
 	return _model == model && rv >= rv_min && rv <= rv_max;
 }
 
-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
-{
-	return midr_is_cpu_model_range(midr, range->model,
-				       range->rv_min, range->rv_max);
-}
-
-static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
-{
-	while (ranges->model)
-		if (is_midr_in_range(midr, ranges++))
-			return true;
-	return false;
-}
+struct target_impl_cpu {
+	u64 midr;
+	u64 revidr;
+	u64 aidr;
+};
 
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant.  Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
-	return read_cpuid(MIDR_EL1);
-}
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
+bool is_midr_in_range_list(struct midr_range const *ranges);
 
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
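[note] A minimal userspace sketch of the new capability, for reviewers (not
part of the patch; vm_fd handling, the chosen MIDR value, and error paths are
illustrative assumptions). The capability must be enabled before any vCPU is
created, and although the registers are VM scoped, the write itself goes
through a vCPU's KVM_SET_ONE_REG and must precede the first KVM_RUN:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* vm_fd: from KVM_CREATE_VM; returns a vCPU fd, or -1 on error. */
    static int enable_and_set_midr(int vm_fd, __u64 midr)
    {
            struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_WRITABLE_IMP_ID_REGS };
            struct kvm_one_reg reg = {
                    .id   = ARM64_SYS_REG(3, 0, 0, 0, 0),  /* MIDR_EL1 */
                    .addr = (__u64)(unsigned long)&midr,
            };
            int vcpu_fd;

            /* Must happen before any vCPU exists, else -EBUSY. */
            if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
                    return -1;

            vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
            if (vcpu_fd < 0)
                    return -1;

            /* VM scoped, but written through a vCPU; precedes first KVM_RUN. */
            if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
                    return -1;

            return vcpu_fd;
    }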
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index 0ae427f352c8cab9440ae723735c396794cbe208..95bb869f087ea98ae4452caf1f20f34f0937b3e6 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -6,5 +6,6 @@
 
 void kvm_init_hyp_services(void);
 bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);
 
 #endif
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 1f38f8b6cc2d513b5577ae0244fb0793b40eb5e6..6f4c9b1b4e9d7624f4f4a7f79240cba775554856 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -99,12 +99,12 @@
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
  * PTW:		Take a stage2 fault if a stage1 walk steps in device memory
  * TID3:	Trap EL1 reads of group 3 ID registers
- * TID2:	Trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and CSSELR_EL1
+ * TID1:	Trap REVIDR_EL1, AIDR_EL1, and SMIDR_EL1
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
 			 HCR_BSU_IS | HCR_FB | HCR_TACR | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
+			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5cff08193a2d6ce1effbb0656b854abbbcc1c831..2ebcd14b16123222d44761e2b4125ba107f64ea5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -197,6 +197,7 @@ struct kvm_smccc_features {
 	unsigned long std_bmap;
 	unsigned long std_hyp_bmap;
 	unsigned long vendor_hyp_bmap;
+	KABI_EXTEND(unsigned long vendor_hyp_bmap_2)
 };
 
 typedef unsigned int pkvm_handle_t;
@@ -249,6 +250,9 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED		7
 	/* Initial ID reg values loaded */
 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		8
+
+	/* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
+#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS		10
 	unsigned long flags;
 
 	/* VM-wide vCPU feature set */
@@ -319,6 +323,9 @@ struct kvm_arch {
 	/* PMCR_EL0.N value for the guest */
 	KABI_EXTEND(u8 pmcr_n)
 	KABI_EXTEND(u64 ctr_el0)
+	KABI_EXTEND(u64 midr_el1)
+	KABI_EXTEND(u64 revidr_el1)
+	KABI_EXTEND(u64 aidr_el1)
 };
 
 struct kvm_vcpu_fault_info {
@@ -1335,6 +1342,12 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
 		return &ka->id_regs[IDREG_IDX(reg)];
 	case SYS_CTR_EL0:
 		return &ka->ctr_el0;
+	case SYS_MIDR_EL1:
+		return &ka->midr_el1;
+	case SYS_REVIDR_EL1:
+		return &ka->revidr_el1;
+	case SYS_AIDR_EL1:
+		return &ka->aidr_el1;
 	default:
 		WARN_ON_ONCE(1);
 		return NULL;
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 871b751732f5c8ef16dc46e68ea2c6c5ddfa5cc4..99660288bd949a457f9059d4038c37ce4d931394 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -372,6 +372,7 @@ enum {
 #endif
 };
 
+/* Vendor hyper call function numbers 0-63 */
 #define KVM_REG_ARM_VENDOR_HYP_BMAP	KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
 
 enum {
@@ -387,6 +388,17 @@ enum {
 #endif
 };
 
+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2	KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+	KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER	= 0,
+	KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS	= 1,
+#ifdef __KERNEL__
+	KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT,
+#endif
+};
+
 /* Device Control API on vm fd */
 #define KVM_ARM_VM_SMCCC_CTRL		0
 #define KVM_ARM_VM_SMCCC_FILTER		0
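[note] The new bitmap is exposed to the VMM as a firmware pseudo-register, so
feature state migrates like any other register. A sketch of how a VMM could
probe it (illustrative only; vcpu_fd and error handling are assumed):

    __u64 bmap2 = 0;
    struct kvm_one_reg reg = {
            .id   = KVM_REG_ARM_VENDOR_HYP_BMAP_2,
            .addr = (__u64)(unsigned long)&bmap2,
    };

    if (!ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) &&
        (bmap2 & (1ULL << KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS))) {
            /* The DISCOVER_IMPL_* hypercalls are advertised to this guest. */
    }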
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 25bf46c7ce51b81eb9bcfdad0c20a2cc5f60a1a4..37b093bf20c033e24ddc4e4947b321954f2797bc 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -19,31 +19,85 @@
 #include
 #endif
 
+static u64 target_impl_cpu_num;
+static struct target_impl_cpu *target_impl_cpus;
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
+{
+	if (target_impl_cpu_num || !num || !impl_cpus)
+		return false;
+
+	target_impl_cpu_num = num;
+	target_impl_cpus = impl_cpus;
+	return true;
+}
+
+static inline bool is_midr_in_range(struct midr_range const *range)
+{
+	int i;
+
+	if (!target_impl_cpu_num)
+		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+					       range->rv_min, range->rv_max);
+
+	for (i = 0; i < target_impl_cpu_num; i++) {
+		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+					    range->model,
+					    range->rv_min, range->rv_max))
+			return true;
+	}
+	return false;
+}
+
+bool is_midr_in_range_list(struct midr_range const *ranges)
+{
+	while (ranges->model)
+		if (is_midr_in_range(ranges++))
+			return true;
+	return false;
+}
+EXPORT_SYMBOL_GPL(is_midr_in_range_list);
+
 static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+			 u32 midr, u32 revidr)
 {
 	const struct arm64_midr_revidr *fix;
-	u32 midr = read_cpuid_id(), revidr;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	if (!is_midr_in_range(midr, &entry->midr_range))
+	if (!is_midr_in_range(&entry->midr_range))
 		return false;
 
 	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-	revidr = read_cpuid(REVIDR_EL1);
 	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
 		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
 			return false;
-
 	return true;
 }
 
+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	int i;
+
+	if (!target_impl_cpu_num) {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		return __is_affected_midr_range(entry, read_cpuid_id(),
+						read_cpuid(REVIDR_EL1));
+	}
+
+	for (i = 0; i < target_impl_cpu_num; i++) {
+		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+					     target_impl_cpus[i].revidr))
+			return true;
+	}
+	return false;
+}
+
 static bool __maybe_unused
 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
 			    int scope)
 {
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+	return is_midr_in_range_list(entry->midr_range_list);
 }
 
 static bool __maybe_unused
@@ -80,7 +134,7 @@ hisilicon_1980005_match(const struct arm64_cpu_capabilities *entry,
 		{ /* sentinel */ }
 	};
 
-	return is_midr_in_range_list(read_cpuid_id(), idc_support_list);
+	return is_midr_in_range_list(idc_support_list);
 }
 
 static void
@@ -282,12 +336,11 @@ static bool __maybe_unused
 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 				int scope)
 {
-	u32 midr = read_cpuid_id();
 	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
 	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range(midr, &range) && has_dic;
+	return is_midr_in_range(&range) && has_dic;
 }
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
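[note] To make the rework's semantics concrete, a hypothetical use of the new
interface (not in the patch; the pool contents are made up). Once a target
list is registered, MIDR checks match against every CPU the VM may migrate
to, rather than only the local CPU:

    static struct target_impl_cpu pool[] = {
            { .midr = 0x410fd0c0 },        /* Neoverse N1 r0p0 */
            { .midr = 0x410fd490 },        /* Neoverse N2 r0p0 */
    };

    static const struct midr_range n1_only[] = {
            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
            { /* sentinel */ }
    };

    static void __init example(void)
    {
            if (!cpu_errata_set_target_impl(ARRAY_SIZE(pool), pool))
                    return;

            /*
             * True on any host CPU from now on, because the registered pool
             * contains an N1; without a pool this depends on the local MIDR.
             */
            WARN_ON(!is_midr_in_range_list(n1_only));
    }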
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3417084ac7c0dbd474062701c6ca8a84ab0331c0..7aa13adffcda990e216af10a4ca37d1e03b2e3e0 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -88,6 +88,7 @@
 #include
 #include
 #include
+#include <asm/hypervisor.h>
 #include
 #include
 #include
@@ -1688,8 +1689,7 @@ bool kaslr_requires_kpti(void)
 	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
 		extern const struct midr_range cavium_erratum_27456_cpus[];
 
-		if (is_midr_in_range_list(read_cpuid_id(),
-					  cavium_erratum_27456_cpus))
+		if (is_midr_in_range_list(cavium_erratum_27456_cpus))
 			return false;
 	}
 
@@ -1724,7 +1724,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	char const *str = "kpti command line option";
 	bool meltdown_safe;
 
-	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+	meltdown_safe = is_midr_in_range_list(kpti_safe_list);
 
 	/* Defer to CPU feature registers */
 	if (has_cpuid_feature(entry, scope))
@@ -1901,7 +1901,7 @@ static bool cpu_has_broken_dbm(void)
 		{},
 	};
 
-	return is_midr_in_range_list(read_cpuid_id(), cpus);
+	return is_midr_in_range_list(cpus);
 }
 
 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
@@ -2458,7 +2458,7 @@ static bool is_arch_xcall_xint_support(void)
 		{ /* sentinel */ }
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), hip12_cpus)) {
+	if (is_midr_in_range_list(hip12_cpus)) {
 		if (el == CurrentEL_EL2)
 			return true;
 		else
@@ -3711,6 +3711,8 @@ void check_local_cpu_capabilities(void)
 
 static void __init setup_boot_cpu_capabilities(void)
 {
+	kvm_arm_target_impl_cpu_init();
+
 	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
 	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
 	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 4d88d14b770bd8a5ec8253d3a28308f550e278d3..22e5e23438af297198c687ebb42a5a939224008d 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -172,7 +172,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 		return SPECTRE_UNAFFECTED;
 
 	/* Alternatively, we have a list of unaffected CPUs */
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+	if (is_midr_in_range_list(spectre_v2_safe_list))
 		return SPECTRE_UNAFFECTED;
 
 	return SPECTRE_VULNERABLE;
@@ -331,7 +331,7 @@ bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
 	};
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+	return is_midr_in_range_list(spectre_v3a_unsafe_list);
 }
 
 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
@@ -475,7 +475,7 @@ static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
 		{ /* sentinel */ },
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+	if (is_midr_in_range_list(spectre_v4_safe_list))
 		return SPECTRE_UNAFFECTED;
 
 	/* CPU features are detected first */
@@ -864,7 +864,7 @@ static bool is_spectre_bhb_safe(int scope)
 	if (scope != SCOPE_LOCAL_CPU)
 		return all_safe;
 
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
+	if (is_midr_in_range_list(spectre_bhb_safe_list))
 		return true;
 
 	all_safe = false;
@@ -917,17 +917,17 @@ static u8 spectre_bhb_loop_affected(void)
 		{},
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
+	if (is_midr_in_range_list(spectre_bhb_k132_list))
 		k = 132;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
+	else if (is_midr_in_range_list(spectre_bhb_k38_list))
 		k = 38;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+	else if (is_midr_in_range_list(spectre_bhb_k32_list))
 		k = 32;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+	else if (is_midr_in_range_list(spectre_bhb_k24_list))
 		k = 24;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+	else if (is_midr_in_range_list(spectre_bhb_k11_list))
 		k = 11;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+	else if (is_midr_in_range_list(spectre_bhb_k8_list))
 		k = 8;
 
 	return k;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0a8e74d32d8a9a99077c43a4e9b25a0c60181317..d3831c27384348a06ef2a028f30a9731442feef8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -248,6 +248,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		r = 0;
 		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
 		break;
+	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
+		mutex_lock(&kvm->lock);
+		if (!kvm->created_vcpus) {
+			r = 0;
+			set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
+		}
+		mutex_unlock(&kvm->lock);
+		break;
 	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
 		new_cap = cap->args[0];
 
@@ -436,6 +444,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_INJECT_EXT_DABT:
 	case KVM_CAP_VCPU_ATTRIBUTES:
 	case KVM_CAP_PTP_KVM:
+	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
 	case KVM_CAP_IRQFD_RESAMPLE:
 		r = 1;
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 595ebff1de9f5f1a9708c994a50d359ac4373358..32262232309e60630289eefbdbe4d2df6b8bedc3 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -17,6 +17,32 @@
 #include
 #include
 
+static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
+
+	if (!vcpu)
+		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+	return vcpu;
+}
+
+static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
+{
+	return host_data_ptr(host_ctxt) != ctxt;
+}
+
+static inline u64 ctxt_midr_el1(struct kvm_cpu_context *ctxt)
+{
+	struct kvm *kvm = kern_hyp_va(ctxt_to_vcpu(ctxt)->kvm);
+
+	if (!(ctxt_is_guest(ctxt) &&
+	      test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)))
+		return read_cpuid_id();
+
+	return kvm_read_vm_id_reg(kvm, SYS_MIDR_EL1);
+}
+
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, MDSCR_EL1)	= read_sysreg(mdscr_el1);
@@ -104,8 +130,10 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0),	tpidrro_el0);
 }
 
-static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
+					      u64 midr)
 {
+	write_sysreg(midr,	vpidr_el2);
 	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),	vmpidr_el2);
 
 	if (has_vhe() ||
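[note] What ctxt_midr_el1() buys us once the vpidr_el2 write lands (behavior
sketch, assuming the capability is enabled for the VM): MIDR_EL1 reads at EL1
are satisfied by VPIDR_EL2 in hardware, while REVIDR/AIDR rely on the
HCR_EL2.TID1 trap enabled earlier in this series:

    guest EL1: mrs x0, midr_el1    -> returns VPIDR_EL2, i.e. the VM-scoped
                                      MIDR_EL1, with no trap taken
    guest EL1: mrs x0, revidr_el1  -> traps via HCR_EL2.TID1 and is emulated
                                      from the VM-scoped REVIDR_EL1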
diff --git a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
index 29305022bc048f09c4686166153d1784ee8e5c94..b022af32ab2c656cf37cde0092b48599161435b7 100644
--- a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
@@ -28,7 +28,9 @@ void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
 
 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
 {
-	__sysreg_restore_el1_state(ctxt);
+	u64 midr = ctxt_midr_el1(ctxt);
+
+	__sysreg_restore_el1_state(ctxt, midr);
 	__sysreg_restore_common_state(ctxt);
 	__sysreg_restore_user_state(ctxt);
 	__sysreg_restore_el2_return_state(ctxt);
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 283e191275916b97e76adfd125b45d77ec8d00fb..6af9196784a1d44f6f8b44e955e5fc43951f757e 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -89,7 +89,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	 */
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_user_state(guest_ctxt);
-	__sysreg_restore_el1_state(guest_ctxt);
+	__sysreg_restore_el1_state(guest_ctxt, ctxt_midr_el1(guest_ctxt));
 	__mpam_guest_load();
 
 #ifdef CONFIG_ARM64_HDBSS
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index b0d19aaf5f8940c3c49dd5315e5c26400015e13f..3e17e46208956dc30f30bdfefa780d0fc0dab17b 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -19,6 +19,8 @@
 	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
 #define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES				\
 	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
+#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2				\
+	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT - 1, 0)
 
 static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
 {
@@ -397,6 +399,8 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
 		break;
 	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
 		val[0] = smccc_feat->vendor_hyp_bmap;
+		/* Function numbers 2-63 are reserved for pKVM for now */
+		val[2] = smccc_feat->vendor_hyp_bmap_2;
 		break;
 	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
 		kvm_ptp_get_time(vcpu, val);
@@ -424,6 +428,7 @@ static const u64 kvm_arm_fw_reg_ids[] = {
 	KVM_REG_ARM_STD_BMAP,
 	KVM_REG_ARM_STD_HYP_BMAP,
 	KVM_REG_ARM_VENDOR_HYP_BMAP,
+	KVM_REG_ARM_VENDOR_HYP_BMAP_2,
 };
 
 void kvm_arm_init_hypercalls(struct kvm *kvm)
@@ -534,6 +539,9 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	case KVM_REG_ARM_VENDOR_HYP_BMAP:
 		val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
 		break;
+	case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+		val = READ_ONCE(smccc_feat->vendor_hyp_bmap_2);
+		break;
 	default:
 		return -ENOENT;
 	}
@@ -564,6 +572,10 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
 		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
 		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
 		break;
+	case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap_2;
+		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2;
+		break;
 	default:
 		return -ENOENT;
 	}
@@ -668,6 +680,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	case KVM_REG_ARM_STD_BMAP:
 	case KVM_REG_ARM_STD_HYP_BMAP:
 	case KVM_REG_ARM_VENDOR_HYP_BMAP:
+	case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
 		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
 	default:
 		return -ENOENT;
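[note] Guest-side counterpart, for reference: a condensed sketch of what the
kvm_guest.c hunk further down does with the advertised services (error
handling elided):

    struct arm_smccc_res res;

    if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)) {
            /* index 0: first CPU in the target implementation list */
            arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
                                 0, 0, 0, &res);
            /* on SMCCC_RET_SUCCESS: res.a1 = MIDR, res.a2 = REVIDR, res.a3 = AIDR */
    }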
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3f932416d875b70e140df3e59b6ee898ebea29cb..416693a3200aef94fbcd4aa39e1ba14efacc67e2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1472,15 +1472,24 @@ static bool is_feature_id_reg(u32 encoding)
  * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
  * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
  * registers KVM maintains on a per-VM basis.
+ *
+ * Additionally, the implementation ID registers and CTR_EL0 are handled as
+ * per-VM registers.
  */
 static inline bool is_vm_ftr_id_reg(u32 id)
 {
-	if (id == SYS_CTR_EL0)
+	switch (id) {
+	case SYS_CTR_EL0:
+	case SYS_MIDR_EL1:
+	case SYS_REVIDR_EL1:
+	case SYS_AIDR_EL1:
 		return true;
-
-	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
-		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
-		sys_reg_CRm(id) < 8);
+	default:
+		return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
+			sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
+			sys_reg_CRm(id) < 8);
+	}
 }
 
 static inline bool is_vcpu_ftr_id_reg(u32 id)
@@ -2158,6 +2167,121 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+/*
+ * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
+ * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
+ * The values made visible to userspace were the register values of the boot
+ * CPU.
+ *
+ * At the same time, reads from these registers at EL1 previously were not
+ * trapped, allowing the guest to read the actual hardware value. On big-little
+ * machines, this means the VM can see different values depending on where a
+ * given vCPU got scheduled.
+ *
+ * These registers are now trapped as collateral damage from SME, and what
+ * follows attempts to give a user / guest view consistent with the existing
+ * ABI.
+ */
+static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
+			      struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return write_to_read_only(vcpu, p, r);
+
+	/*
+	 * Return the VM-scoped implementation ID register values if userspace
+	 * has made them writable.
+	 */
+	if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
+		return access_id_reg(vcpu, p, r);
+
+	/*
+	 * Otherwise, fall back to the old behavior of returning the value of
+	 * the current CPU.
+	 */
+	switch (reg_to_encoding(r)) {
+	case SYS_REVIDR_EL1:
+		p->regval = read_sysreg(revidr_el1);
+		break;
+	case SYS_AIDR_EL1:
+		p->regval = read_sysreg(aidr_el1);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	return true;
+}
+
+static u64 __ro_after_init boot_cpu_midr_val;
+static u64 __ro_after_init boot_cpu_revidr_val;
+static u64 __ro_after_init boot_cpu_aidr_val;
+
+static void init_imp_id_regs(void)
+{
+	boot_cpu_midr_val = read_sysreg(midr_el1);
+	boot_cpu_revidr_val = read_sysreg(revidr_el1);
+	boot_cpu_aidr_val = read_sysreg(aidr_el1);
+}
+
+static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	switch (reg_to_encoding(r)) {
+	case SYS_MIDR_EL1:
+		return boot_cpu_midr_val;
+	case SYS_REVIDR_EL1:
+		return boot_cpu_revidr_val;
+	case SYS_AIDR_EL1:
+		return boot_cpu_aidr_val;
+	default:
+		KVM_BUG_ON(1, vcpu->kvm);
+		return 0;
+	}
+}
+
+static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			  u64 val)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 expected;
+
+	guard(mutex)(&kvm->arch.config_lock);
+
+	expected = read_id_reg(vcpu, r);
+	if (expected == val)
+		return 0;
+
+	if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
+		return -EINVAL;
+
+	/*
+	 * Once the VM has started the ID registers are immutable. Reject the
+	 * write if userspace tries to change it.
+	 */
+	if (kvm_vm_has_ran_once(kvm))
+		return -EBUSY;
+
+	/*
+	 * Any value is allowed for the implementation ID registers so long as
+	 * it is within the writable mask.
+	 */
+	if ((val & r->val) != val)
+		return -EINVAL;
+
+	kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
+	return 0;
+}
+
+#define IMPLEMENTATION_ID(reg, mask) {			\
+	SYS_DESC(SYS_##reg),				\
+	.access = access_imp_id_reg,			\
+	.get_user = get_id_reg,				\
+	.set_user = set_imp_id_reg,			\
+	.reset = reset_imp_id_reg,			\
+	.val = mask,					\
+}
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
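[note] The .val mask is what set_imp_id_reg() checks writes against. MIDR_EL1
is masked to GENMASK_ULL(31, 0) in the table below since its upper 32 bits
are RES0, so (illustrative values, not from the patch):

    u64 ok    = 0x410fd0c0;                 /* within GENMASK_ULL(31, 0): accepted  */
    u64 bogus = BIT_ULL(32) | 0x410fd0c0;   /* (bogus & r->val) != bogus: -EINVAL   */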
@@ -2266,13 +2390,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
 
 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
-
+	IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
 #ifdef CONFIG_ARM64_HISI_IPIV
 	{ SYS_DESC(SYS_MPIDR_EL1), .reset = reset_mpidr, .reg = MPIDR_EL1,
 	  .set_user = set_mpidr},
 #else
 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
 #endif
+	IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
 
 	/*
 	 * ID regs: all ID_SANITISED() entries here must have corresponding
@@ -2524,6 +2649,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
 	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
 	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
+	IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
 	ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
 			     CTR_EL0_IDC_MASK |
@@ -3435,9 +3561,13 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
 	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
 	 * system register table. Registers in the ID range where CRm=0 are
 	 * excluded from this scheme as they do not trivially map into AArch64
-	 * system register encodings.
+	 * system register encodings, except for AIDR/REVIDR.
 	 */
-	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
+	if (params.Op1 == 0 && params.CRn == 0 &&
+	    (params.CRm || params.Op2 == 6 /* REVIDR */))
+		return kvm_emulate_cp15_id_reg(vcpu, &params);
+	if (params.Op1 == 1 && params.CRn == 0 &&
+	    params.CRm == 0 && params.Op2 == 7 /* AIDR */)
 		return kvm_emulate_cp15_id_reg(vcpu, &params);
 
 	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
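[note] For readers cross-checking the AArch32 encodings being rerouted here
(architectural facts restated as a reader aid, not new code):

    MRC p15, 0, <Rt>, c0, c0, 6   -> REVIDR  (Op1=0, CRn=0, CRm=0, Op2=6)
    MRC p15, 1, <Rt>, c0, c0, 7   -> AIDR    (Op1=1, CRn=0, CRm=0, Op2=7)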
@@ -3638,65 +3768,6 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-/*
- * These are the invariant sys_reg registers: we let the guest see the
- * host versions of these, so they're part of the guest state.
- *
- * A future CPU may provide a mechanism to present different values to
- * the guest, or a future kvm may trap them.
- */
-
-#define FUNCTION_INVARIANT(reg)						\
-	static u64 reset_##reg(struct kvm_vcpu *v,			\
-			       const struct sys_reg_desc *r)		\
-	{								\
-		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
-		return ((struct sys_reg_desc *)r)->val;			\
-	}
-
-FUNCTION_INVARIANT(midr_el1)
-FUNCTION_INVARIANT(revidr_el1)
-FUNCTION_INVARIANT(aidr_el1)
-
-/* ->val is filled in by kvm_sys_reg_table_init() */
-static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
-	{ SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
-	{ SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
-	{ SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
-};
-
-static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
-	const struct sys_reg_desc *r;
-
-	r = get_reg_by_id(id, invariant_sys_regs,
-			  ARRAY_SIZE(invariant_sys_regs));
-	if (!r)
-		return -ENOENT;
-
-	return put_user(r->val, uaddr);
-}
-
-static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
-	const struct sys_reg_desc *r;
-	u64 val;
-
-	r = get_reg_by_id(id, invariant_sys_regs,
-			  ARRAY_SIZE(invariant_sys_regs));
-	if (!r)
-		return -ENOENT;
-
-	if (get_user(val, uaddr))
-		return -EFAULT;
-
-	/* This is what we mean by invariant: you can't change it. */
-	if (r->val != val)
-		return -EINVAL;
-
-	return 0;
-}
-
 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
 {
 	u32 val;
@@ -3778,15 +3849,10 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
-	int err;
 
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
 		return demux_c15_get(vcpu, reg->id, uaddr);
 
-	err = get_invariant_sys_reg(reg->id, uaddr);
-	if (err != -ENOENT)
-		return err;
-
 	return kvm_sys_reg_get_user(vcpu, reg, sys_reg_descs,
 				    ARRAY_SIZE(sys_reg_descs));
 }
@@ -3822,15 +3888,10 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
-	int err;
 
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
 		return demux_c15_set(vcpu, reg->id, uaddr);
 
-	err = set_invariant_sys_reg(reg->id, uaddr);
-	if (err != -ENOENT)
-		return err;
-
 	return kvm_sys_reg_set_user(vcpu, reg, sys_reg_descs,
 				    ARRAY_SIZE(sys_reg_descs));
 }
@@ -3854,24 +3915,6 @@ static int write_demux_regids(struct kvm_vcpu *vcpu, u64 __user *uindices)
 	return 0;
 }
 
-static unsigned int num_invariant_regs(struct kvm_vcpu *vcpu)
-{
-	return _kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(invariant_sys_regs);
-}
-
-static int write_invariant_regids(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
-	unsigned int i;
-
-	for (i = 0; i < num_invariant_regs(vcpu); i++) {
-		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
-			return -EFAULT;
-		uindices++;
-	}
-	return 0;
-}
-
-
 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
 {
 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
@@ -3953,8 +3996,7 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 
 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
 {
-	return num_invariant_regs(vcpu)
-		+ num_demux_regs(vcpu)
+	return num_demux_regs(vcpu)
 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
 }
 
@@ -3962,11 +4004,6 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	int err;
 
-	err = write_invariant_regids(vcpu, uindices);
-	if (err)
-		return err;
-	uindices += num_invariant_regs(vcpu);
-
 	err = walk_sys_regs(vcpu, uindices);
 	if (err < 0)
 		return err;
@@ -4021,7 +4058,6 @@ int __init kvm_sys_reg_table_init(void)
 {
 	struct sys_reg_params params;
 	bool valid = true;
-	unsigned int i;
 
 	/* Make sure tables are unique and in order. */
 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
@@ -4029,14 +4065,11 @@ int __init kvm_sys_reg_table_init(void)
 	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
-	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
 
 	if (!valid)
 		return -EINVAL;
 
-	/* We abuse the reset function to overwrite the table itself. */
-	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
-		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
+	init_imp_id_regs();
 
 	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
 	params = encoding_to_params(SYS_ID_PFR0_EL1);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index ffed412e10a6236fb4768b2e87b89f14d7205dc0..378cb6d370b1d24ed47a9a58cd59e077a65aaaed 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -630,7 +630,7 @@ static const struct midr_range broken_seis[] = {
 static bool vgic_v3_broken_seis(void)
 {
 	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
-		is_midr_in_range_list(read_cpuid_id(), broken_seis));
+		is_midr_in_range_list(broken_seis));
 }
 
 /**
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 867b83c37bdbbc4f696716863b134c75998603fa..095238032ac06b81456e68c9dbf3ef57c9ea4366 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -893,7 +893,7 @@ static u64 __arch_timer_check_delta(void)
 		{},
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+	if (is_midr_in_range_list(broken_cval_midrs)) {
 		pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
 		return CLOCKSOURCE_MASK(31);
 	}
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index 446e35eaf3d9434cd11ee1b858a792a88a7fb8b0..ed915d2c9a0c8841d8d0cb629eaa4761cabc3847 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -91,6 +91,23 @@ efi_status_t check_platform_features(void)
 
 u32 __weak code_size;
 
+#ifdef CONFIG_HISILICON_ERRATUM_1980005
+
+static inline bool is_midr_in_range(struct midr_range const *range)
+{
+	return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+				       range->rv_min, range->rv_max);
+}
+
+bool is_midr_in_range_list(struct midr_range const *ranges)
+{
+	while (ranges->model)
+		if (is_midr_in_range(ranges++))
+			return true;
+	return false;
+}
+#endif
+
 void efi_cache_sync_image(unsigned long image_base, unsigned long alloc_size)
 {
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index 89a68e7eeaa6f5431652beef49a081d7bb2f0aa3..195e8922ebf36b5f3815fad1aac995780706f120 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,8 +6,11 @@
 #include
 #include
 #include
+#include <linux/memblock.h>
 #include
 
+#include <uapi/linux/psci.h>
+
 #include
 
 static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -49,3 +52,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
 	return test_bit(func_id, __kvm_arm_hyp_services);
 }
 EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+	int i;
+	u32 ver;
+	u64 max_cpus;
+	struct arm_smccc_res res;
+	struct target_impl_cpu *target;
+
+	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+		return;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+			     0, &res);
+	if (res.a0 != SMCCC_RET_SUCCESS)
+		return;
+
+	/* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+	ver = lower_32_bits(res.a1);
+	if (PSCI_VERSION_MAJOR(ver) != 1) {
+		pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+			PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+		return;
+	}
+
+	if (!res.a2) {
+		pr_warn("No target implementation CPUs specified\n");
+		return;
+	}
+
+	max_cpus = res.a2;
+	target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+	if (!target) {
+		pr_warn("Not enough memory for struct target_impl_cpu\n");
+		return;
+	}
+
+	for (i = 0; i < max_cpus; i++) {
+		arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+				     i, 0, 0, &res);
+		if (res.a0 != SMCCC_RET_SUCCESS) {
+			pr_warn("Discovering target implementation CPUs failed\n");
+			goto mem_free;
+		}
+		target[i].midr = res.a1;
+		target[i].revidr = res.a2;
+		target[i].aidr = res.a3;
+	}
+
+	if (!cpu_errata_set_target_impl(max_cpus, target)) {
+		pr_warn("Failed to set target implementation CPUs\n");
+		goto mem_free;
+	}
+
+	pr_info("Number of target implementation CPUs is %lld\n", max_cpus);
+	return;
+
+mem_free:
+	memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index dac5a4dbdf1e920cc0847f863e6ece1585013823..5d750678a70e701fd1caa8c84bc703a87b6a8dae 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -386,7 +386,7 @@ static bool armpmu_support_hisi_hw_metric(void)
 	 * which will cause kernel panic in virtual machine because of lack of
 	 * authority. Thus, this feature is banned for virtual machines.
 	 */
-	return is_midr_in_range_list(read_cpuid_id(), hip12_cpus) &&
+	return is_midr_in_range_list(hip12_cpus) &&
 	       is_kernel_in_hyp_mode();
 }
 #endif
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index fdc13094bac52203ffc829eb09abaed9352575fb..30e4ce54a3c488ba9ed1bacb4eb6324c52be840c 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -584,7 +584,7 @@ u16 mpam_cpbm_wd_hisi_workaround(u16 cpbm_wd, enum mpam_device_features feat,
 	if (cache_level != 3)
 		return cpbm_wd;
 
-	if (is_midr_in_range_list(read_cpuid_id(), cpus)) {
+	if (is_midr_in_range_list(cpus)) {
 		if (feat == mpam_feat_cpor_part)
 			return 19;
 		else if (feat == mpam_feat_ccap_part ||
@@ -978,7 +978,7 @@ static u64 mpam_csu_hisi_need_halved(struct mpam_msc_ris *ris, u64 now)
 		{ /* sentinel */ }
 	};
 
-	if (!is_midr_in_range_list(read_cpuid_id(), cpus))
+	if (!is_midr_in_range_list(cpus))
 		return now;
 
 	if (ris->comp->class->type != MPAM_CLASS_CACHE ||
@@ -1279,7 +1279,7 @@ static u32 mpam_cpbm_hisi_workaround(u32 cpbm, u8 cache_level)
 	};
 
 	if (cache_level != 3 ||
-	    !is_midr_in_range_list(read_cpuid_id(), cpus))
+	    !is_midr_in_range_list(cpus))
 		return cpbm;
 
 	if (cpbm & BIT(18))
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 26ca4fe621c95e55cc41b8bc56e8b3d42b6ed56f..67fb09d9f3bfb3274a4b5356bf18e6e5dcf53f40 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -115,6 +115,10 @@
 /* KVM "vendor specific" services */
 #define ARM_SMCCC_KVM_FUNC_FEATURES		0
 #define ARM_SMCCC_KVM_FUNC_PTP			1
+
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER	64
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS	65
+
 #define ARM_SMCCC_KVM_FUNC_FEATURES_2		127
 #define ARM_SMCCC_KVM_NUM_FUNCS			128
 
@@ -137,6 +141,18 @@
 			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
 			   ARM_SMCCC_KVM_FUNC_PTP)
 
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID		\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_64,				\
+			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
+			   ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID		\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_64,				\
+			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
+			   ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)
+
 /* ptp_kvm counter type ID */
 #define KVM_PTP_VIRT_COUNTER			0
 #define KVM_PTP_PHYS_COUNTER			1
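[note] A hedged restatement of the discovery ABI as the hunks above implement
it (register layout only, not additional spec):

    struct arm_smccc_res res;

    arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID, 0, &res);
    /*
     * res.a0 - SMCCC_RET_SUCCESS or an error code
     * res.a1 - lower 32 bits: version in PSCI MAJOR.MINOR format
     * res.a2 - number of entries retrievable via DISCOVER_IMPL_CPUS
     */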
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 72007f466c1c6968d135168e4b1e6296e29e26d1..3178fbd168923b9d185f607a3c156ef71c39d6a4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1218,6 +1218,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+
+#define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239
+
 #define KVM_CAP_ARM_RME 300
 
 #define KVM_CAP_SEV_ES_GHCB 500