diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 78a30d4453ad624542e7bb4855e3ec423405905f..a5a76107d2947ab405f5d6c31a70c4e491e6d248 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2709,6 +2709,14 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs. + kvm-arm.ipiv_enabled= + [KVM,ARM] Allow use of HiSilicon ipiv on GICv4.1 + + kvm-arm.ipiv_direct= + [KVM,ARM] Select the ipiv mode when ipiv is enabled + Default is 0, which uses the vmtable in memory to retrieve the aff-to-vpeid mapping + Set to 1 to calculate the vpeid directly from the affinity value + kvm-arm.dvmbm_enabled= [KVM,ARM] Allow use of HiSilicon DVMBM capability. Default: 0 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index ac4e59256f8ed5c6294208bb6f80cfc2293b2db4..bf884f95e6e6d8bbece93cba9c84a3541f78b3b0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1286,5 +1286,6 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); extern bool force_wfi_trap; extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; +extern bool kvm_ipiv_support; #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5ba336d1efad598d6c5e9e0ecc24668ecf66c2bc..43ef7742be8216131f218856c1f4fefd302a10a2 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -67,6 +67,9 @@ bool kvm_ncsnp_support; /* Capability of DVMBM */ bool kvm_dvmbm_support; +/* Capability of IPIV */ +bool kvm_ipiv_support; + static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); bool is_kvm_arm_initialised(void) @@ -312,6 +315,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) #endif } +extern struct static_key_false ipiv_enable; +extern struct static_key_false ipiv_direct; + int kvm_vm_ioctl_check_extension(struct kvm *kvm, long
ext) r = static_key_enabled(&virtcca_cvm_is_available); break; #endif + case KVM_CAP_ARM_IPIV_MODE: + if (static_branch_unlikely(&ipiv_enable)) { + if (static_branch_unlikely(&ipiv_direct)) + r = 2; /* direct mode */ + else + r = 1; /* indirect mode */ + } else { + r = 0; /* don't enable IPIV */ + } + break; default: r = 0; } @@ -2693,12 +2709,17 @@ static __init int kvm_arm_init(void) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); + kvm_ipiv_support = hisi_ipiv_supported(); kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); + kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? "enabled" : "disabled"); if (kvm_dvmbm_support) kvm_get_pg_cfg(); + if (kvm_ipiv_support) + ipiv_gicd_init(); + in_hyp_mode = is_kernel_in_hyp_mode(); #ifdef CONFIG_HISI_VIRTCCA_HOST diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index d95b96ee7237c5522f6f4a703c016bb534c0da93..07c0c76c1d09477e14a9683332466880df80830c 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -12,6 +12,8 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; static bool dvmbm_enabled; +static bool ipiv_enabled; +static bool ipiv_direct; static const char * const hisi_cpu_type_str[] = { "Hisi1612", @@ -157,6 +159,46 @@ static void hardware_disable_dvmbm(void *data) write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); } +static int __init early_ipiv_enable(char *buf) +{ + return strtobool(buf, &ipiv_enabled); +} +early_param("kvm-arm.ipiv_enabled", early_ipiv_enable); + +static int __init early_ipiv_direct(char *buf) +{ + return kstrtobool(buf, &ipiv_direct); +} +early_param("kvm-arm.ipiv_direct", early_ipiv_direct); + +bool hisi_ipiv_supported(void) +{ + /* Determine whether IPIV is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_IPIV_MASK)) { + kvm_info("Hisi ipiv not 
supported by the hardware\n"); + return false; + } else + kvm_info("Hisi ipiv detected on the hardware\n"); + + /* User provided kernel command-line parameter */ + if (!ipiv_enabled || !is_kernel_in_hyp_mode()) + return false; + + /* Enable IPIV feature if necessary */ + if (!is_gicv4p1()) { + kvm_info("Need to enable GICv4p1!\n"); + return false; + } + + kvm_info("Enable Hisi ipiv with %s mode\n", ipiv_direct ? "direct" : "indirect"); + return true; +} + +void ipiv_gicd_init() +{ + gic_dist_enable_ipiv(ipiv_direct); +} + bool hisi_dvmbm_supported(void) { if (cpu_type != HI_IP10 && cpu_type != HI_IP10C && diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index c45d319e7b41d72537c05cfe31637e2273ea9add..09f33f7cc18e35a9bf052251a9b0770b98fc03e7 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -18,6 +18,8 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; +/* HIP12 */ +#define AIDR_EL1_IPIV_MASK GENMASK_ULL(17, 16) /* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) @@ -75,7 +77,9 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +bool hisi_ipiv_supported(void); void kvm_get_pg_cfg(void); +void ipiv_gicd_init(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -94,7 +98,12 @@ static inline bool hisi_dvmbm_supported(void) { return false; } +static inline bool hisi_ipiv_supported(void) +{ + return false; +} static inline void kvm_get_pg_cfg(void) {} +static inline void ipiv_gicd_init(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { @@ -111,4 +120,6 @@ static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ +extern bool gic_dist_enable_ipiv(bool 
direct); +extern bool is_gicv4p1(void); #endif /* __HISI_VIRT_H__ */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0cfa8d93e2ee36ecb14b1db62db3b25a3c8db299..eb6e99e91951422f402ed098c5ec6fa523530e83 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -711,20 +711,44 @@ static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return actlr; } +extern struct static_key_false ipiv_enable; +extern struct static_key_false ipiv_direct; + static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { + struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; u64 mpidr; - /* - * Map the vcpu_id into the first three affinity level fields of - * the MPIDR. We limit the number of VCPUs in level 0 due to a - * limitation to 16 CPUs in that level in the ICC_SGIxR registers - * of the GICv3 to be able to address each CPU directly when - * sending IPIs. - */ - mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); - mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); - mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); + if (static_branch_unlikely(&ipiv_enable)) { + /* + * For direct ipiv mode, use vpeid as aff2/aff3 + * For indirect ipiv mode, use vcpu_id to index vpeid + * To avoid sending multi-SGIs in guest OS, make aff1/aff2 unique + */ + if (static_branch_unlikely(&ipiv_direct)) { + u64 vpe_id_aff3, vpe_id_aff2; + + vpe_id_aff2 = (vpe->vpe_id >> 8) & 0xff; + vpe_id_aff3 = (vpe->vpe_id & 0xff); + + mpidr = vpe_id_aff2 << MPIDR_LEVEL_SHIFT(2); + mpidr |= vpe_id_aff3 << MPIDR_LEVEL_SHIFT(3); + } else { + mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); + mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); + } + } else { + /* + * Map the vcpu_id into the first three affinity level fields of + * the MPIDR. 
We limit the number of VCPUs in level 0 due to a + * limitation to 16 CPUs in that level in the ICC_SGIxR registers + * of the GICv3 to be able to address each CPU directly when + * sending IPIs. + */ + mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); + mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); + mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); + } mpidr |= (1ULL << 31); vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1); diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index db5db9402c3a1fd08e306a75bd758cfaa2e75618..1be6c9a4970a0207814db7a919315c26b039934f 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -3,6 +3,7 @@ * Copyright (C) 2015, 2016 ARM Ltd. */ +#include #include #include #include @@ -531,17 +532,30 @@ int kvm_vgic_map_resources(struct kvm *kvm) return ret; } +extern struct static_key_false ipiv_enable; +static int ipiv_irq; + /* GENERIC PROBE */ void kvm_vgic_cpu_up(void) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); + if (static_branch_unlikely(&ipiv_enable)) + enable_percpu_irq(ipiv_irq, 0); } void kvm_vgic_cpu_down(void) { disable_percpu_irq(kvm_vgic_global_state.maint_irq); + if (static_branch_unlikely(&ipiv_enable)) + disable_percpu_irq(ipiv_irq); +} + +static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) +{ + kvm_info("IPIV irq handler!\n"); + return IRQ_HANDLED; } static irqreturn_t vgic_maintenance_handler(int irq, void *data) @@ -615,6 +629,15 @@ int kvm_vgic_hyp_init(void) kvm_vgic_global_state.no_hw_deactivation = true; } + if (static_branch_unlikely(&ipiv_enable)) { + ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, + ACPI_ACTIVE_HIGH); + if (ipiv_irq < 0) { + kvm_err("No ipiv exception irq\n"); + return -ENXIO; + } + } + kvm_vgic_global_state.flags = gic_kvm_info->flags; switch (gic_kvm_info->type) { case GIC_V2: @@ -637,7 +660,7 @@ int kvm_vgic_hyp_init(void) gic_kvm_info = NULL; if (ret) - return ret; + 
goto out_unregister_gsi; if (!has_mask && !kvm_vgic_global_state.maint_irq) return 0; @@ -648,9 +671,66 @@ int kvm_vgic_hyp_init(void) if (ret) { kvm_err("Cannot register interrupt %d\n", kvm_vgic_global_state.maint_irq); - return ret; + goto out_unregister_gsi; } kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); + + if (static_branch_unlikely(&ipiv_enable)) { + ret = request_percpu_irq(ipiv_irq, vgic_ipiv_irq_handler, + "ipiv exception", kvm_get_running_vcpus()); + if (ret) { + kvm_err("Cannot register interrupt %d\n", ipiv_irq); + goto out_free_irq; + } + } + + return 0; +out_free_irq: + free_percpu_irq(kvm_vgic_global_state.maint_irq, + kvm_get_running_vcpus()); +out_unregister_gsi: + if (static_branch_unlikely(&ipiv_enable)) + acpi_unregister_gsi(18); + return ret; +} + +extern int its_vpe_id_alloc(void); +extern void its_vpe_id_free(u16 id); +int kvm_vgic_vpe_id_alloc(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int vpe_id; + + if (vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated) { + kvm_err("[%s]vpe_id already allocated\n", __func__); + return -1; + } + + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) { + kvm_err("[%s]alloc vpe id fail: vpe_id=%d\n", __func__, vpe_id); + return vpe_id; + } + + vgic_cpu->vgic_v3.its_vpe.vpe_id = vpe_id; + vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated = true; + + return 0; +} + +int kvm_vgic_vpe_id_free(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int vpe_id; + + vpe_id = vgic_cpu->vgic_v3.its_vpe.vpe_id; + + if (!vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated) + return 0; + + its_vpe_id_free(vpe_id); + vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated = false; + return 0; } diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 961c3a79f41d9d19a705c185686dc99d60af75b8..0edea419baf378373448c100304e9f43ac97448c 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -136,6 +136,7 @@ static 
void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? */ dist->nassgireq = val & GICD_CTLR_nASSGIreq; + dist->its_vm.nassgireq = dist->nassgireq; if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); @@ -341,6 +342,7 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, u64 value; value = (u64)(mpidr & GENMASK(23, 0)) << 32; + value |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 56; value |= ((target_vcpu_id & 0xffff) << 8); if (vgic_has_its(vcpu->kvm)) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 49a60999f661d09a14b57a334e91f2e9342ebb90..a8d07320c851626189a9295f75dd4c5ca9613946 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -388,6 +388,9 @@ static int alloc_devid_from_rsv_pools(struct rsv_devid_pool **devid_pool, #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +extern struct static_key_false ipiv_enable; +extern struct static_key_false ipiv_direct; + #ifdef CONFIG_VIRT_PLAT_DEV /* * Currently we only build *one* devid pool. 
@@ -4562,14 +4565,63 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) its_vpe_4_1_send_inv(d); } +static void ipiv_disable_vsgi_trap(u64 val) +{ + /* disable guest access ICC_SGI1R_EL1 trap */ + asm volatile("mrs %0, s3_4_c15_c7_2" : "=r" (val)); + val |= 1ULL; + asm volatile("msr s3_4_c15_c7_2, %0" : : "r" (val)); + asm volatile("mrs %0, s3_4_c15_c7_2" : "=r" (val)); +} + +static void ipiv_enable_vsgi_trap(u64 val) +{ + /* enable guest access ICC_SGI1R_EL1 trap */ + asm volatile("mrs %0, s3_4_c15_c7_2" : "=r" (val)); + val &= ~1UL; + asm volatile("msr s3_4_c15_c7_2, %0" : : "r" (val)); + asm volatile("mrs %0, s3_4_c15_c7_2" : "=r" (val)); +} + static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + struct its_vm *vm = vpe->its_vm; + unsigned long vpe_addr; u64 val = 0; + u32 nr_vpes; + + if (static_branch_unlikely(&ipiv_enable) && + vm->nassgireq) { + /* wait gicr_ipiv_busy */ + WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, + val, !(val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); + if (!static_branch_unlikely(&ipiv_direct)) { + vpe_addr = virt_to_phys(page_address(vm->vpe_page)); + writel_relaxed(vpe_addr & 0xffffffff, + vlpi_base + GICR_VM_TABLE_BAR_L); + writel_relaxed((vpe_addr >> 32) & 0xffffffff, + vlpi_base + GICR_VM_TABLE_BAR_H); + + /* setup gicr_vcpu_entry_num_max and gicr_ipiv_its_ta_sel */ + nr_vpes = vpe->its_vm->nr_vpes; + val = ((nr_vpes - 1) << GICR_IPIV_CTRL_VCPU_ENTRY_NUM_MAX_SHIFT) | + (0 << GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT); + writel_relaxed(val, vlpi_base + GICR_IPIV_CTRL); + } else { + /* setup gicr_ipiv_its_ta_sel */ + val = (0 << GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT); + writel_relaxed(val, vlpi_base + GICR_IPIV_CTRL); + } + + ipiv_disable_vsgi_trap(val); + } else { + ipiv_enable_vsgi_trap(val); + } /* Schedule the VPE */ - val |= GICR_VPENDBASER_Valid; + val = GICR_VPENDBASER_Valid; val |= info->g0en ? 
GICR_VPENDBASER_4_1_VGRP0EN : 0; val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); @@ -4581,6 +4633,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + struct its_vm *vm = vpe->its_vm; u64 val; if (info->req_db) { @@ -4612,6 +4665,19 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, GICR_VPENDBASER_PendingLast); vpe->pending_last = true; } + + if (static_branch_unlikely(&ipiv_enable) && + vm->nassgireq) { + if (!static_branch_unlikely(&ipiv_direct)) { + /* wait gicr_ipiv_busy */ + WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, + val, !(val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); + writel_relaxed(0, vlpi_base + GICR_VM_TABLE_BAR_L); + writel_relaxed(0, vlpi_base + GICR_VM_TABLE_BAR_H); + } + + ipiv_enable_vsgi_trap(val); + } } static void its_vpe_4_1_invall(struct its_vpe *vpe) @@ -4940,15 +5006,17 @@ static const struct irq_domain_ops its_sgi_domain_ops = { .deactivate = its_sgi_irq_domain_deactivate, }; -static int its_vpe_id_alloc(void) +int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); } +EXPORT_SYMBOL(its_vpe_id_alloc); -static void its_vpe_id_free(u16 id) +void its_vpe_id_free(u16 id) { ida_simple_remove(&its_vpeid_ida, id); } +EXPORT_SYMBOL(its_vpe_id_free); static int its_vpe_init(struct its_vpe *vpe) { @@ -4956,9 +5024,13 @@ static int its_vpe_init(struct its_vpe *vpe) int vpe_id; /* Allocate vpe_id */ - vpe_id = its_vpe_id_alloc(); - if (vpe_id < 0) - return vpe_id; + if (!vpe->vpe_id_allocated) { + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + } else { + vpe_id = vpe->vpe_id; + } /* Allocate VPT */ vpt_page = its_allocate_pending_table(GFP_KERNEL); @@ -4975,6 +5047,7 @@ static int its_vpe_init(struct its_vpe *vpe) raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; + vpe->vpe_id_allocated = true; 
vpe->vpt_page = vpt_page; atomic_set(&vpe->vmapp_count, 0); if (!gic_rdists->has_rvpeid) @@ -4987,6 +5060,7 @@ static void its_vpe_teardown(struct its_vpe *vpe) { its_vpe_db_proxy_unmap(vpe); its_vpe_id_free(vpe->vpe_id); + vpe->vpe_id_allocated = false; its_free_pending_table(vpe->vpt_page); } @@ -5014,6 +5088,11 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); + if (static_branch_unlikely(&ipiv_enable) && + !static_branch_unlikely(&ipiv_direct)) { + free_pages((unsigned long)page_address(vm->vpe_page), + get_order(nr_irqs * 2)); + } } } @@ -5023,8 +5102,10 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; - struct page *vprop_page; + struct page *vprop_page, *vpe_page; int base, nr_ids, i, err = 0; + void *vpe_table_va; + u16 *vpe_entry; bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) @@ -5047,14 +5128,31 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->vprop_page = vprop_page; raw_spin_lock_init(&vm->vmapp_lock); - if (gic_rdists->has_rvpeid) + if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; + if (static_branch_unlikely(&ipiv_enable) && + !static_branch_unlikely(&ipiv_direct)) { + vpe_page = alloc_pages(GFP_KERNEL, get_order(nr_irqs * 2)); + if (!vpe_page) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + return -ENOMEM; + } + vm->vpe_page = vpe_page; + vpe_table_va = page_address(vm->vpe_page); + } + } for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; err = its_vpe_init(vm->vpes[i]); if (err) break; + if (static_branch_unlikely(&ipiv_enable) && + !static_branch_unlikely(&ipiv_direct)) { + vpe_entry = (u16 
*)vpe_table_va + i; + *(u16 *)vpe_entry = vm->vpes[i]->vpe_id; + } err = its_irq_gic_domain_alloc(domain, virq + i, vm->vpes[i]->vpe_db_lpi); if (err) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 0af1eed9ad351dbe62823f8ca0e5163f694e11d6..b1956e1c627383ba746e95eb81568ca74568d1bf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -111,6 +111,12 @@ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); +DEFINE_STATIC_KEY_FALSE(ipiv_enable); +EXPORT_SYMBOL(ipiv_enable); + +DEFINE_STATIC_KEY_FALSE(ipiv_direct); +EXPORT_SYMBOL(ipiv_direct); + /* * When the Non-secure world has access to group 0 interrupts (as a * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will @@ -1511,6 +1517,42 @@ static int gic_dist_supports_lpis(void) !gicv3_nolpi); } +bool is_gicv4p1(void) +{ + if (!gic_data.rdists.has_rvpeid) + return false; + + return true; +} +EXPORT_SYMBOL(is_gicv4p1); + +void gic_dist_enable_ipiv(bool direct) +{ + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); + val |= GICD_MISC_CTRL_CFG_IPIV_EN; + writel_relaxed(val, gic_data.dist_base + GICD_MISC_CTRL); + static_branch_enable(&ipiv_enable); + + val = readl_relaxed(gic_data.dist_base + GICD_IPIV_CTRL); + if (direct) { + val |= (1U << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT); + static_branch_enable(&ipiv_direct); + } else { + val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | + (0 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | + (4 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | + (7 << GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT) | + (2 << GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT); + } + writel_relaxed(val, gic_data.dist_base + GICD_IPIV_CTRL); + + /* Set target ITS address of IPIV feature */ + writel_relaxed(0x4880, gic_data.dist_base + GICD_IPIV_ITS_TA_BASE); +} +EXPORT_SYMBOL(gic_dist_enable_ipiv); + static void gic_cpu_init(void) { 
void __iomem *rbase; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 0b734d6f3d214ceddd093bd9923d0700f6157806..460d226450afcb12333d81717fa1570bb1532a23 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -454,6 +454,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); void kvm_vgic_init_cpu_hardware(void); +int kvm_vgic_vpe_id_alloc(struct kvm_vcpu *vcpu); +int kvm_vgic_vpe_id_free(struct kvm_vcpu *vcpu); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level, void *owner); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 548b8a5c46cfcd383a3b1304742d28506b267671..5fdc3ae4cd9958ac87659a4e67765586e6c4c1dd 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -113,6 +113,18 @@ #define GIC_PAGE_SIZE_64K 2ULL #define GIC_PAGE_SIZE_MASK 3ULL +#define GICD_MISC_CTRL 0x2084 +#define GICD_MISC_CTRL_CFG_IPIV_EN (1U << 19) + +/* IPIV private register */ +#define GICD_IPIV_CTRL 0xc05c +#define GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT 4 +#define GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT 8 +#define GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT 12 +#define GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT 16 +#define GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT 19 +#define GICD_IPIV_ITS_TA_BASE 0xc010 + /* * Re-Distributor registers, offsets from RD_base */ @@ -362,6 +374,18 @@ #define GICR_VSGIPENDR_BUSY (1U << 31) #define GICR_VSGIPENDR_PENDING GENMASK(15, 0) +/* IPIV VM table address */ +#define GICR_VM_TABLE_BAR_L 0x140 +#define GICR_VM_TABLE_BAR_H 0x144 + +#define GICR_IPIV_CTRL 0x148 +#define GICR_IPIV_CTRL_VCPU_ENTRY_NUM_MAX_SHIFT 8 +#define GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT 4 + +#define GICR_IPIV_ST 0x14c +#define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 +#define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) + /* * ITS registers, offsets from ITS_base */ diff --git 
a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 2d6f41001a46fa7872e04db222520120826f7234..4ca3c48cf39992e04ec71c83c3dabf3f0ebcb055 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -34,6 +34,8 @@ struct its_vm { */ raw_spinlock_t vmapp_lock; u32 vlpi_count[GICv4_ITS_LIST_MAX]; + KABI_EXTEND(struct page *vpe_page) + KABI_EXTEND(bool nassgireq) }; /* Embedded in kvm_vcpu.arch */ @@ -93,6 +95,7 @@ struct its_vpe { u16 vpe_id; /* Pending VLPIs on schedule out? */ bool pending_last; + KABI_EXTEND(bool vpe_id_allocated) }; /* diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 1f67e4d6ff7b4ac544ea297c88bb4e0fe79680c9..870be8fcfacf30d041ee382ea411c15ea45b0c8d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1201,6 +1201,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_IPIV_MODE 301 /* ipiv mode */ + #define KVM_CAP_SEV_ES_GHCB 500 #define KVM_CAP_HYGON_COCO_EXT 501 /* support userspace to request firmware to build CSV3 guest's memory space */