diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 5f62a714f65eacdbc7894f206a180500c153ab87..fb3e3f6136ecdf1beb09a07985f3782520b1a1bd 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -90,6 +90,7 @@ static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 &= ~HCR_TWE; if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) || + vcpu->kvm->arch.vgic.vtimer_irqbypass || vcpu->kvm->arch.vgic.nassgireq) vcpu->arch.hcr_el2 &= ~HCR_TWI; else diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 64f5d451b73e24ba13a26df51078a1e6f61ce1f1..16edbe15bad761514bd01dbb1b750901670649df 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -27,6 +27,19 @@ static unsigned int host_ptimer_irq; static u32 host_vtimer_irq_flags; static u32 host_ptimer_irq_flags; +bool vtimer_irqbypass; + +static int __init early_vtimer_irqbypass(char *buf) +{ + return strtobool(buf, &vtimer_irqbypass); +} +early_param("kvm-arm.vtimer_irqbypass", early_vtimer_irqbypass); + +static inline bool vtimer_is_irqbypass(void) +{ + return !!vtimer_irqbypass && kvm_vgic_vtimer_irqbypass_support(); +} + static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); static const struct kvm_irq_level default_ptimer_irq = { @@ -612,6 +625,46 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); } +static void kvm_vtimer_mbigen_auto_clr_set(struct kvm_vcpu *vcpu, bool set) +{ + BUG_ON(!vtimer_is_irqbypass()); + + vtimer_mbigen_set_auto_clr(vcpu->cpu, set); +} + +static void kvm_vtimer_gic_auto_clr_set(struct kvm_vcpu *vcpu, bool set) +{ + BUG_ON(!vtimer_is_irqbypass()); + + vtimer_gic_set_auto_clr(vcpu->cpu, set); +} + +static void kvm_vtimer_mbigen_restore_stat(struct kvm_vcpu *vcpu) +{ + struct vtimer_mbigen_context *mbigen_ctx = vcpu_vtimer_mbigen(vcpu); + u16 vpeid = 
kvm_vgic_get_vcpu_vpeid(vcpu); + unsigned long flags; + + WARN_ON(!vtimer_is_irqbypass()); + + local_irq_save(flags); + + if (mbigen_ctx->loaded) + goto out; + + vtimer_mbigen_set_vector(vcpu->cpu, vpeid); + + if (mbigen_ctx->active) + vtimer_mbigen_set_active(vcpu->cpu, true); + + mbigen_ctx->loaded = true; +out: + local_irq_restore(flags); +} + +bool gic_clr_enable = true; +module_param(gic_clr_enable, bool, S_IRUGO | S_IWUSR); + void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); @@ -622,19 +675,34 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) get_timer_map(vcpu, &map); - if (static_branch_likely(&has_gic_active_state)) { - kvm_timer_vcpu_load_gic(map.direct_vtimer); - if (map.direct_ptimer) - kvm_timer_vcpu_load_gic(map.direct_ptimer); + if (vtimer_is_irqbypass()) + kvm_vtimer_mbigen_auto_clr_set(vcpu, false); + + if (!vtimer_is_irqbypass()) { + if (static_branch_likely(&has_gic_active_state)) + kvm_timer_vcpu_load_gic(map.direct_vtimer); + else + kvm_timer_vcpu_load_nogic(vcpu); } else { - kvm_timer_vcpu_load_nogic(vcpu); + kvm_vtimer_mbigen_restore_stat(vcpu); } + if (static_branch_likely(&has_gic_active_state) && map.direct_ptimer) + kvm_timer_vcpu_load_gic(map.direct_ptimer); + set_cntvoff(timer_get_offset(map.direct_vtimer)); kvm_timer_unblocking(vcpu); timer_restore_state(map.direct_vtimer); + + if (vtimer_is_irqbypass()) { + kvm_vtimer_mbigen_auto_clr_set(vcpu, true); + if (gic_clr_enable) { + kvm_vtimer_gic_auto_clr_set(vcpu, true); + } + } + if (map.direct_ptimer) timer_restore_state(map.direct_ptimer); @@ -659,6 +727,29 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) kvm_timer_should_fire(ptimer) != plevel; } +static void kvm_vtimer_mbigen_save_stat(struct kvm_vcpu *vcpu) +{ + struct vtimer_mbigen_context *mbigen_ctx = vcpu_vtimer_mbigen(vcpu); + unsigned long flags; + + WARN_ON(!vtimer_is_irqbypass()); + + local_irq_save(flags); + + if (!mbigen_ctx->loaded) + goto out; + + 
mbigen_ctx->active = vtimer_mbigen_get_active(vcpu->cpu); + + /* Clear active state in MBIGEN now that we've saved everything. */ + if (mbigen_ctx->active) + vtimer_mbigen_set_active(vcpu->cpu, false); + + mbigen_ctx->loaded = false; +out: + local_irq_restore(flags); +} + void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); @@ -670,7 +761,18 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) get_timer_map(vcpu, &map); + if (vtimer_is_irqbypass()) { + kvm_vtimer_mbigen_auto_clr_set(vcpu, false); + kvm_vtimer_gic_auto_clr_set(vcpu, false); + } + timer_save_state(map.direct_vtimer); + + if (vtimer_is_irqbypass()) { + kvm_vtimer_mbigen_save_stat(vcpu); + kvm_vtimer_mbigen_auto_clr_set(vcpu, true); + } + if (map.direct_ptimer) timer_save_state(map.direct_ptimer); @@ -745,11 +847,15 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) timer_set_ctl(vcpu_ptimer(vcpu), 0); if (timer->enabled) { - kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu)); + if (!vtimer_is_irqbypass()) + kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu)); + kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu)); if (irqchip_in_kernel(vcpu->kvm)) { - kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq); + if (!vtimer_is_irqbypass()) + kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq); + if (map.direct_ptimer) kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq); } @@ -813,7 +919,8 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) static void kvm_timer_init_interrupt(void *info) { - enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); + if (!vtimer_is_irqbypass()) + enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags); } @@ -984,25 +1091,16 @@ static int kvm_timer_starting_cpu(unsigned int cpu) static int kvm_timer_dying_cpu(unsigned int cpu) { - disable_percpu_irq(host_vtimer_irq); + if (!vtimer_is_irqbypass()) + disable_percpu_irq(host_vtimer_irq); + return 0; } 
-int kvm_timer_hyp_init(bool has_gic) +static int kvm_vtimer_hyp_init(struct arch_timer_kvm_info *info, bool has_gic) { - struct arch_timer_kvm_info *info; int err; - info = arch_timer_get_kvm_info(); - timecounter = &info->timecounter; - - if (!timecounter->cc) { - kvm_err("kvm_arch_timer: uninitialized timecounter\n"); - return -ENODEV; - } - - /* First, do the virtual EL1 timer irq */ - if (info->virtual_irq <= 0) { kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n", info->virtual_irq); @@ -1039,6 +1137,53 @@ int kvm_timer_hyp_init(bool has_gic) kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq); + return 0; +out_free_irq: + if (!vtimer_is_irqbypass()) + free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); + return err; +} + +int kvm_timer_hyp_init(bool has_gic) +{ + struct arch_timer_kvm_info *info; + int err; + + info = arch_timer_get_kvm_info(); + timecounter = &info->timecounter; + + if (!timecounter->cc) { + kvm_err("kvm_arch_timer: uninitialized timecounter\n"); + return -ENODEV; + } + + /* First, do the virtual EL1 timer irq */ + + /* + * vtimer-irqbypass depends on: + * + * - HW support at mbigen level (vtimer_irqbypass_hw_support) + * - HW support at GIC level (kvm_vgic_vtimer_irqbypass_support) + * - in_kernel irqchip support + * - "kvm-arm.vtimer_irqbypass=1" + */ + vtimer_irqbypass &= vtimer_irqbypass_hw_support(info); + vtimer_irqbypass &= has_gic; + kvm_info("vtimer-irqbypass %sabled\n", + vtimer_is_irqbypass() ? "en" : "dis"); + + /* + * If vtimer irqbypass is enabled, there's no need to use the vtimer + * forwarded irq inject. 
+ */ + if (!vtimer_is_irqbypass()) { + int err; + + err = kvm_vtimer_hyp_init(info, has_gic); + if (err) + return err; + } + /* Now let's do the physical EL1 timer irq */ if (info->physical_irq > 0) { @@ -1131,6 +1276,70 @@ bool kvm_arch_timer_get_input_level(int vintid) return kvm_timer_should_fire(timer); } +static void vtimer_set_active_stat(struct kvm_vcpu *vcpu, int vintid, bool set) +{ + struct vtimer_mbigen_context *mbigen_ctx = vcpu_vtimer_mbigen(vcpu); + int hwirq = vcpu_vtimer(vcpu)->irq.irq; + + WARN_ON(!vtimer_is_irqbypass() || hwirq != vintid); + + if (!mbigen_ctx->loaded) + mbigen_ctx->active = set; + else + vtimer_mbigen_set_active(vcpu->cpu, set); +} + +static bool vtimer_get_active_stat(struct kvm_vcpu *vcpu, int vintid) +{ + struct vtimer_mbigen_context *mbigen_ctx = vcpu_vtimer_mbigen(vcpu); + int hwirq = vcpu_vtimer(vcpu)->irq.irq; + + WARN_ON(!vtimer_is_irqbypass() || hwirq != vintid); + + if (!mbigen_ctx->loaded) + return mbigen_ctx->active; + else + return vtimer_mbigen_get_active(vcpu->cpu); +} + +int kvm_vtimer_config(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int ret = 0; + int c; + + if (!vtimer_is_irqbypass()) + return 0; + + if (!irqchip_in_kernel(kvm)) + return -EINVAL; + + mutex_lock(&kvm->lock); + if (dist->vtimer_irqbypass) + goto out; + + kvm_for_each_vcpu(c, vcpu, kvm) { + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + int intid; + + WARN_ON(timer->enabled); + + intid = vcpu_vtimer(vcpu)->irq.irq; + ret = kvm_vgic_config_vtimer_irqbypass(vcpu, intid, + vtimer_get_active_stat, + vtimer_set_active_stat); + if (ret) + goto out; + } + + dist->vtimer_irqbypass = true; + +out: + mutex_unlock(&kvm->lock); + return ret; +} + int kvm_timer_enable(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); @@ -1141,8 +1350,12 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) return 0; /* Without a VGIC we do not map virtual IRQs to physical IRQs */ - if 
(!irqchip_in_kernel(vcpu->kvm)) - goto no_vgic; + if (!irqchip_in_kernel(vcpu->kvm)) { + if (!vtimer_is_irqbypass()) + goto no_vgic; + + return -EINVAL; + } if (!vgic_initialized(vcpu->kvm)) return -ENODEV; @@ -1154,12 +1367,14 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) get_timer_map(vcpu, &map); - ret = kvm_vgic_map_phys_irq(vcpu, - map.direct_vtimer->host_timer_irq, - map.direct_vtimer->irq.irq, - kvm_arch_timer_get_input_level); - if (ret) - return ret; + if (!vtimer_is_irqbypass()) { + ret = kvm_vgic_map_phys_irq(vcpu, + map.direct_vtimer->host_timer_irq, + map.direct_vtimer->irq.irq, + kvm_arch_timer_get_input_level); + if (ret) + return ret; + } if (map.direct_ptimer) { ret = kvm_vgic_map_phys_irq(vcpu, diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index bc5a91d17a713213080b14842abdfac901d19685..885975fcb91817dfc8a840fd2ec2be080a70d52e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -639,6 +639,10 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) kvm_arm_vcpu_init_debug(vcpu); + ret = kvm_vtimer_config(kvm); + if (ret) + return ret; + if (likely(irqchip_in_kernel(kvm))) { /* * Map the VGIC hardware resources before running a vcpu the diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c index f38c40a762519f2d10b6fb8b9f26b3294b78f39d..5f01356c0e530fdd23132eff2361bd995e7f8903 100644 --- a/arch/arm64/kvm/vgic/vgic-debug.c +++ b/arch/arm64/kvm/vgic/vgic-debug.c @@ -193,7 +193,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq, print_header(s, irq, vcpu); pending = irq->pending_latch; - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { int err; err = irq_get_irqchip_state(irq->host_irq, diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 980f20418a4234e0d246b94792cc919d856f1407..e150b8dd25383db6756001bdb6a0c4c84a92cdc3 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ 
-222,6 +222,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) /* PPIs */ irq->config = VGIC_CONFIG_LEVEL; } + + /* Needed? */ + irq->vtimer_info = NULL; } if (!irqchip_in_kernel(vcpu->kvm)) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 15a6c98ee92f05cdf35f19c5ed5cbd7a6738f699..8b90e2cde9b943259deb47af0204e46b08ebace7 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -326,7 +326,7 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); bool state = irq->pending_latch; - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { int err; err = irq_get_irqchip_state(irq->host_irq, @@ -365,6 +365,19 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, irq->pending_latch = true; vgic_queue_irq_unlock(vcpu->kvm, irq, flags); } else { + /** + * workaround: On reset, userspace clears pending status + * for all PPIs and SGIs by writing all 0's to + * GICR_ISPENDR0. The pending state of vtimer interrupt + * is somehow staying in redistributor and we have to + * explicitly clear it... + * + * P.S., irq->vtimer_info is NULL on restore. 
+ */ + if (irq->vtimer_info) + WARN_ON_ONCE(irq_set_irqchip_state(irq->host_irq, + IRQCHIP_STATE_PENDING, + false)); irq->pending_latch = false; raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 5b441777937b43b128adcb834853c4cdc49f1843..a42c812f4a41486606cdec3ffee955ad87f394ff 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -78,7 +78,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->group = !!(val & BIT(i)); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { vgic_update_vsgi(irq); raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } else { @@ -125,7 +125,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { if (!irq->enabled) { struct irq_data *data; @@ -174,7 +174,7 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled) + if (vgic_direct_sgi_or_ppi(irq) && irq->enabled) disable_irq_nosync(irq->host_irq); irq->enabled = false; @@ -241,7 +241,7 @@ static unsigned long __read_pending(struct kvm_vcpu *vcpu, bool val; raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { int err; val = false; @@ -301,7 +301,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { /* HW SGI? 
Ask the GIC to inject it */ int err; err = irq_set_irqchip_state(irq->host_irq, @@ -394,7 +394,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + if (vgic_direct_sgi_or_ppi(irq)) { /* HW SGI? Ask the GIC to clear its pending bit */ int err; err = irq_set_irqchip_state(irq->host_irq, @@ -488,12 +488,17 @@ static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu, /* Loop over all IRQs affected by this read */ for (i = 0; i < len * 8; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + struct vtimer_info *vtimer = irq->vtimer_info; + bool state = irq->active; + + if (vtimer) + state = vtimer->get_active_stat(vcpu, irq->intid); /* * Even for HW interrupts, don't evaluate the HW state as * all the guest is interested in is the virtual state. */ - if (irq->active) + if (state) value |= (1U << i); vgic_put_irq(vcpu->kvm, irq); @@ -553,6 +558,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, * do here. 
*/ irq->active = false; + } else if (irq->vtimer_info) { + /* MMIO trap only */ + irq->vtimer_info->set_active_stat(vcpu, irq->intid, active); } else { u32 model = vcpu->kvm->arch.vgic.vgic_model; u8 active_source; @@ -696,7 +704,7 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, raw_spin_lock_irqsave(&irq->irq_lock, flags); /* Narrow the priority range to what we actually support */ irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) + if (vgic_direct_sgi_or_ppi(irq)) vgic_update_vsgi(irq); raw_spin_unlock_irqrestore(&irq->irq_lock, flags); diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index bc6ecf4abe58617562f0b555b26fee5d1d8bf846..52a54f415c6da0667307bf3a308a783ff6c7a5d9 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -596,6 +596,10 @@ int vgic_v3_map_resources(struct kvm *kvm) if (kvm_vgic_global_state.has_gicv4_1) vgic_v4_configure_vsgis(kvm); + + if (kvm_vgic_vtimer_irqbypass_support()) + vgic_v4_configure_vtimer(kvm); + dist->ready = true; out: @@ -655,6 +659,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info) kvm_info("GICv4%s support %sabled\n", kvm_vgic_global_state.has_gicv4_1 ? ".1" : "", gicv4_enable ? "en" : "dis"); + + kvm_vgic_global_state.has_direct_vtimer = info->has_vtimer && gicv4_enable; + kvm_info("vtimer-irqbypass support %sabled at GIC level\n", + kvm_vgic_global_state.has_direct_vtimer ? 
"en" : "dis"); } if (!info->vcpu.start) { diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index c1845d8f5f7e712d4ea7c29f67741855a0a4d70e..0d1cd54fb7e11ef643e1120a1fed1f57ddf9b871 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -203,6 +203,63 @@ void vgic_v4_configure_vsgis(struct kvm *kvm) kvm_arm_resume_guest(kvm); } +static void vgic_v4_enable_vtimer(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vtimer_info *vtimer = &vgic_cpu->vtimer; + struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; + struct vgic_irq *irq; + struct irq_desc *desc; + int ret; + + irq = vgic_get_irq(vcpu->kvm, vcpu, vtimer->intid); + irq->host_irq = irq_find_mapping(vpe->sgi_domain, vtimer->intid); + + /* Transfer the full irq state to the vPE */ + vgic_v4_sync_sgi_config(vpe, irq); + desc = irq_to_desc(irq->host_irq); + ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc), + false); + if (!WARN_ON(ret)) { + /* Transfer pending state */ + ret = irq_set_irqchip_state(irq->host_irq, + IRQCHIP_STATE_PENDING, + irq->pending_latch); + WARN_ON(ret); + irq->pending_latch = false; + + /* Transfer active state */ + vtimer->set_active_stat(vcpu, irq->intid, irq->active); + irq->active = false; + } + + vgic_put_irq(vcpu->kvm, irq); +} + +/* Must be called with the kvm lock held */ +void vgic_v4_configure_vtimer(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int i; + + if (!dist->vtimer_irqbypass) + return; + + kvm_for_each_vcpu(i, vcpu, kvm) + vgic_v4_enable_vtimer(vcpu); +} + +/** + * kvm_vgic_get_vcpu_vpeid - Get the VCPU's vpeid + * + * The vtimer mbigen needs the vcpu vpeid info which will resident. 
+ */ +u16 kvm_vgic_get_vcpu_vpeid(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_id; +} + /* * Must be called with GICv4.1 and the vPE unmapped, which * indicates the invalidation of any VPT caches associated diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index afb077b1cda6bdea2e2e972eac0989884a84bd25..861612641953b69b2005a3705cabc8aa21c83984 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -573,6 +573,30 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) return 0; } +int kvm_vgic_config_vtimer_irqbypass(struct kvm_vcpu *vcpu, u32 vintid, + bool (*get_as)(struct kvm_vcpu *, int), + void (*set_as)(struct kvm_vcpu *, int, bool)) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vtimer_info *vtimer = &vgic_cpu->vtimer; + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); + unsigned long flags; + + WARN_ON(!irq); + WARN_ON(!kvm_vgic_vtimer_irqbypass_support()); + + vtimer->intid = vintid; + vtimer->get_active_stat = get_as; + vtimer->set_active_stat = set_as; + + raw_spin_lock_irqsave(&irq->irq_lock, flags); + irq->vtimer_info = vtimer; + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + vgic_put_irq(vcpu->kvm, irq); + + return 0; +} + /** * kvm_vgic_set_owner - Set the owner of an interrupt for a VM * diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index d8cfd360838c789299746e81d5b0f3a83b1efebb..47abce1ff89eeb4b097553b290044d7b98abc94a 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -126,6 +126,16 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq) return vgic_irq_get_lr_count(irq) > 1; } +static inline bool vgic_direct_sgi_or_ppi(struct vgic_irq *irq) +{ + bool direct_sgi, direct_ppi; + + direct_sgi = irq->hw && vgic_irq_is_sgi(irq->intid); + direct_ppi = !!(irq->vtimer_info); + + return direct_sgi || direct_ppi; +} + /* * This struct provides an intermediate representation of 
the fields contained * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC @@ -318,5 +328,6 @@ int vgic_v4_init(struct kvm *kvm); void vgic_v4_teardown(struct kvm *kvm); void vgic_v4_configure_vsgis(struct kvm *kvm); void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); +void vgic_v4_configure_vtimer(struct kvm *kvm); #endif diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c index cb9962c4debb58ea680736eee390f45c6b7d194b..dd5a6f6a44fb685052fe48ff52271d3e541c05a7 100644 --- a/drivers/irqchip/irq-gic-phytium-2500-its.c +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -5547,7 +5547,7 @@ int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, sgi_ops = NULL; if (its_init_vpe_domain() || - its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops, false)) { rdists->has_vlpis = false; pr_err("ITS: Disabling GICv4 support\n"); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9501c2ce56255969cb2fa5ac622fbc078647507f..5574fada436dea5414ead531c7177176e6c1fd27 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -119,12 +119,34 @@ struct its_node { int numa_node; unsigned int msi_domain_flags; u32 pre_its_base; /* for Socionext Synquacer */ + /** + * Hisilicon implement reg used for indicating + * direct vPPI injection capability. 
+ */ + u32 version; int vlpi_redist_offset; }; -#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) -#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) -#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + +#define is_vtimer_irqbypass(its) (!!((its)->version & GITS_VERSION_VTIMER)) + +/* Fetch it from gtdt->virtual_timer_interrupt. */ +#define is_vtimer_irq(irq) ((irq) == 27) + +static inline bool is_its_vsgi_cmd_valid(struct its_node *its, u8 hwirq) +{ + if (__get_intid_range(hwirq) == SGI_RANGE) + return true; + + /* For PPI range, only vtimer interrupt is supported atm. */ + if (is_vtimer_irq(hwirq) && is_vtimer_irqbypass(its)) + return true; + + return false; +} #define ITS_ITT_ALIGN SZ_256 @@ -571,6 +593,16 @@ static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); } +static void its_encode_sgi_intid_extension(struct its_cmd_block *cmd, u8 sgi) +{ + /* + * We reuse the VSGI command in this implementation to configure + * the vPPI or clear its pending state. The vINTID field has been + * therefore extended to [36:32]. 
+ */ + its_mask_encode(&cmd->raw_cmd[0], sgi, 36, 32); +} + static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) { its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); @@ -970,7 +1002,10 @@ static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, its_encode_cmd(cmd, GITS_CMD_VSGI); its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); - its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + if (!is_vtimer_irqbypass(its)) + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + else + its_encode_sgi_intid_extension(cmd, desc->its_vsgi_cmd.sgi); its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); @@ -4436,8 +4471,14 @@ static struct irq_chip its_vpe_4_1_irq_chip = { static void its_configure_sgi(struct irq_data *d, bool clear) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); struct its_cmd_desc desc; + if (!its || !is_its_vsgi_cmd_valid(its, d->hwirq)) { + pr_err("ITS: its_configure_sgi failed\n"); + return; + } + desc.its_vsgi_cmd.vpe = vpe; desc.its_vsgi_cmd.sgi = d->hwirq; desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; @@ -4450,7 +4491,7 @@ static void its_configure_sgi(struct irq_data *d, bool clear) * destination VPE is mapped there. Since we map them eagerly at * activation time, we're pretty sure the first GICv4.1 ITS will do. 
*/ - its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); + its_send_single_vcommand(its, its_build_vsgi_cmd, &desc); } static void its_sgi_mask_irq(struct irq_data *d) @@ -4492,11 +4533,15 @@ static int its_sgi_set_irqchip_state(struct irq_data *d, if (state) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); struct its_node *its = find_4_1_its(); + u64 offset = GITS_SGIR; u64 val; + if (__get_intid_range(d->hwirq) == PPI_RANGE) + offset = GITS_PPIR; + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); - writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + writeq_relaxed(val, its->sgir_base + offset - SZ_128K); } else { its_configure_sgi(d, true); } @@ -4508,6 +4553,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *val) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + enum gic_intid_range type; void __iomem *base; unsigned long flags; u32 count = 1000000; /* 1s! */ @@ -4517,6 +4563,17 @@ static int its_sgi_get_irqchip_state(struct irq_data *d, if (which != IRQCHIP_STATE_PENDING) return -EINVAL; + /* + * Plug the HiSilicon implementation details in comment! + * + * For vPPI, we re-use the GICR_VSGIR and GICR_VSGIPENDR in the + * implementation which allows reads to GICR_I{S,C}PENDR to be + * emulated. And note that the pending state of the vtimer + * interrupt is stored at bit[16] of GICR_VSGIPENDR. + */ + type = __get_intid_range(d->hwirq); + WARN_ON(type == PPI_RANGE && !is_vtimer_irq(d->hwirq)); + /* * Locking galore! 
We can race against two different events: * @@ -4552,7 +4609,10 @@ static int its_sgi_get_irqchip_state(struct irq_data *d, if (!count) return -ENXIO; - *val = !!(status & (1 << d->hwirq)); + if (is_vtimer_irq(d->hwirq)) + *val = !!(status & (1 << 16)); + else + *val = !!(status & (1 << d->hwirq)); return 0; } @@ -4591,10 +4651,10 @@ static int its_sgi_irq_domain_alloc(struct irq_domain *domain, struct its_vpe *vpe = args; int i; - /* Yes, we do want 16 SGIs */ - WARN_ON(nr_irqs != 16); + /* We may want 32 IRQs if vtimer irqbypass is supported. */ + WARN_ON(nr_irqs != 16 && nr_irqs != 32); - for (i = 0; i < 16; i++) { + for (i = 0; i < nr_irqs; i++) { vpe->sgi_config[i].priority = 0; vpe->sgi_config[i].enabled = false; vpe->sgi_config[i].group = false; @@ -5292,6 +5352,7 @@ static int __init its_probe_one(struct resource *res, INIT_LIST_HEAD(&its->its_device_list); typer = gic_read_typer(its_base + GITS_TYPER); its->typer = typer; + its->version = readl_relaxed(its_base + GITS_VERSION); its->base = its_base; its->phys_base = res->start; if (is_v4(its)) { @@ -5686,6 +5747,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, struct its_node *its; bool has_v4 = false; bool has_v4_1 = false; + bool has_vtimer_irqbypass = false; int err; gic_rdists = rdists; @@ -5709,12 +5771,21 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, list_for_each_entry(its, &its_nodes, entry) { has_v4 |= is_v4(its); has_v4_1 |= is_v4_1(its); + has_vtimer_irqbypass |= is_vtimer_irqbypass(its); } /* Don't bother with inconsistent systems */ if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) rdists->has_rvpeid = false; + /* vtimer irqbypass depends on rvpeid support */ + if (WARN_ON(!has_v4_1 && has_vtimer_irqbypass)) { + has_vtimer_irqbypass = false; + rdists->has_vtimer = false; + } + pr_info("ITS: vtimer-irqbypass %sabled\n", + has_vtimer_irqbypass ? 
"en" : "dis"); + if (has_v4 & rdists->has_vlpis) { const struct irq_domain_ops *sgi_ops; @@ -5724,7 +5795,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, sgi_ops = NULL; if (its_init_vpe_domain() || - its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { + its_init_v4(parent_domain, &its_vpe_domain_ops, + sgi_ops, has_vtimer_irqbypass)) { rdists->has_vlpis = false; pr_err("ITS: Disabling GICv4 support\n"); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2015ee5f3f0c59eb4803f5f6eda751cfd63e9bb6..4d13e951ba2eec79f3f008d2490c09a2ec2302c5 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -135,34 +135,9 @@ static DEFINE_PER_CPU(bool, has_rss); /* Our default, arbitrary priority value. Linux only uses one anyway. */ #define DEFAULT_PMR_VALUE 0xf0 -enum gic_intid_range { - SGI_RANGE, - PPI_RANGE, - SPI_RANGE, - EPPI_RANGE, - ESPI_RANGE, - LPI_RANGE, - __INVALID_RANGE__ -}; - -static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) -{ - switch (hwirq) { - case 0 ... 15: - return SGI_RANGE; - case 16 ... 31: - return PPI_RANGE; - case 32 ... 1019: - return SPI_RANGE; - case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): - return EPPI_RANGE; - case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): - return ESPI_RANGE; - case 8192 ... 
GENMASK(23, 0): - return LPI_RANGE; - default: - return __INVALID_RANGE__; - } +phys_addr_t get_gicr_paddr(int cpu) +{ + return (per_cpu_ptr(gic_data.rdists.rdist, cpu))->phys_base; } static enum gic_intid_range get_intid_range(struct irq_data *d) @@ -956,6 +931,9 @@ static int __gic_update_rdist_properties(struct redist_region *region, gic_data.rdists.has_rvpeid = false; } + /* Hisilicon implement: if GIC v4.1 is supported, vtimer irqbypass is supported */ + gic_data.rdists.has_vtimer = gic_data.rdists.has_rvpeid; + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); return 1; @@ -1854,6 +1832,7 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; gic_data.rdists.has_vpend_valid_dirty = true; + gic_data.rdists.has_vtimer = false; gic_compute_nr_gicr(); if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { @@ -2033,6 +2012,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + gic_v3_kvm_info.has_vtimer = gic_data.rdists.has_vtimer; gic_set_kvm_info(&gic_v3_kvm_info); } @@ -2349,6 +2329,7 @@ static void __init gic_acpi_setup_kvm_info(void) gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + gic_v3_kvm_info.has_vtimer = gic_data.rdists.has_vtimer; gic_set_kvm_info(&gic_v3_kvm_info); } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 5d1dc9915272b1892459e5cd1c4de64c1b6b0c13..e6516523b7d47b74d6a041997de13daae6cd8e8f 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -86,20 +86,30 @@ static struct irq_domain *gic_domain; static const struct irq_domain_ops *vpe_domain_ops; static const struct irq_domain_ops *sgi_domain_ops; +static bool vtimer_irqbypass; static bool has_v4_1(void) { return !!sgi_domain_ops; } +static bool 
has_v4_1_vsgi_extend(void) +{ + return has_v4_1() && vtimer_irqbypass; +} + static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) { char *name; int sgi_base; + int nr_irqs = 16; if (!has_v4_1()) return 0; + if (has_v4_1_vsgi_extend()) + nr_irqs = 32; + name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current)); if (!name) goto err; @@ -111,12 +121,13 @@ static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) kfree(name); name = NULL; - vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16, + vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, nr_irqs, sgi_domain_ops, vpe); if (!vpe->sgi_domain) goto err; - sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16, + vpe->nr_irqs = nr_irqs; + sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, nr_irqs, NUMA_NO_NODE, vpe, false, NULL); if (sgi_base <= 0) @@ -125,6 +136,7 @@ static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) return 0; err: + vpe->nr_irqs = 0; if (vpe->sgi_domain) irq_domain_remove(vpe->sgi_domain); if (vpe->fwnode) @@ -191,7 +203,7 @@ static void its_free_sgi_irqs(struct its_vm *vm) if (WARN_ON(!irq)) continue; - irq_domain_free_irqs(irq, 16); + irq_domain_free_irqs(irq, vm->vpes[i]->nr_irqs); irq_domain_remove(vm->vpes[i]->sgi_domain); irq_domain_free_fwnode(vm->vpes[i]->fwnode); } @@ -354,13 +366,16 @@ int its_prop_update_vsgi(int irq, u8 priority, bool group) int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *vpe_ops, - const struct irq_domain_ops *sgi_ops) + const struct irq_domain_ops *sgi_ops, + const bool has_vtimer_irqbypass) { if (domain) { pr_info("ITS: Enabling GICv4 support\n"); gic_domain = domain; vpe_domain_ops = vpe_ops; sgi_domain_ops = sgi_ops; + vtimer_irqbypass = has_vtimer_irqbypass; + return 0; } diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index fc05e23938cd256906e7f73f2eb8c46c7409b534..25bce1480575b5f2d9254edab9f9e80173c96372 100644 --- a/drivers/irqchip/irq-mbigen.c +++ 
b/drivers/irqchip/irq-mbigen.c @@ -15,6 +15,14 @@ #include #include #include +#include +#include +#include +#include +#include + +#include +#include /* Interrupt numbers per mbigen node supported */ #define IRQS_PER_MBIGEN_NODE 128 @@ -55,6 +63,63 @@ #define REG_MBIGEN_SPI_TYPE_OFFSET 0x400 #define REG_MBIGEN_LPI_TYPE_OFFSET 0x0 +/** + * MBIX control register + * bit[31:26] SocketID + * bit[25:24] DieID + */ +#define MBIGEN_CTLR 0x0 +#define MBIGEN_AFF3_MASK 0xff000000 +#define MBIGEN_AFF3_SHIFT 24 + +/** + * MBIX config register + * bit[31:24] mbi_type: + * - 0x10b support vTimer irqbypass + */ +#define MBIGEN_NODE_CFG_OFFSET 0x0004 +#define MBIGEN_TYPE_MASK 0x03000000 +#define MBIGEN_TYPE_SHIFT 24 +#define TYPE_VITMER_ENABLED 0x02 + +#define REG_VTIMER_MBIGEN_WIDTH 0x0004 +#define PPIS_PER_MBIGEN_NODE 32 +#define REG_VTIMER_MBIGEN_TYPE_OFFSET 0x1000 +#define REG_VTIMER_MBIGEN_SET_AUTO_CLR_OFFSET 0x1100 +#define REG_VTIMER_MBIGEN_CLR_AUTO_CLR_OFFSET 0x1110 +#define REG_VTIMER_GIC_SET_AUTO_CLR_OFFSET 0x1150 +#define REG_VTIMER_GIC_CLR_AUTO_CLR_OFFSET 0x1160 +#define REG_VTIMER_MBIGEN_ATV_STAT_OFFSET 0x1120 +#define REG_VTIMER_MBIGEN_VEC_OFFSET 0x1200 +#define REG_VTIMER_MBIGEN_ATV_CLR_OFFSET 0xa008 + +/** + * Due to the existence of hyper-threading technology, We need to get the + * absolute offset of a cpu relative to the base cpu. + */ +#define GICR_LENGTH 0x40000 +#define get_abs_offset(cpu, cpu_base) \ + ((get_gicr_paddr(cpu) - get_gicr_paddr(cpu_base)) / GICR_LENGTH) + +/** + * struct vtimer_mbigen_device - holds the information of vtimer mbigen device. + * + * @base: mapped address of this mbigen chip. + * @cpu_base : the base cpu_id attached to the mbigen chip. + * @cpu_num : the num of the cpus attached to the mbigen chip. + * @mpidr_aff3 : [socket_id : die_id] of the mbigen chip. + * @entry: list_head connecting this vtimer_mbigen to the full list. + * @vmgn_lock: spinlock for set type. 
+ */
+struct vtimer_mbigen_device {
+	void __iomem *base;
+	int cpu_base;
+	int cpu_num;
+	int mpidr_aff3;
+	struct list_head entry;
+	spinlock_t vmgn_lock;
+};
+
 /**
  * struct mbigen_device - holds the information of mbigen device.
  *
@@ -62,10 +127,177 @@
  * @base: mapped address of this mbigen chip.
  */
 struct mbigen_device {
-	struct platform_device	*pdev;
-	void __iomem		*base;
+	struct platform_device	*pdev;
+	void __iomem		*base;
+	bool vtimer_bypass_enabled;
+	struct vtimer_mbigen_device *vtimer_mbigen_chip;
 };
+
+static struct arch_timer_kvm_info *vtimer_kvm_info;
+
+static LIST_HEAD(vtimer_mgn_list);
+
+cpumask_t vtimer_cpu_mask;
+
+static struct vtimer_mbigen_device *get_vtimer_mbigen(int cpu_id)
+{
+	unsigned int mpidr_aff3;
+	struct vtimer_mbigen_device *chip;
+
+	mpidr_aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu_id), 3);
+
+	list_for_each_entry(chip, &vtimer_mgn_list, entry) {
+		if (chip->mpidr_aff3 == mpidr_aff3)
+			return chip;
+	}
+
+	pr_debug("Failed to get vtimer mbigen of cpu%d!\n", cpu_id);
+	return NULL;
+}
+
+void vtimer_mbigen_set_vector(int cpu_id, u16 vpeid)
+{
+
+	struct vtimer_mbigen_device *chip;
+	void __iomem *addr;
+	int cpu_abs_offset, count = 100;
+
+	chip = get_vtimer_mbigen(cpu_id);
+	if (!chip)
+		return;
+
+	cpu_abs_offset = get_abs_offset(cpu_id, chip->cpu_base);
+	addr = chip->base + REG_VTIMER_MBIGEN_VEC_OFFSET +
+		cpu_abs_offset * REG_VTIMER_MBIGEN_WIDTH;
+
+	writel_relaxed(vpeid, addr);
+
+	/* Make sure correct vpeid set */
+	do {
+		if (readl_relaxed(addr) == vpeid)
+			break;
+	} while (count--);
+
+	if (count < 0)
+		pr_err("Failed to set mbigen vector of CPU%d!\n", cpu_id);
+}
+
+bool vtimer_mbigen_get_active(int cpu_id)
+{
+	struct vtimer_mbigen_device *chip;
+	void __iomem *addr;
+	int cpu_abs_offset;
+	u32 val;
+
+	chip = get_vtimer_mbigen(cpu_id);
+	if (!chip)
+		return false;
+
+	cpu_abs_offset = get_abs_offset(cpu_id, chip->cpu_base);
+	addr = chip->base + REG_VTIMER_MBIGEN_ATV_STAT_OFFSET +
+		(cpu_abs_offset /
PPIS_PER_MBIGEN_NODE) * REG_VTIMER_MBIGEN_WIDTH; + + dsb(sy); + val = readl_relaxed(addr); + return (!!(val & (1 << (cpu_abs_offset % PPIS_PER_MBIGEN_NODE)))); +} + +void vtimer_mbigen_set_auto_clr(int cpu_id, bool set) +{ + struct vtimer_mbigen_device *chip; + void __iomem *addr; + int cpu_abs_offset; + u64 offset; + u32 val; + + chip = get_vtimer_mbigen(cpu_id); + if (!chip) + return; + + cpu_abs_offset = get_abs_offset(cpu_id, chip->cpu_base); + offset = set ? REG_VTIMER_MBIGEN_SET_AUTO_CLR_OFFSET : + REG_VTIMER_MBIGEN_CLR_AUTO_CLR_OFFSET; + addr = chip->base + offset + + (cpu_abs_offset / PPIS_PER_MBIGEN_NODE) * REG_VTIMER_MBIGEN_WIDTH; + val = 1 << (cpu_abs_offset % PPIS_PER_MBIGEN_NODE); + + writel_relaxed(val, addr); + dsb(sy); +} + +void vtimer_gic_set_auto_clr(int cpu_id, bool set) +{ + struct vtimer_mbigen_device *chip; + void __iomem *addr; + int cpu_abs_offset; + u64 offset; + u32 val; + + chip = get_vtimer_mbigen(cpu_id); + if (!chip) + return; + + cpu_abs_offset = get_abs_offset(cpu_id, chip->cpu_base); + offset = set ? REG_VTIMER_GIC_SET_AUTO_CLR_OFFSET : + REG_VTIMER_GIC_CLR_AUTO_CLR_OFFSET; + addr = chip->base + offset + + (cpu_abs_offset / PPIS_PER_MBIGEN_NODE) * REG_VTIMER_MBIGEN_WIDTH; + val = 1 << (cpu_abs_offset % PPIS_PER_MBIGEN_NODE); + + writel_relaxed(val, addr); + dsb(sy); +} + +void vtimer_mbigen_set_active(int cpu_id, bool set) +{ + struct vtimer_mbigen_device *chip; + void __iomem *addr; + int cpu_abs_offset; + u64 offset; + u32 val; + + chip = get_vtimer_mbigen(cpu_id); + if (!chip) + return; + + cpu_abs_offset = get_abs_offset(cpu_id, chip->cpu_base); + offset = set ? 
REG_VTIMER_MBIGEN_ATV_STAT_OFFSET :
+		REG_VTIMER_MBIGEN_ATV_CLR_OFFSET;
+	addr = chip->base + offset +
+		(cpu_abs_offset / PPIS_PER_MBIGEN_NODE) * REG_VTIMER_MBIGEN_WIDTH;
+	val = 1 << (cpu_abs_offset % PPIS_PER_MBIGEN_NODE);
+
+	writel_relaxed(val, addr);
+	dsb(sy);
+}
+
+static int vtimer_mbigen_set_type(unsigned int cpu_id)
+{
+	struct vtimer_mbigen_device *chip;
+	void __iomem *addr;
+	int cpu_abs_offset;
+	u32 val, mask;
+
+	chip = get_vtimer_mbigen(cpu_id);
+	if (!chip)
+		return -EINVAL;
+
+	cpu_abs_offset = get_abs_offset(cpu_id, chip->cpu_base);
+	addr = chip->base + REG_VTIMER_MBIGEN_TYPE_OFFSET +
+		(cpu_abs_offset / PPIS_PER_MBIGEN_NODE) * REG_VTIMER_MBIGEN_WIDTH;
+
+	mask = 1 << (cpu_abs_offset % PPIS_PER_MBIGEN_NODE);
+
+	spin_lock(&chip->vmgn_lock);
+	val = readl_relaxed(addr);
+	val |= mask;
+	writel_relaxed(val, addr);
+	dsb(sy);
+	spin_unlock(&chip->vmgn_lock);
+	return 0;
+}
+
+
 static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
 {
 	unsigned int nid, pin;
@@ -341,6 +573,254 @@ static inline int mbigen_acpi_create_domain(struct platform_device *pdev,
 }
 #endif
 
+static void vtimer_mbigen_set_kvm_info(void)
+{
+	vtimer_kvm_info = arch_timer_get_kvm_info();
+
+	vtimer_kvm_info->irqbypass_flag |= VT_EXPANDDEV_PROBED;
+}
+
+static int vtimer_mbigen_chip_get_aff3(struct vtimer_mbigen_device *chip)
+{
+	void __iomem *base = chip->base;
+	void __iomem *addr = base + MBIGEN_CTLR;
+	u32 val = readl_relaxed(addr);
+
+	return ((val & MBIGEN_AFF3_MASK) >> MBIGEN_AFF3_SHIFT);
+}
+
+static int vtimer_mbigen_chip_match_cpu(struct vtimer_mbigen_device *chip)
+{
+	int cpu;
+	chip->cpu_base = -1;
+	chip->cpu_num = 0;
+
+	for_each_possible_cpu(cpu) {
+		int mpidr_aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3);
+		if (chip->mpidr_aff3 == mpidr_aff3) {
+			/* get the first cpu attached to the mbigen */
+			if (chip->cpu_base == -1) {
+				/* Make sure cpu_base is attached to PIN0 */
+				u64 mpidr = cpu_logical_map(cpu);
+				if
(MPIDR_AFFINITY_LEVEL(mpidr, 2) + || MPIDR_AFFINITY_LEVEL(mpidr, 1) + || MPIDR_AFFINITY_LEVEL(mpidr, 0)) + return -EINVAL; + + chip->cpu_base = cpu; + } + + chip->cpu_num++; + /* + * check if this cpu has already + * attached to another mbigen + */ + if (cpumask_test_and_set_cpu(cpu, &vtimer_cpu_mask)) + return -EINVAL; + } + } + + if (chip->cpu_base == -1 || chip->cpu_num > IRQS_PER_MBIGEN_NODE) + return -EINVAL; + + return 0; +} + +static void vtimer_mbigen_chip_set_type(struct vtimer_mbigen_device *chip) +{ + int cpuid_start, cpuid_end, cpuid; + + cpuid_start = chip->cpu_base; + cpuid_end = chip->cpu_base + chip->cpu_num; + + for (cpuid = cpuid_start; cpuid < cpuid_end; cpuid++) + vtimer_mbigen_set_type(cpuid); +} + +static bool is_mbigen_vtimer_bypass_enabled(struct mbigen_device *mgn_chip) +{ + void __iomem *base = mgn_chip->base; + void __iomem *addr = base + MBIGEN_NODE_CFG_OFFSET; + u32 val = readl_relaxed(addr); + + return ((val & MBIGEN_TYPE_MASK) >> MBIGEN_TYPE_SHIFT) + == TYPE_VITMER_ENABLED; +} + +/** + * MBIX_VPPI_ITS_TA: Indicates the address of the ITS corresponding + * to the mbigen. 
+ */
+#define MBIX_VPPI_ITS_TA	0x0038
+static bool vtimer_mbigen_should_probe(struct mbigen_device *mgn_chip)
+{
+	unsigned int mpidr_aff3;
+	struct vtimer_mbigen_device *chip;
+	void __iomem *addr;
+	u32 val;
+
+	addr = mgn_chip->base + MBIX_VPPI_ITS_TA;
+	val = readl_relaxed(addr);
+	if (!val)
+		return false;
+
+	addr = mgn_chip->base + MBIGEN_CTLR;
+	val = readl_relaxed(addr);
+	mpidr_aff3 = (val & MBIGEN_AFF3_MASK) >> MBIGEN_AFF3_SHIFT;
+	list_for_each_entry(chip, &vtimer_mgn_list, entry) {
+		if (chip->mpidr_aff3 == mpidr_aff3)
+			return false;
+	}
+
+	return true;
+}
+
+
+#define CHIP0_TA_MBIGEN_PHY_BASE	0x4604400000
+#define CHIP0_TA_MBIGEN_ITS_BASE	0x84028
+#define CHIP0_TA_PERI_PHY_BASE		0x4614002018
+
+#define CHIP0_TB_MBIGEN_PHY_BASE	0xc604400000
+#define CHIP0_TB_PERI_PHY_BASE		0xc614002018
+#define CHIP0_TB_MBIGEN_ITS_BASE	0x4028
+
+#define CHIP1_TA_MBIGEN_PHY_BASE	0x204604400000
+#define CHIP1_TA_PERI_PHY_BASE		0x204614002018
+#define CHIP1_TA_MBIGEN_ITS_BASE	0x2084028
+
+#define CHIP1_TB_MBIGEN_PHY_BASE	0x20c604400000
+#define CHIP1_TB_MBIGEN_ITS_BASE	0x2004028
+#define CHIP1_TB_PERI_PHY_BASE		0x20c614002018
+extern bool vtimer_irqbypass;
+static int vtimer_mbigen_set_regs(struct platform_device *pdev)
+{
+	struct mbigen_device *mgn_chip = platform_get_drvdata(pdev);
+	struct resource *res;
+	void __iomem *addr;
+	unsigned int mpidr_aff3;
+	u32 val;
+	struct vtimer_mbigen_device *chip;
+
+	if (!vtimer_irqbypass)
+		return 0;
+
+	addr = mgn_chip->base + MBIGEN_CTLR;
+	val = readl_relaxed(addr);
+	mpidr_aff3 = (val & MBIGEN_AFF3_MASK) >> MBIGEN_AFF3_SHIFT;
+	list_for_each_entry(chip, &vtimer_mgn_list, entry) {
+		if (chip->mpidr_aff3 == mpidr_aff3)
+			return 0;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	if (res->start == CHIP0_TA_MBIGEN_PHY_BASE) {
+		addr = ioremap(CHIP0_TA_PERI_PHY_BASE, 4);
+		if (!addr) {
+			pr_err("Unable to map CHIP0-TA-PERI\n");
+			return -ENOMEM;
+		}
+
+		writel_relaxed(1, addr);
+		
iounmap(addr); + + addr = mgn_chip->base + MBIX_VPPI_ITS_TA; + writel_relaxed(CHIP0_TA_MBIGEN_ITS_BASE, addr); + } + + if (res->start == CHIP0_TB_MBIGEN_PHY_BASE) { + addr = ioremap(CHIP0_TB_PERI_PHY_BASE, 4); + if (!addr) { + pr_err("Unable to map CHIP0-TB-PERI\n"); + return -ENOMEM; + } + + writel_relaxed(1, addr); + iounmap(addr); + + addr = mgn_chip->base + MBIX_VPPI_ITS_TA; + writel_relaxed(CHIP0_TB_MBIGEN_ITS_BASE, addr); + } + + if (res->start == CHIP1_TA_MBIGEN_PHY_BASE) { + addr = ioremap(CHIP1_TA_PERI_PHY_BASE, 4); + if (!addr) { + pr_err("Unable to map CHIP1-TA-PERI\n"); + return -ENOMEM; + } + + writel_relaxed(1, addr); + iounmap(addr); + + addr = mgn_chip->base + MBIX_VPPI_ITS_TA; + writel_relaxed(CHIP1_TA_MBIGEN_ITS_BASE, addr); + } + + if (res->start == CHIP1_TB_MBIGEN_PHY_BASE) { + addr = ioremap(CHIP1_TB_PERI_PHY_BASE, 4); + if (!addr) { + pr_err("Unable to map CHIP1-TB-PERI\n"); + return -ENOMEM; + } + + writel_relaxed(1, addr); + iounmap(addr); + + addr = mgn_chip->base + MBIX_VPPI_ITS_TA; + writel_relaxed(CHIP1_TB_MBIGEN_ITS_BASE, addr); + } + + return 0; +} + +static int vtimer_mbigen_device_probe(struct platform_device *pdev) +{ + struct mbigen_device *mgn_chip = platform_get_drvdata(pdev); + struct vtimer_mbigen_device *vtimer_mgn_chip; + int err; + + err = vtimer_mbigen_set_regs(pdev); + if (err) + return err; + + mgn_chip->vtimer_bypass_enabled = + is_mbigen_vtimer_bypass_enabled(mgn_chip); + + if (!mgn_chip->vtimer_bypass_enabled || + !vtimer_mbigen_should_probe(mgn_chip)) + return 0; + + vtimer_mgn_chip = kzalloc(sizeof(*vtimer_mgn_chip), GFP_KERNEL); + if (!vtimer_mgn_chip) + return -ENOMEM; + + mgn_chip->vtimer_mbigen_chip = vtimer_mgn_chip; + vtimer_mgn_chip->base = mgn_chip->base; + vtimer_mgn_chip->mpidr_aff3 = vtimer_mbigen_chip_get_aff3(vtimer_mgn_chip); + err = vtimer_mbigen_chip_match_cpu(vtimer_mgn_chip); + if (err) { + dev_err(&pdev->dev, + "Fail to match vtimer mbigen device with cpu\n"); + goto out; + } + + 
spin_lock_init(&vtimer_mgn_chip->vmgn_lock); + list_add(&vtimer_mgn_chip->entry, &vtimer_mgn_list); + vtimer_mbigen_set_kvm_info(); + vtimer_mbigen_chip_set_type(vtimer_mgn_chip); + + pr_info("vtimer mbigen device @%p probed success!\n", mgn_chip->base); + return 0; + +out: + kfree(vtimer_mgn_chip); + dev_err(&pdev->dev, "vtimer mbigen device @%p probed failed\n", + mgn_chip->base); + return err; +} + static int mbigen_device_probe(struct platform_device *pdev) { struct mbigen_device *mgn_chip; @@ -377,7 +857,8 @@ static int mbigen_device_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, mgn_chip); - return 0; + + return vtimer_mbigen_device_probe(pdev); } static const struct of_device_id mbigen_of_match[] = { diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 6b085710ec68aace8e527d4fea2e32fadd30b8b2..68c67ddd1dc02595598e8ed8712aa1f40fa920bd 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -64,6 +64,10 @@ struct arch_timer_kvm_info { struct timecounter timecounter; int virtual_irq; int physical_irq; + +/* vtimer expand device probed flag */ +#define VT_EXPANDDEV_PROBED (1 << 0) + unsigned long irqbypass_flag; }; struct arch_timer_mem_frame { @@ -106,4 +110,18 @@ static inline bool arch_timer_evtstrm_available(void) #endif +static inline bool vtimer_irqbypass_hw_support(struct arch_timer_kvm_info *info) +{ + if (!(info->irqbypass_flag & VT_EXPANDDEV_PROBED)) + return false; + + return true; +} + +void vtimer_mbigen_set_vector(int cpu_id, u16 vpeid); +bool vtimer_mbigen_get_active(int cpu_id); +void vtimer_mbigen_set_auto_clr(int cpu_id, bool set); +void vtimer_gic_set_auto_clr(int cpu_id, bool set); +void vtimer_mbigen_set_active(int cpu_id, bool set); + #endif diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index cd6d8f260eab1415dad7feec0ebf5c868b5275b1..413d6f9bc3ff1b057a835fb82616d6c13c4f0db3 100644 --- a/include/kvm/arm_arch_timer.h 
+++ b/include/kvm/arm_arch_timer.h @@ -44,6 +44,13 @@ struct arch_timer_context { u32 host_timer_irq_flags; }; +struct vtimer_mbigen_context { + /* Active state in vtimer mbigen */ + bool active; + + bool loaded; +}; + struct timer_map { struct arch_timer_context *direct_vtimer; struct arch_timer_context *direct_ptimer; @@ -58,10 +65,14 @@ struct arch_timer_cpu { /* Is the timer enabled */ bool enabled; + + /* Info for vtimer mbigen device */ + struct vtimer_mbigen_context mbigen_ctx; }; int kvm_timer_hyp_init(bool); int kvm_timer_enable(struct kvm_vcpu *vcpu); +int kvm_vtimer_config(struct kvm *kvm); int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_sync_user(struct kvm_vcpu *vcpu); @@ -90,6 +101,8 @@ bool kvm_arch_timer_get_input_level(int vintid); #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER]) #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER]) +#define vcpu_vtimer_mbigen(v) (&(v)->arch.timer_cpu.mbigen_ctx) + #define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers) u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index c82c9c76e0005b359b7c374028a661ca9680c2dd..699cedb92f40b0b6458d35b9b0312794dbe12fd4 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -33,6 +33,16 @@ #define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \ (irq) <= VGIC_MAX_SPI) +/* Information about HiSilicon implementation of vtimer (GICv4.1-based) */ +struct vtimer_info { + u32 intid; + + bool (*get_active_stat)(struct kvm_vcpu *vcpu, int vintid); + void (*set_active_stat)(struct kvm_vcpu *vcpu, int vintid, bool active); +}; + +u16 kvm_vgic_get_vcpu_vpeid(struct kvm_vcpu *vcpu); + /*The number of lpi translation cache lists*/ #define LPI_TRANS_CACHES_NUM 8 @@ -75,6 +85,12 @@ struct vgic_global { bool has_gicv4; bool has_gicv4_1; + /* + * Hardware (HiSilicon implementation) has vtimer interrupt 
+ * direct injection support? + */ + bool has_direct_vtimer; + /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; @@ -142,6 +158,8 @@ struct vgic_irq { void *owner; /* Opaque pointer to reserve an interrupt for in-kernel devices. */ + + struct vtimer_info *vtimer_info; /* vtimer interrupt only */ }; struct vgic_register_region; @@ -243,6 +261,9 @@ struct vgic_dist { /* Wants SGIs without active state */ bool nassgireq; + /* Indicate whether the vtimer irqbypass mode is used */ + bool vtimer_irqbypass; + struct vgic_irq *spis; struct vgic_io_device dist_iodev; @@ -315,6 +336,8 @@ struct vgic_cpu { struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; + struct vtimer_info vtimer; + raw_spinlock_t ap_list_lock; /* Protects the ap_list */ /* @@ -383,6 +406,14 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1); +/** + * kvm_vgic_vtimer_irqbypass_support - Get the vtimer irqbypass HW capability + */ +static inline bool kvm_vgic_vtimer_irqbypass_support(void) +{ + return kvm_vgic_global_state.has_direct_vtimer; +} + /** * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW * @@ -413,5 +444,8 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq, int vgic_v4_load(struct kvm_vcpu *vcpu); void vgic_v4_commit(struct kvm_vcpu *vcpu); int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db); +int kvm_vgic_config_vtimer_irqbypass(struct kvm_vcpu *vcpu, u32 vintid, + bool (*get_as)(struct kvm_vcpu *, int), + void (*set_as)(struct kvm_vcpu *, int, bool)); #endif /* __KVM_ARM_VGIC_H */ diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index fa8c0455c3523316e00a6eaaaed4bf4347ccdb42..9b2c86a09a474acaaf437a08936f545d3f133bc4 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -34,6 +34,9 @@ struct gic_kvm_info { bool has_v4; /* rvpeid support */ bool 
has_v4_1; + /* vtimer irqbypass support */ + bool has_vtimer; + }; const struct gic_kvm_info *gic_get_kvm_info(void); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 380c83b7a89fe62685106acdf28a8064dbfa2e01..e5c5b05c9c5a3a06d87bafe76620420b5710771f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -378,9 +378,15 @@ #define GITS_TRANSLATER 0x10040 #define GITS_SGIR 0x20020 +/* HiSilicon IMP DEF register to set vPPI pending. */ +#define GITS_PPIR 0x200A8 + +/* HiSilicon IMP DEF register */ +#define GITS_VERSION 0xC000 #define GITS_SGIR_VPEID GENMASK_ULL(47, 32) -#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) +/* Hackish... Extend it to [4:0] to support vPPI. */ +#define GITS_SGIR_VINTID GENMASK_ULL(4, 0) #define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ImDe (1U << 1) @@ -402,6 +408,14 @@ #define GITS_TYPER_VMAPP (1ULL << 40) #define GITS_TYPER_SVPET GENMASK_ULL(42, 41) +/** + * HiSilicon IMP DEF field which indicates if the vPPI direct injection + * is supported. 
+ * - 0: not supported + * - 1: supported + */ +#define GITS_VERSION_VTIMER (1ULL << 12) + #define GITS_IIDR_REV_SHIFT 12 #define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) #define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) @@ -684,6 +698,7 @@ struct rdists { bool has_rvpeid; bool has_direct_lpi; bool has_vpend_valid_dirty; + bool has_vtimer; }; struct irq_domain; @@ -698,6 +713,8 @@ int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); +phys_addr_t get_gicr_paddr(int cpu); + static inline bool gic_enable_sre(void) { u32 val; @@ -713,6 +730,36 @@ static inline bool gic_enable_sre(void) return !!(val & ICC_SRE_EL1_SRE); } +enum gic_intid_range { + SGI_RANGE, + PPI_RANGE, + SPI_RANGE, + EPPI_RANGE, + ESPI_RANGE, + LPI_RANGE, + __INVALID_RANGE__ +}; + +static inline enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) +{ + switch (hwirq) { + case 0 ... 15: + return SGI_RANGE; + case 16 ... 31: + return PPI_RANGE; + case 32 ... 1019: + return SPI_RANGE; + case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): + return EPPI_RANGE; + case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): + return ESPI_RANGE; + case 8192 ... 
GENMASK(23, 0): + return LPI_RANGE; + default: + return __INVALID_RANGE__; + } +} + #endif #endif diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 943c3411ca1012510fec3c22b7c139f2c7dbe05a..9cab2d5f12016f4702eea0c43545501f6801e77d 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -57,7 +57,8 @@ struct its_vpe { u8 priority; bool enabled; bool group; - } sgi_config[16]; + } sgi_config[32]; + int nr_irqs; atomic_t vmapp_count; }; }; @@ -143,6 +144,7 @@ int its_prop_update_vsgi(int irq, u8 priority, bool group); struct irq_domain_ops; int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *vpe_ops, - const struct irq_domain_ops *sgi_ops); + const struct irq_domain_ops *sgi_ops, + const bool has_vtimer_irqbypass); #endif