From 8f3ed5afa9c2261ad0ad7b62438e389c58fe8b18 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 10 Jan 2025 09:19:16 +0800 Subject: [PATCH 01/14] anolis: LoongArch: fix compile error when enable CONFIG_PARAVIRT LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Delete duplicate function definitions Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4471 Reviewed-by: Juxin Gao --- arch/loongarch/kernel/paravirt.c | 329 +++---------------------------- 1 file changed, 31 insertions(+), 298 deletions(-) diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index cce00d898c8c..72798ac59c4e 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -4,9 +4,9 @@ #include #include #include -#include #include #include +#include static int has_steal_clock; struct static_key paravirt_steal_enabled; @@ -21,85 +21,6 @@ static u64 native_steal_clock(int cpu) DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); -static bool steal_acc = true; -static int __init parse_no_stealacc(char *arg) -{ - steal_acc = false; - return 0; -} -early_param("no-steal-acc", parse_no_stealacc); - -static u64 para_steal_clock(int cpu) -{ - u64 steal; - struct kvm_steal_time *src; - int version; - - src = &per_cpu(steal_time, cpu); - do { - - version = src->version; - /* Make sure that the version is read before the steal */ - virt_rmb(); - steal = src->steal; - /* Make sure that the steal is read before the next version */ - virt_rmb(); - - } while ((version & 1) || (version != src->version)); - return steal; -} - -static int pv_register_steal_time(void) -{ - int cpu = smp_processor_id(); - struct kvm_steal_time *st; - unsigned long addr; - - if (!has_steal_clock) - return -EPERM; - - st = &per_cpu(steal_time, cpu); - addr = per_cpu_ptr_to_phys(st); - - /* The whole structure kvm_steal_time should be one page */ - if 
(PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { - pr_warn("Illegal PV steal time addr %lx\n", addr); - return -EFAULT; - } - - addr |= KVM_STEAL_PHYS_VALID; - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); - return 0; -} - -static bool steal_acc = true; - -static int __init parse_no_stealacc(char *arg) -{ - steal_acc = false; - return 0; -} -early_param("no-steal-acc", parse_no_stealacc); - -static u64 paravt_steal_clock(int cpu) -{ - int version; - u64 steal; - struct kvm_steal_time *src; - - src = &per_cpu(steal_time, cpu); - do { - - version = src->version; - virt_rmb(); /* Make sure that the version is read before the steal */ - steal = src->steal; - virt_rmb(); /* Make sure that the steal is read before the next version */ - - } while ((version & 1) || (version != src->version)); - - return steal; -} - static bool steal_acc = true; static int __init parse_no_stealacc(char *arg) @@ -133,7 +54,7 @@ static struct smp_ops native_ops; static void pv_send_ipi_single(int cpu, unsigned int action) { - unsigned int min, old; + int min, old; irq_cpustat_t *info = &per_cpu(irq_stat, cpu); if (unlikely(action == ACTION_BOOT_CPU)) { @@ -146,13 +67,14 @@ static void pv_send_ipi_single(int cpu, unsigned int action) return; min = cpu_logical_map(cpu); - kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1, 0, min); + kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min); } -#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) +#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) + static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) { - unsigned int cpu, i, min = 0, max = 0, old; + int i, cpu, min = 0, max = 0, old; __uint128_t bitmap = 0; irq_cpustat_t *info; @@ -174,20 +96,20 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) cpu = cpu_logical_map(i); if (!bitmap) { min = max = cpu; - } else if (cpu > min && cpu < min + KVM_IPI_CLUSTER_SIZE) { - max = cpu > max ? 
cpu : max; - } else if (cpu < min && (max - cpu) < KVM_IPI_CLUSTER_SIZE) { + } else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) { + /* cpu < min, and bitmap still enough */ bitmap <<= min - cpu; min = cpu; + } else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) { + /* cpu > min, and bitmap still enough */ + max = cpu > max ? cpu : max; } else { /* - * Physical cpuid is sorted in ascending order ascend - * for the next mask calculation, send IPI here - * directly and skip the remainding cpus + * With cpu, bitmap will exceed KVM_IPI_CLUSTER_SIZE, + * send IPI here directly and skip the remaining CPUs. */ - kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, - (unsigned long)bitmap, - (unsigned long)(bitmap >> BITS_PER_LONG), min); + kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); min = max = cpu; bitmap = 0; } @@ -195,78 +117,53 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) } if (bitmap) - kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, (unsigned long)bitmap, + kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, (unsigned long)(bitmap >> BITS_PER_LONG), min); } -static irqreturn_t loongson_do_swi(int irq, void *dev) +static irqreturn_t pv_ipi_interrupt(int irq, void *dev) { + u32 action; irq_cpustat_t *info; - long action; - /* Clear swi interrupt */ + /* Clear SWI interrupt */ clear_csr_estat(1 << INT_SWI0); info = this_cpu_ptr(&irq_stat); action = atomic_xchg(&info->message, 0); - if (action & SMP_CALL_FUNCTION) { - generic_smp_call_function_interrupt(); - info->ipi_irqs[IPI_CALL_FUNCTION]++; - } if (action & SMP_RESCHEDULE) { scheduler_ipi(); info->ipi_irqs[IPI_RESCHEDULE]++; } + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + info->ipi_irqs[IPI_CALL_FUNCTION]++; + } + return IRQ_HANDLED; } static void pv_init_ipi(void) { - int r, swi0; + int r, swi; /* Init native ipi irq for ACTION_BOOT_CPU */ native_ops.init_ipi(); - swi0 = 
get_percpu_irq(INT_SWI0); - if (swi0 < 0) + swi = get_percpu_irq(INT_SWI0); + if (swi < 0) panic("SWI0 IRQ mapping failed\n"); - irq_set_percpu_devid(swi0); - r = request_percpu_irq(swi0, loongson_do_swi, "SWI0", &irq_stat); + irq_set_percpu_devid(swi); + r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat); if (r < 0) panic("SWI0 IRQ request failed\n"); } - -static void pv_disable_steal_time(void) -{ - if (has_steal_clock) - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); -} - -static int pv_cpu_online(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_register_steal_time(); - local_irq_restore(flags); - return 0; -} - -static int pv_cpu_down_prepare(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_disable_steal_time(); - local_irq_restore(flags); - return 0; -} #endif bool kvm_para_available(void) { - static int hypervisor_type; int config; + static int hypervisor_type; if (!cpu_has_hypervisor) return false; @@ -299,7 +196,7 @@ int __init pv_ipi_init(void) return 0; #ifdef CONFIG_SMP - native_ops = mp_ops; + native_ops = smp_ops; smp_ops.init_ipi = pv_init_ipi; smp_ops.send_ipi_single = pv_send_ipi_single; smp_ops.send_ipi_mask = pv_send_ipi_mask; @@ -308,56 +205,6 @@ int __init pv_ipi_init(void) return 0; } -static void pv_cpu_reboot(void *unused) -{ - pv_disable_steal_time(); -} - -static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, - void *unused) -{ - on_each_cpu(pv_cpu_reboot, NULL, 1); - return NOTIFY_DONE; -} - -static struct notifier_block pv_reboot_nb = { - .notifier_call = pv_reboot_notify, -}; - -int __init pv_time_init(void) -{ - int feature; - - if (!cpu_has_hypervisor) - return 0; - if (!kvm_para_available()) - return 0; - - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_STEAL_TIME)) - return 0; - - has_steal_clock = 1; - if (pv_register_steal_time()) { - has_steal_clock = 0; - return 0; - } - - 
register_reboot_notifier(&pv_reboot_nb); - static_call_update(pv_steal_clock, para_steal_clock); - static_key_slow_inc(¶virt_steal_enabled); - if (steal_acc) - static_key_slow_inc(¶virt_steal_rq_enabled); - -#ifdef CONFIG_SMP - if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "loongarch/pv:online", - pv_cpu_online, pv_cpu_down_prepare) < 0) - pr_err("Failed to install cpu hotplug callbacks\n"); -#endif - pr_info("Using stolen time PV\n"); - return 0; -} - static int pv_enable_steal_time(void) { int cpu = smp_processor_id(); @@ -466,120 +313,6 @@ int __init pv_time_init(void) return 0; } -static int pv_enable_steal_time(void) -{ - int cpu = smp_processor_id(); - unsigned long addr; - struct kvm_steal_time *st; - - if (!has_steal_clock) - return -EPERM; - - st = &per_cpu(steal_time, cpu); - addr = per_cpu_ptr_to_phys(st); - - /* The whole structure kvm_steal_time should be in one page */ - if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { - pr_warn("Illegal PV steal time addr %lx\n", addr); - return -EFAULT; - } - - addr |= KVM_STEAL_PHYS_VALID; - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); - - return 0; -} - -static void pv_disable_steal_time(void) -{ - if (has_steal_clock) - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); -} - -#ifdef CONFIG_SMP -static int pv_time_cpu_online(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_enable_steal_time(); - local_irq_restore(flags); - - return 0; -} - -static int pv_time_cpu_down_prepare(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_disable_steal_time(); - local_irq_restore(flags); - - return 0; -} -#endif - -static void pv_cpu_reboot(void *unused) -{ - pv_disable_steal_time(); -} - -static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) -{ - on_each_cpu(pv_cpu_reboot, NULL, 1); - return NOTIFY_DONE; -} - -static struct notifier_block pv_reboot_nb = { - .notifier_call = pv_reboot_notify, 
-}; - -int __init pv_time_init(void) -{ - int r, feature; - - if (!cpu_has_hypervisor) - return 0; - if (!kvm_para_available()) - return 0; - - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_STEAL_TIME)) - return 0; - - has_steal_clock = 1; - r = pv_enable_steal_time(); - if (r < 0) { - has_steal_clock = 0; - return 0; - } - register_reboot_notifier(&pv_reboot_nb); - -#ifdef CONFIG_SMP - r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "loongarch/pv_time:online", - pv_time_cpu_online, pv_time_cpu_down_prepare); - if (r < 0) { - has_steal_clock = 0; - pr_err("Failed to install cpu hotplug callbacks\n"); - return r; - } -#endif - - static_call_update(pv_steal_clock, paravt_steal_clock); - - static_key_slow_inc(¶virt_steal_enabled); -#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - if (steal_acc) - static_key_slow_inc(¶virt_steal_rq_enabled); -#endif - - pr_info("Using paravirt steal-time\n"); - - return 0; -} - int __init pv_spinlock_init(void) { if (!cpu_has_hypervisor) -- Gitee From fc378e173bcd7415716e3df13d9256cca131136c Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Thu, 9 Jan 2025 16:58:04 +0800 Subject: [PATCH 02/14] anolis: LoongArch: KVM: enable ptw for kvm LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Add the ptw feature bit to cpucfg Signed-off-by: Xianglai Li --- arch/loongarch/kvm/exit.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 19cb22da35de..5579ee8afedc 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -44,6 +44,8 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) switch (index) { case 0 ... 
(KVM_MAX_CPUCFG_REGS - 1): vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + if (cpu_has_ptw && (index == LOONGARCH_CPUCFG2)) + vcpu->arch.gprs[rd] |= CPUCFG2_PTW; break; case CPUCFG_KVM_SIG: /* CPUCFG emulation between 0x40000000 -- 0x400000ff */ -- Gitee From f6d5de70627432f8cba29edd8ac51859b1dd3118 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 27 Dec 2024 18:57:42 +0800 Subject: [PATCH 03/14] anolis: driver/iommu: Fixed multiple vfio devices not working properly LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Fixed 16 vfio devices cannot be pass-through to VMS. Signed-off-by: Xianglai Li --- drivers/iommu/loongarch_iommu.c | 45 ++++++++++++++++++--------------- drivers/iommu/loongarch_iommu.h | 2 +- 2 files changed, 25 insertions(+), 22 deletions(-) diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c index 26ce45ea46c2..fc5013428eca 100644 --- a/drivers/iommu/loongarch_iommu.c +++ b/drivers/iommu/loongarch_iommu.c @@ -199,6 +199,22 @@ static void flush_iotlb_by_domain_id(struct loongarch_iommu *iommu, u16 domain_i iommu_write_regl(iommu, LA_IOMMU_VBTC, val); } +static void flush_iotlb(struct loongarch_iommu *iommu) +{ + u32 val; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Flush all tlb */ + val = iommu_read_regl(iommu, LA_IOMMU_VBTC); + val &= ~0x1f; + val |= 0x5; + iommu_write_regl(iommu, LA_IOMMU_VBTC, val); +} + static int flush_pgtable_is_busy(struct loongarch_iommu *iommu) { u32 val; @@ -336,22 +352,6 @@ static int update_dev_table(struct la_iommu_dev_data *dev_data, int flag) return 0; } -static void flush_iotlb(struct loongarch_iommu *iommu) -{ - u32 val; - - if (iommu == NULL) { - pr_err("%s iommu is NULL", __func__); - return; - } - - /* Flush all tlb */ - val = iommu_read_regl(iommu, LA_IOMMU_VBTC); - val &= ~0x1f; - val |= 0x5; - iommu_write_regl(iommu, LA_IOMMU_VBTC, val); -} - static 
int iommu_flush_iotlb(struct loongarch_iommu *iommu) { u32 retry = 0; @@ -443,8 +443,10 @@ static int domain_id_alloc(struct loongarch_iommu *iommu) if (id < MAX_DOMAIN_ID) __set_bit(id, iommu->domain_bitmap); spin_unlock(&iommu->domain_bitmap_lock); - if (id >= MAX_DOMAIN_ID) + if (id >= MAX_DOMAIN_ID) { + id = -1; pr_err("LA-IOMMU: Alloc domain id over max domain id\n"); + } return id; } @@ -595,8 +597,8 @@ static struct iommu_domain *la_iommu_domain_alloc(unsigned int type) struct dom_info *info; switch (type) { + case IOMMU_DOMAIN_BLOCKED: case IOMMU_DOMAIN_UNMANAGED: - case IOMMU_DOMAIN_IDENTITY: info = alloc_dom_info(); if (info == NULL) return NULL; @@ -833,11 +835,11 @@ static int la_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) struct iommu_info *info; unsigned short bdf; - if (domain->type == IOMMU_DOMAIN_IDENTITY) - domain = NULL; - la_iommu_detach_dev(dev); + if (domain != NULL && domain->type == IOMMU_DOMAIN_BLOCKED) + return 0; + if (domain == NULL) return 0; @@ -920,6 +922,7 @@ static void la_iommu_detach_dev(struct device *dev) spin_lock(&iommu_entry->devlock); do_detach(dev_data); spin_unlock(&iommu_entry->devlock); + dev_data->domain = NULL; pci_info(pdev, "%s iommu devid %x sigment %x\n", __func__, iommu->devid, iommu->segment); diff --git a/drivers/iommu/loongarch_iommu.h b/drivers/iommu/loongarch_iommu.h index cf5640d95900..a411d2b34d01 100644 --- a/drivers/iommu/loongarch_iommu.h +++ b/drivers/iommu/loongarch_iommu.h @@ -169,7 +169,7 @@ struct dom_entry { struct la_iommu_dev_data { struct list_head list; /* for iommu_entry->dev_list */ struct loongarch_iommu *iommu; - struct iommu_info *iommu_entry; + struct iommu_info *iommu_entry; struct iommu_domain *domain; struct device *dev; unsigned short bdf; -- Gitee From 60cbf8ea52f28e3267bb452df421e89d889a3277 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 2 Jan 2025 14:20:25 +0800 Subject: [PATCH 04/14] anolis: LoongArch: LSVZ: Clear LLBCTL if secondary mmu mapping is 
changed LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Register LLBCTL is separated CSR register from host, host exception eret instruction will clear host LLBCTL CSR register, guest exception will clear guest LLBCTL CSR register. VCPU0 atomic64_fetch_add_unless VCPU1 atomic64_fetch_add_unless 0: ll.d %[p], %[c] beq %[p], %[u], 1f Here secondary mmu mapping is changed, hpa page is replaced with new page. And VCPU1 executed atomic instruction on new page. 0: ll.d %[p], %[c] beq %[p], %[u], 1f add.d %[rc], %[p], %[a] sc.d %[rc], %[c] add.d %[rc], %[p], %[a] sc.d %[rc], %[c] LLBCTL is on and it represents the memory is not modified, sc.d will modify the memory directly. Here clear guest LLBCTL_WCLLB register when mapping is the changed. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li --- arch/loongarch/kvm/main.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 1f50f6723739..5f0ddf239352 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -245,6 +245,21 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu) trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); vcpu->cpu = cpu; kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); + /* + * LLBCTL_WCLLB is separated CSR register from host + * eret instruction in host mode clears host LLBCTL_WCLLB + * register, and clears guest register in guest mode + * + * When gpa --> hpa mapping is changed, guest does not know + * even if the content is changed with new address + * + * Here clear guest LLBCTL_WCLLB register when mapping is + * changed, else if mapping is changed when guest is executing + * LL/SC pair, LL loads old address, SC store new address + * successfully since LLBCTL_WCLLB is on, even if memory + * with new address is changed with other VCPUs. 
+ */ + set_gcsr_llbctl(LOONGARCH_CSR_LLBCTL); } /* Restore GSTAT(0x50).vpid */ -- Gitee From aeaccc4e07abc44ca0dc44872bcf1f3d1a8d2b6d Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Wed, 5 Feb 2025 15:43:38 +0800 Subject: [PATCH 05/14] anolis: LoongArch: KVM: Add reset function for irqchip LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ We need to empty the data in irqchip when the VM restarts. Signed-off-by: Xianglai Li --- arch/loongarch/include/asm/kvm_ipi.h | 1 + arch/loongarch/include/asm/kvm_pch_pic.h | 3 ++- arch/loongarch/kvm/intc/ipi.c | 20 ++++++++++++++++++++ arch/loongarch/kvm/intc/pch_pic.c | 20 ++++++++++++++++++++ arch/loongarch/kvm/vcpu.c | 5 ++++- 5 files changed, 47 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h index 729dfc1e3f40..513de58ba1de 100644 --- a/arch/loongarch/include/asm/kvm_ipi.h +++ b/arch/loongarch/include/asm/kvm_ipi.h @@ -49,4 +49,5 @@ struct ipi_state { #define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) int kvm_loongarch_register_ipi_device(void); +int kvm_loongarch_reset_ipi(struct kvm_vcpu *vcpu); #endif diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h index 91bd5a5ec575..f880698e2ed2 100644 --- a/arch/loongarch/include/asm/kvm_pch_pic.h +++ b/arch/loongarch/include/asm/kvm_pch_pic.h @@ -41,6 +41,7 @@ struct loongarch_pch_pic { spinlock_t lock; struct kvm *kvm; struct kvm_io_device device; + uint64_t pch_pic_base; uint64_t mask; /* 1:disable irq, 0:enable irq */ uint64_t htmsi_en; /* 1:msi */ uint64_t edge; /* 1:edge triggered, 0:level triggered */ @@ -52,10 +53,10 @@ struct loongarch_pch_pic { uint64_t polarity; /* 0: high level trigger, 1: low level trigger */ uint8_t route_entry[64]; /* default value 0, route to int0: extioi */ uint8_t htmsi_vector[64]; /* irq route table for routing to extioi */ - uint64_t 
pch_pic_base; }; void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level); void pch_msi_set_irq(struct kvm *kvm, int irq, int level); int kvm_loongarch_register_pch_pic_device(void); +int kvm_loongarch_reset_pch(struct kvm *kvm); #endif /* LOONGARCH_PCH_PIC_H */ diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index 12024d9fdd0b..2638f58bcf81 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -536,3 +536,23 @@ int kvm_loongarch_register_ipi_device(void) return kvm_register_device_ops(&kvm_loongarch_ipi_dev_ops, KVM_DEV_TYPE_LA_IPI); } + +int kvm_loongarch_reset_ipi(struct kvm_vcpu *vcpu) +{ + struct ipi_state *s = &vcpu->arch.ipi_state; + u8 offset, size; + u8 *pstart; + + if (!s) + return -EINVAL; + + pstart = (char *)&s->status; + offset = (char *)&s->status - (char *)s; + size = sizeof(struct ipi_state) - offset; + + spin_lock(&vcpu->arch.ipi_state.lock); + memset(pstart, 0, size); + spin_unlock(&vcpu->arch.ipi_state.lock); + + return 0; +} diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 7d053dbcd5c0..6726f718e16b 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -538,3 +538,23 @@ int kvm_loongarch_register_pch_pic_device(void) return kvm_register_device_ops(&kvm_loongarch_pch_pic_dev_ops, KVM_DEV_TYPE_LA_IOAPIC); } + +int kvm_loongarch_reset_pch(struct kvm *kvm) +{ + struct loongarch_pch_pic *s = kvm->arch.pch_pic; + u8 offset, size; + u8 *pstart; + + if (!s) + return -EINVAL; + + pstart = (char *)&s->mask; + offset = (char *)&s->mask - (char *)s; + size = sizeof(struct loongarch_pch_pic) - offset; + + spin_lock(&s->lock); + memset(pstart, 0, size); + spin_unlock(&s->lock); + + return 0; +} diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 8c5cadd9821d..2cbf74559145 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -870,8 +870,11 @@ static int kvm_set_one_reg(struct 
kvm_vcpu *vcpu, break; case KVM_REG_LOONGARCH_VCPU_RESET: vcpu->arch.st.guest_addr = 0; - if (vcpu->vcpu_id == 0) + if (vcpu->vcpu_id == 0) { kvm_loongarch_reset_extioi(vcpu->kvm); + kvm_loongarch_reset_pch(vcpu->kvm); + } + kvm_loongarch_reset_ipi(vcpu); memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); break; -- Gitee From ce9cb8920f4df347137e0644c4d7e1c55952207b Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 14 Feb 2025 08:46:29 +0800 Subject: [PATCH 06/14] anolis: LoongArch: KVM: Fixed VM migration failure after ptw was enabled LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Before ptw is enabled, when a virtual machine writes data to a physical page, a page modification exception will be triggered. In the exception processing, the dirty position of pte is set, and the page dirty bitmap of kvm is set. During migration, the page dirty bitmap is used for dirty page migration. After ptw is enabled, when the virtual machine writes data to the physical page, the ptw hardware directly writes the dirty bit of the pte without triggering page modification exceptions. kvm cannot set page dirty bitmap correctly, resulting in partial data loss and migration failure. In order to solve this problem, we use the write bit and dirty bit of pte to mark whether the current page needs to be migrated, that is, the write bit and dirty bit of pte are cleared to zero at the beginning of the migration, so that even when ptw is enabled, the page modification exception will be triggered. In this way, the correct dirty page marking process is entered to complete the correct migration of memory, and the 50bit of pte is used to record the original write property in order to restore the correct write property of pte after the dirty page marking is completed. 
Signed-off-by: Xianglai Li --- arch/loongarch/include/asm/kvm_mmu.h | 15 ++++++++++++++- arch/loongarch/kvm/mmu.c | 17 ++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h index 099bafc6f797..153875b87b7e 100644 --- a/arch/loongarch/include/asm/kvm_mmu.h +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -18,6 +18,15 @@ #define _KVM_FLUSH_PGTABLE 0x1 #define _KVM_HAS_PGMASK 0x2 + +/* If the page entry has a write attribute, + * we use the page entry 50bit(KVM_RECORD_PAGE_WRITE_ABLE) + * to record it to restore the write attribute of the page entry, + * in the fast path kvm_map_page_fast for page table processing + */ +#define KVM_RECORD_PAGE_WRITE_ABLE_SHIFT 50 +#define KVM_RECORD_PAGE_WRITE_ABLE (_ULCAST_(1) << KVM_RECORD_PAGE_WRITE_ABLE_SHIFT) + #define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) #define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT)) @@ -53,6 +62,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) } static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } +static inline int kvm_record_pte_write_able(kvm_pte_t pte) +{ + return pte & KVM_RECORD_PAGE_WRITE_ABLE; +} static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } @@ -74,7 +87,7 @@ static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) { - return pte & ~_PAGE_DIRTY; + return pte & (~(_PAGE_DIRTY | _PAGE_WRITE)); } static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index cb701649f56f..b4b1464fc4fb 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -538,6 +538,8 @@ bool kvm_set_spte_gfn(struct kvm *kvm, 
struct kvm_gfn_range *range) * _PAGE_DIRTY since gpa has already recorded as dirty page */ prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); + if (prot_bits & _PAGE_WRITE) + prot_bits |= KVM_RECORD_PAGE_WRITE_ABLE; kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); return true; @@ -602,6 +604,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ /* Track access to pages marked old */ new = kvm_pte_mkyoung(*ptep); + + /* We restore the write property of + * the page table entry according to + * KVM_RECORD_PAGE_WRITE_ABLE + */ + if (kvm_record_pte_write_able(new)) + new |= _PAGE_WRITE; + /* call kvm_set_pfn_accessed() after unlock */ if (write && !kvm_pte_dirty(new)) { @@ -903,7 +913,12 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) prot_bits |= _CACHE_SUC; if (writeable) { - prot_bits |= _PAGE_WRITE; + /* If the page entry has a write attribute, + * we use the page entry 50bit(KVM_RECORD_PAGE_WRITE_ABLE) + * to record it to restore the write attribute of the page entry, + * in the fast path kvm_map_page_fast for page table processing + */ + prot_bits |= _PAGE_WRITE | KVM_RECORD_PAGE_WRITE_ABLE; if (write) prot_bits |= __WRITEABLE; } -- Gitee From 646c17d997a7df082e71349d5aec87b5cfd012f0 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Thu, 20 Feb 2025 13:48:09 +0800 Subject: [PATCH 07/14] anolis: LoongArch: KVM: add virt extioi cpu encode support LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ The extioi controller can send interrupts to only four cpus and cannot send interrupts to other cpus. This patch enables extioi to send interrupts to a maximum of 256 cpus. 
Signed-off-by: Xianglai Li --- arch/loongarch/include/asm/kvm_extioi.h | 22 ++ arch/loongarch/include/uapi/asm/kvm.h | 11 + arch/loongarch/kvm/intc/extioi.c | 290 ++++++++++++++++++------ 3 files changed, 254 insertions(+), 69 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_extioi.h b/arch/loongarch/include/asm/kvm_extioi.h index c2bd295d0edc..053ca2df7056 100644 --- a/arch/loongarch/include/asm/kvm_extioi.h +++ b/arch/loongarch/include/asm/kvm_extioi.h @@ -34,6 +34,23 @@ #define EXTIOI_COREMAP_START 0x800 #define EXTIOI_COREMAP_END 0x8ff +#define EIOINTC_VIRT_BASE (0x40000000) +#define EIOINTC_VIRT_SIZE (0x1000) + +#define EIOINTC_VIRT_FEATURES (0x0) +#define EIOINTC_HAS_VIRT_EXTENSION (0) +#define EIOINTC_HAS_ENABLE_OPTION (1) +#define EIOINTC_HAS_INT_ENCODE (2) +#define EIOINTC_HAS_CPU_ENCODE (3) +#define EIOINTC_VIRT_HAS_FEATURES ((1U << EIOINTC_HAS_VIRT_EXTENSION) \ + | (1U << EIOINTC_HAS_ENABLE_OPTION) \ + | (1U << EIOINTC_HAS_INT_ENCODE) \ + | (1U << EIOINTC_HAS_CPU_ENCODE)) +#define EIOINTC_VIRT_CONFIG (0x4) +#define EIOINTC_ENABLE (1) +#define EIOINTC_ENABLE_INT_ENCODE (2) +#define EIOINTC_ENABLE_CPU_ENCODE (3) + #define LS3A_INTC_IP 8 #define EXTIOI_SW_COREMAP_FLAG (1 << 0) @@ -42,6 +59,11 @@ struct loongarch_extioi { spinlock_t lock; struct kvm *kvm; struct kvm_io_device device; + struct kvm_io_device device_vext; + uint32_t num_cpu; + uint32_t features; + uint32_t status; + /* hardware state */ union nodetype { u64 reg_u64[EXTIOI_IRQS_NODETYPE_COUNT / 4]; diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 13c1280662ae..fa461fcd0a25 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -141,6 +141,17 @@ struct kvm_iocsr_entry { #define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000006 +#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0 +#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1 +#define 
KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2 + +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000007 +#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0 +#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1 +#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3 + + #define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004 #define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c index 5327066f16ae..6dd90145a511 100644 --- a/arch/loongarch/kvm/intc/extioi.c +++ b/arch/loongarch/kvm/intc/extioi.c @@ -17,8 +17,10 @@ static void extioi_update_irq(struct loongarch_extioi *s, int irq, int level) struct kvm_vcpu *vcpu; ipnum = s->ipmap.reg_u8[irq / 32]; - ipnum = count_trailing_zeros(ipnum); - ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) { + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + } cpu = s->sw_coremap[irq]; vcpu = kvm_get_vcpu(s->kvm, cpu); @@ -53,8 +55,10 @@ static void extioi_set_sw_coreisr(struct loongarch_extioi *s) for (irq = 0; irq < EXTIOI_IRQS; irq++) { ipnum = s->ipmap.reg_u8[irq / 32]; - ipnum = count_trailing_zeros(ipnum); - ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) { + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + } irq_index = irq / 32; /* length of accessing core isr is 4 bytes */ irq_mask = 1 << (irq & 0x1f); @@ -99,6 +103,35 @@ static inline void extioi_enable_irq(struct kvm_vcpu *vcpu, struct loongarch_ext } } +static inline void extioi_update_sw_coremap(struct loongarch_extioi *s, + int irq, void *pvalue, u32 len, bool notify) +{ + int i, cpu; + u64 val = *(u64 *)pvalue; + + for (i = 0; i < len; i++) { + cpu = val & 0xff; + val = val >> 8; + + if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) { + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 
0 : cpu; + } + + if (s->sw_coremap[irq + i] == cpu) + continue; + + if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) { + /* lower irq at old cpu and raise irq at new cpu */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else { + s->sw_coremap[irq + i] = cpu; + } + } +} + static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, gpa_t addr, int len, const void *val) @@ -167,24 +200,7 @@ static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu, irq = offset - EXTIOI_COREMAP_START; index = irq; s->coremap.reg_u8[index] = data; - - cpu = data & 0xff; - cpu = ffs(cpu) - 1; - cpu = (cpu >= 4) ? 0 : cpu; - - if (s->sw_coremap[irq] == cpu) - break; - - if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { - /* - * lower irq at old cpu and raise irq at new cpu - */ - extioi_update_irq(s, irq, 0); - s->sw_coremap[irq] = cpu; - extioi_update_irq(s, irq, 1); - } else - s->sw_coremap[irq] = cpu; - + extioi_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); break; default: ret = -EINVAL; @@ -270,28 +286,8 @@ static int loongarch_extioi_writew(struct kvm_vcpu *vcpu, case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: irq = offset - EXTIOI_COREMAP_START; index = irq >> 2; - s->coremap.reg_u32[index] = data; - - for (i = 0; i < sizeof(data); i++) { - cpu = data & 0xff; - cpu = ffs(cpu) - 1; - cpu = (cpu >= 4) ? 0 : cpu; - data = data >> 8; - - if (s->sw_coremap[irq + i] == cpu) - continue; - - if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { - /* - * lower irq at old cpu and raise irq at new cpu - */ - extioi_update_irq(s, irq + i, 0); - s->sw_coremap[irq + i] = cpu; - extioi_update_irq(s, irq + i, 1); - } else - s->sw_coremap[irq + i] = cpu; - } + extioi_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); break; default: ret = -EINVAL; @@ -379,28 +375,8 @@ static int loongarch_extioi_writel(struct kvm_vcpu *vcpu, case EXTIOI_COREMAP_START ... 
EXTIOI_COREMAP_END: irq = offset - EXTIOI_COREMAP_START; index = irq >> 3; - s->coremap.reg_u64[index] = data; - - for (i = 0; i < sizeof(data); i++) { - cpu = data & 0xff; - cpu = ffs(cpu) - 1; - cpu = (cpu >= 4) ? 0 : cpu; - data = data >> 8; - - if (s->sw_coremap[irq + i] == cpu) - continue; - - if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { - /* - * lower irq at old cpu and raise irq at new cpu - */ - extioi_update_irq(s, irq + i, 0); - s->sw_coremap[irq + i] = cpu; - extioi_update_irq(s, irq + i, 1); - } else - s->sw_coremap[irq + i] = cpu; - } + extioi_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); break; default: ret = -EINVAL; @@ -692,24 +668,190 @@ static int kvm_loongarch_extioi_regs_access(struct kvm_device *dev, return 0; } +static int kvm_extioi_ctrl_access(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret = 0; + unsigned long flags; + unsigned long type = (unsigned long)attr->attr; + u32 i, start_irq; + void __user *data; + struct loongarch_extioi *s = dev->kvm->arch.extioi; + + data = (void __user *)attr->addr; + spin_lock_irqsave(&s->lock, flags); + switch (type) { + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: + if (copy_from_user(&s->num_cpu, data, 4)) + ret = -EFAULT; + break; + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: + if (copy_from_user(&s->features, data, 4)) + ret = -EFAULT; + if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION))) + s->status |= BIT(EIOINTC_ENABLE); + break; + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED: + extioi_set_sw_coreisr(s); + for (i = 0; i < (EXTIOI_IRQS / 4); i++) { + start_irq = i * 4; + extioi_update_sw_coremap(s, start_irq, + (void *)&s->coremap.reg_u32[i], sizeof(u32), false); + } + break; + default: + break; + } + spin_unlock_irqrestore(&s->lock, flags); + + return ret; +} + +static int kvm_extioi_sw_status_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int addr, ret = 0; + unsigned long flags; + void *p = NULL; + void 
__user *data; + struct loongarch_extioi *s; + + s = dev->kvm->arch.extioi; + addr = attr->attr; + addr &= 0xffff; + + data = (void __user *)attr->addr; + switch (addr) { + case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: + p = &s->num_cpu; + break; + case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE: + p = &s->features; + break; + case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE: + p = &s->status; + break; + default: + kvm_err("%s: unknown extioi register, addr = %d\n", __func__, addr); + return -EINVAL; + } + spin_lock_irqsave(&s->lock, flags); + if (is_write) { + if (copy_from_user(p, data, 4)) + ret = -EFAULT; + } else { + if (copy_to_user(data, p, 4)) + ret = -EFAULT; + } + spin_unlock_irqrestore(&s->lock, flags); + + return ret; +} + static int kvm_loongarch_extioi_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) + switch (attr->group) { + case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: return kvm_loongarch_extioi_regs_access(dev, attr, false); - - return -EINVAL; + case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: + return kvm_extioi_sw_status_access(dev, attr, false); + default: + return -EINVAL; + } } static int kvm_loongarch_extioi_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) + switch (attr->group) { + + case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL: + return kvm_extioi_ctrl_access(dev, attr); + case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: return kvm_loongarch_extioi_regs_access(dev, attr, true); + case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: + return kvm_extioi_sw_status_access(dev, attr, true); + } + return -EINVAL; } +static int kvm_extioi_virt_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + unsigned long flags; + u32 *data = val; + struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi; + + if (!extioi) { + kvm_err("%s: extioi irqchip not valid!\n", __func__); + return -EINVAL; + } + + 
addr -= EIOINTC_VIRT_BASE; + spin_lock_irqsave(&extioi->lock, flags); + switch (addr) { + case EIOINTC_VIRT_FEATURES: + *data = extioi->features; + break; + case EIOINTC_VIRT_CONFIG: + *data = extioi->status; + break; + default: + break; + } + spin_unlock_irqrestore(&extioi->lock, flags); + + return 0; +} + +static int kvm_extioi_virt_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + int ret = 0; + unsigned long flags; + u32 value = *(u32 *)val; + struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi; + + if (!extioi) { + kvm_err("%s: extioi irqchip not valid!\n", __func__); + return -EINVAL; + } + + addr -= EIOINTC_VIRT_BASE; + spin_lock_irqsave(&extioi->lock, flags); + switch (addr) { + case EIOINTC_VIRT_FEATURES: + ret = -EPERM; + break; + case EIOINTC_VIRT_CONFIG: + /* + * extioi features can only be set at disabled status + */ + if ((extioi->status & BIT(EIOINTC_ENABLE)) && value) { + ret = -EPERM; + break; + } + extioi->status = value & extioi->features; + break; + default: + break; + } + spin_unlock_irqrestore(&extioi->lock, flags); + + return ret; +} + +static const struct kvm_io_device_ops kvm_extioi_virt_ops = { + .read = kvm_extioi_virt_read, + .write = kvm_extioi_virt_write, +}; + static void kvm_loongarch_extioi_destroy(struct kvm_device *dev) { struct kvm *kvm; @@ -729,6 +871,7 @@ static void kvm_loongarch_extioi_destroy(struct kvm_device *dev) device = &extioi->device; kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &extioi->device_vext); kfree(extioi); } @@ -736,7 +879,7 @@ static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type) { int ret; struct loongarch_extioi *s; - struct kvm_io_device *device; + struct kvm_io_device *device, *device1; struct kvm *kvm = dev->kvm; /* extioi has been created */ @@ -762,6 +905,15 @@ static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type) return -EFAULT; } + device1 = 
&s->device_vext; + kvm_iodevice_init(device1, &kvm_extioi_virt_ops); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, + EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1); + if (ret < 0) { + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device); + kfree(s); + return ret; + } kvm->arch.extioi = s; kvm_info("create extioi device successfully\n"); -- Gitee From 4ae8b227da1a87c461c5c9c149d42a44239128b2 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 8 Mar 2025 17:17:51 +0800 Subject: [PATCH 08/14] LoongArch: KVM: Reload guest CSR registers after S4 LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ On host HW guest CSR registers are lost after suspend and resume operation. Since last_vcpu of boot CPU still records latest vCPU pointer so that guest CSR register skips to reload when boot CPU resumes and vCPU is scheduled. Here last_vcpu is cleared so that guest CSR registers will reload from scheduled vCPU context after suspend and resume. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li --- arch/loongarch/kvm/main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 5f0ddf239352..ef2bad612c00 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -314,6 +314,12 @@ int kvm_arch_hardware_enable(void) kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + /* + * HW Guest CSR registers are lost after CPU suspend and resume. 
+ * Clear last_vcpu so that Guest CSR registers forced to reload + * from vCPU SW state + */ + this_cpu_ptr(vmcs)->last_vcpu = NULL; return 0; } -- Gitee From 3994d60437dd8e72e6cef2947a41139378dd01e5 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 8 Mar 2025 17:23:07 +0800 Subject: [PATCH 09/14] LoongArch: KVM: Add interrupt checking with Loongson AVEC LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ There is newly added macro INT_AVEC with CSR ESTAT register, which is bit 14 used for Loongson AVEC support. AVEC interrupt status bit 14 is supported with macro CSR_ESTAT_IS, here replace hardcoded value 0x1fff with macro CSR_ESTAT_IS so that AVEC interrupt status is supported by KVM also. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li --- arch/loongarch/kvm/vcpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 2cbf74559145..f8f5e1b130a1 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -309,7 +309,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { int ret = RESUME_GUEST; unsigned long estat = vcpu->arch.host_estat; - u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 intr = estat & CSR_ESTAT_IS; u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; vcpu->mode = OUTSIDE_GUEST_MODE; -- Gitee From 515312b6b010005f7d5969e1985778aeab4d5950 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Tue, 18 Mar 2025 09:57:27 +0800 Subject: [PATCH 10/14] LoongArch:config: enable pci host controller fdt driver LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ When the virtual machine only uses the -kernel parameter to boot the kernel but does not carry the -bios parameter, the device information of the startup mainly relies on the fdt to pass to the kernel instead 
of the acpi, so we need to enable the pci host controller fdt driver to solve the following call stack problem: stack: [<9000000001155e78>] i8042_flush+0x88/0x1b8 [<90000000015c1248>] i8042_init+0x1c4/0x28c [<9000000000220ccc>] do_one_initcall+0x6c/0x2a0 [<9000000001581054>] do_initcalls+0x114/0x164 [<900000000158128c>] kernel_init_freeable+0x174/0x1b0 [<9000000001565254>] kernel_init+0x24/0x120 [<9000000000221fa4>] ret_from_kernel_thread+0x8/0xa4 Signed-off-by: Xianglai Li --- arch/loongarch/configs/loongson3_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 980f5f2c99a1..c31a243625f8 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -2206,3 +2206,4 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_FUNCTION_TRACER=y # CONFIG_STRICT_DEVMEM is not set # CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_PCI_HOST_GENERIC=y -- Gitee From c614e78048ab283a5b6af2bee84ddb5530f784df Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Tue, 25 Mar 2025 09:02:02 +0800 Subject: [PATCH 11/14] LoongArch: KVM: Repair the restart pressure test virtual machine hang dead problem LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ After the virtual restart, exintc does not correctly clear the estat register and qemu does not clear the estat register. Estat data sent from qemu is merged instead of overwritten on kvm. In order to solve this problem, the following changes were made to this patch: 1.exintc correctly clears zeros when the VM restarts 2. estat on the qemu side clears ESTAT correctly when the VM restarts 3. 
When the estat register data on the qemu side is sent to the kvm, the data needs to be overwritten instead of merged Signed-off-by: Xianglai Li --- arch/loongarch/kvm/intc/extioi.c | 2 +- arch/loongarch/kvm/intc/ipi.c | 2 +- arch/loongarch/kvm/intc/pch_pic.c | 2 +- arch/loongarch/kvm/vcpu.c | 7 +++++++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c index 6dd90145a511..801ac861c876 100644 --- a/arch/loongarch/kvm/intc/extioi.c +++ b/arch/loongarch/kvm/intc/extioi.c @@ -938,7 +938,7 @@ int kvm_loongarch_reset_extioi(struct kvm *kvm) { struct loongarch_extioi *extioi = kvm->arch.extioi; unsigned long flags; - u8 offset, size; + unsigned long offset, size; u8 *pstart; if (!extioi) diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index 2638f58bcf81..4459ec42eb40 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -540,7 +540,7 @@ int kvm_loongarch_register_ipi_device(void) int kvm_loongarch_reset_ipi(struct kvm_vcpu *vcpu) { struct ipi_state *s = &vcpu->arch.ipi_state; - u8 offset, size; + unsigned long offset, size; u8 *pstart; if (!s) diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 6726f718e16b..9f287b419910 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -542,7 +542,7 @@ int kvm_loongarch_register_pch_pic_device(void) int kvm_loongarch_reset_pch(struct kvm *kvm) { struct loongarch_pch_pic *s = kvm->arch.pch_pic; - u8 offset, size; + unsigned long offset, size; u8 *pstart; if (!s) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index f8f5e1b130a1..aaed344ec8d2 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -874,7 +874,14 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, kvm_loongarch_reset_extioi(vcpu->kvm); kvm_loongarch_reset_pch(vcpu->kvm); } + kvm_loongarch_reset_ipi(vcpu); + /* + * When the vcpu 
resets, clear the ESTAT and GINTC registers, + * and clear other CSR registers through the _kvm_set_csr register. + */ + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); break; -- Gitee From 01ee8cea2373220ce33d1db27b161f59c953996b Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 21 Mar 2025 11:05:05 +0800 Subject: [PATCH 12/14] LoongArch: Revert "LoongArch: Fix cpu hotplug issue" LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Upstream kernel community has to solve this problem. This patch also causes other problems. When cpus on node0 are started on the physical machine but cpus on node1 are not started on the physical machine, devices on node1 fail to alloc memory space. In this case, roll back this patch, Then apply the upstream scheme. 
Signed-off-by: Xianglai Li --- arch/loongarch/include/asm/smp.h | 3 -- arch/loongarch/kernel/acpi.c | 24 ++++++---------- arch/loongarch/kernel/setup.c | 47 -------------------------------- arch/loongarch/kernel/smp.c | 9 +++--- 4 files changed, 13 insertions(+), 70 deletions(-) diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index cc232901e4dd..630e5ebec21c 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -102,7 +102,4 @@ static inline void __cpu_die(unsigned int cpu) } #endif -int topo_add_cpu(int physid); -int topo_get_cpu(int physid); - #endif /* __ASM_SMP_H */ diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 17dc28821a9d..58819b017ba8 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -71,10 +71,10 @@ int set_processor_mask(u32 id, u32 flags) return -ENODEV; } - - cpu = topo_add_cpu(cpuid); - if (cpu < 0) - return -EEXIST; + if (cpuid == loongson_sysconf.boot_cpu_id) + cpu = 0; + else + cpu = cpumask_next_zero(-1, cpu_present_mask); if (flags & ACPI_MADT_ENABLED) { num_processors++; @@ -197,6 +197,8 @@ void __init acpi_boot_table_init(void) goto fdt_earlycon; } + loongson_sysconf.boot_cpu_id = read_csr_cpuid(); + /* * Process the Multiple APIC Description Table (MADT), if present */ @@ -246,7 +248,7 @@ void __init numa_set_distance(int from, int to, int distance) void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { - int pxm, node, cpu; + int pxm, node; if (srat_disabled()) return; @@ -275,11 +277,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) return; } - cpu = topo_get_cpu(pa->apic_id); - /* Check whether apic_id exists in MADT table */ - if (cpu < 0) - return; - early_numa_add_cpu(pa->apic_id, node); set_cpuid_to_node(pa->apic_id, node); @@ -318,17 +315,12 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu { int cpu; - cpu = 
topo_get_cpu(physid); - /* Check whether apic_id exists in MADT table */ + cpu = set_processor_mask(physid, ACPI_MADT_ENABLED); if (cpu < 0) { pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); return cpu; } - num_processors++; - set_cpu_present(cpu, true); - __cpu_number_map[physid] = cpu; - __cpu_logical_map[cpu] = physid; acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 68b81254dd9a..77b79825d44a 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -72,8 +72,6 @@ EXPORT_SYMBOL(cpu_data); struct loongson_board_info b_info; static const char dmi_empty_string[] = " "; -static int possible_cpus; -static bool bsp_added; /* * Setup information @@ -376,55 +374,10 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } -int topo_get_cpu(int physid) -{ - int i; - - for (i = 0; i < possible_cpus; i++) - if (cpu_logical_map(i) == physid) - break; - - if (i == possible_cpus) - return -ENOENT; - - return i; -} - -int topo_add_cpu(int physid) -{ - int cpu; - - if (!bsp_added && (physid == loongson_sysconf.boot_cpu_id)) { - bsp_added = true; - return 0; - } - - cpu = topo_get_cpu(physid); - if (cpu >= 0) { - pr_warn("Adding duplicated physical cpuid 0x%x\n", physid); - return -EEXIST; - } - - if (possible_cpus >= nr_cpu_ids) - return -ERANGE; - - __cpu_logical_map[possible_cpus] = physid; - cpu = possible_cpus++; - return cpu; -} - -static void __init topo_init(void) -{ - loongson_sysconf.boot_cpu_id = read_csr_cpuid(); - __cpu_logical_map[0] = loongson_sysconf.boot_cpu_id; - possible_cpus++; -} - void __init platform_init(void) { arch_reserve_vmcore(); arch_parse_crashkernel(); - topo_init(); #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 5d02bf5126b7..7f693a24687d 100644 --- a/arch/loongarch/kernel/smp.c +++ 
b/arch/loongarch/kernel/smp.c @@ -291,9 +291,10 @@ static void __init fdt_smp_setup(void) if (cpuid >= nr_cpu_ids) continue; - cpu = topo_add_cpu(cpuid); - if (cpu < 0) - continue; + if (cpuid == loongson_sysconf.boot_cpu_id) + cpu = 0; + else + cpu = cpumask_next_zero(-1, cpu_present_mask); num_processors++; set_cpu_possible(cpu, true); @@ -301,7 +302,7 @@ static void __init fdt_smp_setup(void) __cpu_number_map[cpuid] = cpu; __cpu_logical_map[cpu] = cpuid; - early_numa_add_cpu(cpuid, 0); + early_numa_add_cpu(cpu, 0); set_cpuid_to_node(cpuid, 0); } -- Gitee From 8408e7da4a23a4b300f9f84683a0e1087a828656 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Tue, 1 Apr 2025 16:40:38 +0800 Subject: [PATCH 13/14] driver/iommu: Set iommu driver buildin kernel LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ Set the CONFIG_LOONGARCH_IOMMU configuration item to y. Signed-off-by: Xianglai Li --- arch/loongarch/configs/loongson3_defconfig | 2 +- drivers/iommu/loongarch_iommu.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index c31a243625f8..dc13d6fd004a 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -1916,7 +1916,7 @@ CONFIG_COMEDI_NI_PCIMIO=m CONFIG_STAGING=y CONFIG_COMMON_CLK_LOONGSON2=y CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y -CONFIG_LOONGARCH_IOMMU=m +CONFIG_LOONGARCH_IOMMU=y CONFIG_LOONGSON2_GUTS=y CONFIG_LOONGSON2_PM=y CONFIG_PM_DEVFREQ=y diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c index fc5013428eca..3ad54cf0f3c2 100644 --- a/drivers/iommu/loongarch_iommu.c +++ b/drivers/iommu/loongarch_iommu.c @@ -109,7 +109,7 @@ u16 la_iommu_last_bdf; /* largest PCI device id * we have to handle */ -int loongarch_iommu_disable; +int loongarch_iommu_disable = 1; #define iommu_write_regl(iommu, off, 
val) \ writel(val, iommu->confbase + off) -- Gitee From 30fe06c94a7d7103ac1dee2691db8ebf2052768a Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Wed, 2 Apr 2025 11:19:36 +0800 Subject: [PATCH 14/14] drivers/pci: Enable pci bridge acs capability LoongArch inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBE9VJ ------------------------------------------ To refine the granularity of the iommu group, turn on the acs capability of all bridges. The loongarch architecture does not support pci device peer-to-peer access, so we can assume that all pci bridges have acs capability. Signed-off-by: Xianglai Li --- drivers/pci/quirks.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 708e86c4fff6..a46123dcbe5a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4514,8 +4514,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, quirk_bridge_cavm_thrx2_pcie_root); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, quirk_bridge_cavm_thrx2_pcie_root); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, 0x3c09, - quirk_bridge_cavm_thrx2_pcie_root); /* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) @@ -5085,6 +5083,18 @@ static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) return false; } +static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * Loongson PCIe Root Ports don't advertise an ACS capability, but + * they do not allow peer-to-peer transactions between Root Ports. + * Allow each Root Port to be in a separate IOMMU group by masking + * SV/RR/CR/UF bits.
+ */ + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); +} + static const struct pci_dev_acs_enabled { u16 vendor; u16 device; @@ -5204,6 +5214,17 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, + /* Loongson PCIe Root Ports */ + { PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, /* Zhaoxin multi-function devices */ @@ -5268,8 +5289,6 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_MUCSE, 0x1c61, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_MUCSE, 0x1083, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_MUCSE, 0x1c83, pci_quirk_mf_endpoint_acs }, - { PCI_VENDOR_ID_LOONGSON, 0x3c09, pci_quirk_xgene_acs}, - { PCI_VENDOR_ID_LOONGSON, 0x3c19, pci_quirk_xgene_acs}, { 0 } }; -- Gitee