diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 980f5f2c99a14daa01d3228c1202a3fc822fea66..dc13d6fd004a3a3144f60df53a240e023f544708 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -1916,7 +1916,7 @@ CONFIG_COMEDI_NI_PCIMIO=m
 CONFIG_STAGING=y
 CONFIG_COMMON_CLK_LOONGSON2=y
 CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y
-CONFIG_LOONGARCH_IOMMU=m
+CONFIG_LOONGARCH_IOMMU=y
 CONFIG_LOONGSON2_GUTS=y
 CONFIG_LOONGSON2_PM=y
 CONFIG_PM_DEVFREQ=y
@@ -2206,3 +2206,4 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_FUNCTION_TRACER=y
 # CONFIG_STRICT_DEVMEM is not set
 # CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_PCI_HOST_GENERIC=y
diff --git a/arch/loongarch/include/asm/kvm_extioi.h b/arch/loongarch/include/asm/kvm_extioi.h
index c2bd295d0edcb6d0b750ba89e55fef2a4d6a1966..053ca2df70567b864083c86bcd277fb3747de496 100644
--- a/arch/loongarch/include/asm/kvm_extioi.h
+++ b/arch/loongarch/include/asm/kvm_extioi.h
@@ -34,6 +34,23 @@
 #define EXTIOI_COREMAP_START 0x800
 #define EXTIOI_COREMAP_END 0x8ff
 
+#define EIOINTC_VIRT_BASE (0x40000000)
+#define EIOINTC_VIRT_SIZE (0x1000)
+
+#define EIOINTC_VIRT_FEATURES (0x0)
+#define EIOINTC_HAS_VIRT_EXTENSION (0)
+#define EIOINTC_HAS_ENABLE_OPTION (1)
+#define EIOINTC_HAS_INT_ENCODE (2)
+#define EIOINTC_HAS_CPU_ENCODE (3)
+#define EIOINTC_VIRT_HAS_FEATURES ((1U << EIOINTC_HAS_VIRT_EXTENSION) \
+				| (1U << EIOINTC_HAS_ENABLE_OPTION) \
+				| (1U << EIOINTC_HAS_INT_ENCODE) \
+				| (1U << EIOINTC_HAS_CPU_ENCODE))
+#define EIOINTC_VIRT_CONFIG (0x4)
+#define EIOINTC_ENABLE (1)
+#define EIOINTC_ENABLE_INT_ENCODE (2)
+#define EIOINTC_ENABLE_CPU_ENCODE (3)
+
 #define LS3A_INTC_IP 8
 
 #define EXTIOI_SW_COREMAP_FLAG (1 << 0)
@@ -42,6 +59,11 @@ struct loongarch_extioi {
 	spinlock_t lock;
 	struct kvm *kvm;
 	struct kvm_io_device device;
+	struct kvm_io_device device_vext;
+	uint32_t num_cpu;
+	uint32_t features;
+	uint32_t status;
+
 	/* hardware state */
 	union nodetype {
 		u64 reg_u64[EXTIOI_IRQS_NODETYPE_COUNT / 4];
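For orientation (not part of the patch): the virt-extension region is a small two-register window, and EIOINTC_VIRT_HAS_FEATURES is simply the OR of the four feature bits defined above. A minimal, self-contained sketch of how a consumer might decode the features word; the printouts are purely illustrative:

```c
/* Decode the EIOINTC_VIRT_FEATURES word; bit positions as defined above. */
#include <stdint.h>
#include <stdio.h>

#define EIOINTC_HAS_VIRT_EXTENSION 0
#define EIOINTC_HAS_ENABLE_OPTION  1
#define EIOINTC_HAS_INT_ENCODE     2
#define EIOINTC_HAS_CPU_ENCODE     3

int main(void)
{
	/* Pretend this was read from EIOINTC_VIRT_BASE + EIOINTC_VIRT_FEATURES */
	uint32_t features = (1U << EIOINTC_HAS_VIRT_EXTENSION)
			  | (1U << EIOINTC_HAS_ENABLE_OPTION)
			  | (1U << EIOINTC_HAS_INT_ENCODE)
			  | (1U << EIOINTC_HAS_CPU_ENCODE);

	if (features & (1U << EIOINTC_HAS_VIRT_EXTENSION))
		printf("eiointc virt extension present\n");
	if (features & (1U << EIOINTC_HAS_INT_ENCODE))
		printf("interrupt encoding mode available\n");
	if (features & (1U << EIOINTC_HAS_CPU_ENCODE))
		printf("cpu encoding mode available\n");
	return 0;
}
```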
diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h
index 729dfc1e3f401758601a9bf8673f9ee51f8156b4..513de58ba1dedff813fcaaf7caa66f55cede2041 100644
--- a/arch/loongarch/include/asm/kvm_ipi.h
+++ b/arch/loongarch/include/asm/kvm_ipi.h
@@ -49,4 +49,5 @@ struct ipi_state {
 #define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
 
 int kvm_loongarch_register_ipi_device(void);
+int kvm_loongarch_reset_ipi(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h
index 099bafc6f797c960adf971147150ce5e9a580407..153875b87b7e918e6c6212fb4426669f333e72da 100644
--- a/arch/loongarch/include/asm/kvm_mmu.h
+++ b/arch/loongarch/include/asm/kvm_mmu.h
@@ -18,6 +18,15 @@
 #define _KVM_FLUSH_PGTABLE	0x1
 #define _KVM_HAS_PGMASK		0x2
 
+/*
+ * If a page table entry is writable, record that fact in bit 50 of the
+ * entry (KVM_RECORD_PAGE_WRITE_ABLE), so that the write attribute can
+ * be restored in the fast path kvm_map_page_fast() during page table
+ * processing.
+ */
+#define KVM_RECORD_PAGE_WRITE_ABLE_SHIFT 50
+#define KVM_RECORD_PAGE_WRITE_ABLE (_ULCAST_(1) << KVM_RECORD_PAGE_WRITE_ABLE_SHIFT)
+
 #define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
 #define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT))
 
@@ -53,6 +62,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
 }
 
 static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
+static inline int kvm_record_pte_write_able(kvm_pte_t pte)
+{
+	return pte & KVM_RECORD_PAGE_WRITE_ABLE;
+}
 static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
 static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
 static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
@@ -74,7 +87,7 @@ static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
 
 static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
 {
-	return pte & ~_PAGE_DIRTY;
+	return pte & (~(_PAGE_DIRTY | _PAGE_WRITE));
}
 
 static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
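A standalone sketch of the software-bit trick above (not kernel code): bit 50 remembers "this mapping may be writable" even after kvm_pte_mkclean() strips _PAGE_WRITE, so a later write fault can restore write permission cheaply. The _PAGE_* values here are illustrative placeholders, not the real LoongArch bit positions:

```c
#include <stdint.h>
#include <assert.h>

typedef uint64_t kvm_pte_t;

#define _PAGE_WRITE                (1UL << 8)   /* placeholder value */
#define _PAGE_DIRTY                (1UL << 9)   /* placeholder value */
#define KVM_RECORD_PAGE_WRITE_ABLE (1UL << 50)

/* mirrors the patched kvm_pte_mkclean(): drop DIRTY and WRITE together */
static kvm_pte_t pte_mkclean(kvm_pte_t pte)
{
	return pte & ~(_PAGE_DIRTY | _PAGE_WRITE);
}

/* mirrors the kvm_map_page_fast() change: re-grant write if bit 50 is set */
static kvm_pte_t fast_path_restore(kvm_pte_t pte)
{
	if (pte & KVM_RECORD_PAGE_WRITE_ABLE)
		pte |= _PAGE_WRITE;
	return pte;
}

int main(void)
{
	kvm_pte_t pte = _PAGE_WRITE | _PAGE_DIRTY | KVM_RECORD_PAGE_WRITE_ABLE;

	pte = pte_mkclean(pte);          /* write-protected for dirty logging */
	assert(!(pte & _PAGE_WRITE));
	pte = fast_path_restore(pte);    /* write fault: restore without memslot walk */
	assert(pte & _PAGE_WRITE);
	return 0;
}
```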
diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h
index 91bd5a5ec575d23662df8e85a28ce44024841db7..f880698e2ed2cb1feded3d8f86ace46da544e0ae 100644
--- a/arch/loongarch/include/asm/kvm_pch_pic.h
+++ b/arch/loongarch/include/asm/kvm_pch_pic.h
@@ -41,6 +41,7 @@ struct loongarch_pch_pic {
 	spinlock_t lock;
 	struct kvm *kvm;
 	struct kvm_io_device device;
+	uint64_t pch_pic_base;
 	uint64_t mask; /* 1:disable irq, 0:enable irq */
 	uint64_t htmsi_en; /* 1:msi */
 	uint64_t edge; /* 1:edge triggered, 0:level triggered */
@@ -52,10 +53,10 @@ struct loongarch_pch_pic {
 	uint64_t polarity; /* 0: high level trigger, 1: low level trigger */
 	uint8_t route_entry[64]; /* default value 0, route to int0: extioi */
 	uint8_t htmsi_vector[64]; /* irq route table for routing to extioi */
-	uint64_t pch_pic_base;
 };
 
 void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level);
 void pch_msi_set_irq(struct kvm *kvm, int irq, int level);
 int kvm_loongarch_register_pch_pic_device(void);
+int kvm_loongarch_reset_pch(struct kvm *kvm);
 #endif /* LOONGARCH_PCH_PIC_H */
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index cc232901e4dd2373f97eb38b3955936919526d2f..630e5ebec21cb3700dc1d22b003111ef69906daa 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -102,7 +102,4 @@ static inline void __cpu_die(unsigned int cpu)
 }
 #endif
 
-int topo_add_cpu(int physid);
-int topo_get_cpu(int physid);
-
 #endif /* __ASM_SMP_H */
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index 13c1280662ae45b5cbf7f14ee55c7121e0ddf339..fa461fcd0a25d8945a717fcb078c7e0e8cc46da6 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -141,6 +141,17 @@ struct kvm_iocsr_entry {
 
 #define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS	0x40000003
 
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS	0x40000006
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU	0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE	0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE	0x2
+
+#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL	0x40000007
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU	0x0
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE	0x1
+#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED	0x3
+
 #define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL	0x40000004
 #define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT	0
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 17dc28821a9ddb4cd8116c165100f74d4e025e35..58819b017ba83e545dfc7983430f5e39f41f2fb9 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -71,10 +71,10 @@ int set_processor_mask(u32 id, u32 flags)
 		return -ENODEV;
 	}
 
-
-	cpu = topo_add_cpu(cpuid);
-	if (cpu < 0)
-		return -EEXIST;
+	if (cpuid == loongson_sysconf.boot_cpu_id)
+		cpu = 0;
+	else
+		cpu = cpumask_next_zero(-1, cpu_present_mask);
 
 	if (flags & ACPI_MADT_ENABLED) {
 		num_processors++;
@@ -197,6 +197,8 @@ void __init acpi_boot_table_init(void)
 		goto fdt_earlycon;
 	}
 
+	loongson_sysconf.boot_cpu_id = read_csr_cpuid();
+
 	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
@@ -246,7 +248,7 @@ void __init numa_set_distance(int from, int to, int distance)
 void __init
 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
-	int pxm, node, cpu;
+	int pxm, node;
 
 	if (srat_disabled())
 		return;
@@ -275,11 +277,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 		return;
 	}
 
-	cpu = topo_get_cpu(pa->apic_id);
-	/* Check whether apic_id exists in MADT table */
-	if (cpu < 0)
-		return;
-
 	early_numa_add_cpu(pa->apic_id, node);
 	set_cpuid_to_node(pa->apic_id, node);
 
@@ -318,17 +315,12 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
 {
 	int cpu;
 
-	cpu = topo_get_cpu(physid);
-	/* Check whether apic_id exists in MADT table */
+	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
 	if (cpu < 0) {
 		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
 		return cpu;
 	}
 
-	num_processors++;
-	set_cpu_present(cpu, true);
-	__cpu_number_map[physid] = cpu;
-	__cpu_logical_map[cpu] = physid;
 	acpi_map_cpu2node(handle, cpu, physid);
 
 	*pcpu = cpu;
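An illustrative user-space model (not kernel code) of the allocation policy the ACPI and FDT paths now share: the boot CPU always becomes logical CPU 0, and every other physical id takes the first logical slot not yet present. A plain uint64_t stands in for cpu_present_mask, and the boot_cpu_id value is made up for the demo:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t present_mask;         /* stand-in for cpu_present_mask */
static unsigned int boot_cpu_id = 4;  /* stand-in for loongson_sysconf.boot_cpu_id */

/* equivalent of cpumask_next_zero(-1, mask): first clear bit */
static int next_zero_bit(uint64_t mask)
{
	int i;

	for (i = 0; i < 64; i++)
		if (!(mask & (1ULL << i)))
			return i;
	return -1;
}

static int map_physid(unsigned int physid)
{
	int cpu;

	if (physid == boot_cpu_id)
		cpu = 0;                        /* BSP is always logical 0 */
	else
		cpu = next_zero_bit(present_mask);

	present_mask |= 1ULL << cpu;            /* like set_cpu_present(cpu, true) */
	return cpu;
}

int main(void)
{
	printf("boot cpu 4 -> %d\n", map_physid(4)); /* 0 */
	printf("cpu 0 -> %d\n", map_physid(0));      /* 1 */
	printf("cpu 8 -> %d\n", map_physid(8));      /* 2 */
	return 0;
}
```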
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index cce00d898c8c40d352b805b2b96c27e95a07ab2c..72798ac59c4ea0df85509cf07bde131933975847 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -4,9 +4,9 @@
 #include
 #include
 #include
-#include
 #include
 #include
+#include
 
 static int has_steal_clock;
 struct static_key paravirt_steal_enabled;
@@ -21,85 +21,6 @@ static u64 native_steal_clock(int cpu)
 
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
-static bool steal_acc = true;
-static int __init parse_no_stealacc(char *arg)
-{
-	steal_acc = false;
-	return 0;
-}
-early_param("no-steal-acc", parse_no_stealacc);
-
-static u64 para_steal_clock(int cpu)
-{
-	u64 steal;
-	struct kvm_steal_time *src;
-	int version;
-
-	src = &per_cpu(steal_time, cpu);
-	do {
-
-		version = src->version;
-		/* Make sure that the version is read before the steal */
-		virt_rmb();
-		steal = src->steal;
-		/* Make sure that the steal is read before the next version */
-		virt_rmb();
-
-	} while ((version & 1) || (version != src->version));
-	return steal;
-}
-
-static int pv_register_steal_time(void)
-{
-	int cpu = smp_processor_id();
-	struct kvm_steal_time *st;
-	unsigned long addr;
-
-	if (!has_steal_clock)
-		return -EPERM;
-
-	st = &per_cpu(steal_time, cpu);
-	addr = per_cpu_ptr_to_phys(st);
-
-	/* The whole structure kvm_steal_time should be one page */
-	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
-		pr_warn("Illegal PV steal time addr %lx\n", addr);
-		return -EFAULT;
-	}
-
-	addr |= KVM_STEAL_PHYS_VALID;
-	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
-	return 0;
-}
-
-static bool steal_acc = true;
-
-static int __init parse_no_stealacc(char *arg)
-{
-	steal_acc = false;
-	return 0;
-}
-early_param("no-steal-acc", parse_no_stealacc);
-
-static u64 paravt_steal_clock(int cpu)
-{
-	int version;
-	u64 steal;
-	struct kvm_steal_time *src;
-
-	src = &per_cpu(steal_time, cpu);
-	do {
-
-		version = src->version;
-		virt_rmb(); /* Make sure that the version is read before the steal */
-		steal = src->steal;
-		virt_rmb(); /* Make sure that the steal is read before the next version */
-
-	} while ((version & 1) || (version != src->version));
-
-	return steal;
-}
-
 static bool steal_acc = true;
 
 static int __init parse_no_stealacc(char *arg)
@@ -133,7 +54,7 @@ static struct smp_ops native_ops;
 
 static void pv_send_ipi_single(int cpu, unsigned int action)
 {
-	unsigned int min, old;
+	int min, old;
 	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
 
 	if (unlikely(action == ACTION_BOOT_CPU)) {
@@ -146,13 +67,14 @@ static void pv_send_ipi_single(int cpu, unsigned int action)
 		return;
 
 	min = cpu_logical_map(cpu);
-	kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1, 0, min);
+	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
 }
 
-#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
+#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
+
 static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
-	unsigned int cpu, i, min = 0, max = 0, old;
+	int i, cpu, min = 0, max = 0, old;
 	__uint128_t bitmap = 0;
 	irq_cpustat_t *info;
@@ -174,20 +96,20 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 		cpu = cpu_logical_map(i);
 		if (!bitmap) {
 			min = max = cpu;
-		} else if (cpu > min && cpu < min + KVM_IPI_CLUSTER_SIZE) {
-			max = cpu > max ? cpu : max;
-		} else if (cpu < min && (max - cpu) < KVM_IPI_CLUSTER_SIZE) {
+		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
+			/* cpu < min, and the bitmap is still large enough */
 			bitmap <<= min - cpu;
 			min = cpu;
+		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
+			/* cpu > min, and the bitmap is still large enough */
+			max = cpu > max ? cpu : max;
 		} else {
 			/*
-			 * Physical cpuid is sorted in ascending order ascend
-			 * for the next mask calculation, send IPI here
-			 * directly and skip the remainding cpus
+			 * With this cpu, the bitmap would exceed
+			 * KVM_IPI_CLUSTER_SIZE, so send the IPI here directly
+			 * and skip the remaining CPUs.
			 */
-			kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI,
-				(unsigned long)bitmap,
-				(unsigned long)(bitmap >> BITS_PER_LONG), min);
+			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
+				(unsigned long)(bitmap >> BITS_PER_LONG), min);
 			min = max = cpu;
 			bitmap = 0;
 		}
@@ -195,78 +117,53 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 	}
 
 	if (bitmap)
-		kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, (unsigned long)bitmap,
+		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
 			(unsigned long)(bitmap >> BITS_PER_LONG), min);
 }
 
-static irqreturn_t loongson_do_swi(int irq, void *dev)
+static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
 {
+	u32 action;
 	irq_cpustat_t *info;
-	long action;
 
-	/* Clear swi interrupt */
+	/* Clear SWI interrupt */
 	clear_csr_estat(1 << INT_SWI0);
 	info = this_cpu_ptr(&irq_stat);
 	action = atomic_xchg(&info->message, 0);
-	if (action & SMP_CALL_FUNCTION) {
-		generic_smp_call_function_interrupt();
-		info->ipi_irqs[IPI_CALL_FUNCTION]++;
-	}
 
 	if (action & SMP_RESCHEDULE) {
 		scheduler_ipi();
 		info->ipi_irqs[IPI_RESCHEDULE]++;
 	}
 
+	if (action & SMP_CALL_FUNCTION) {
+		generic_smp_call_function_interrupt();
+		info->ipi_irqs[IPI_CALL_FUNCTION]++;
+	}
+
 	return IRQ_HANDLED;
 }
 
 static void pv_init_ipi(void)
 {
-	int r, swi0;
+	int r, swi;
 
 	/* Init native ipi irq for ACTION_BOOT_CPU */
 	native_ops.init_ipi();
-	swi0 = get_percpu_irq(INT_SWI0);
-	if (swi0 < 0)
+	swi = get_percpu_irq(INT_SWI0);
+	if (swi < 0)
 		panic("SWI0 IRQ mapping failed\n");
-	irq_set_percpu_devid(swi0);
-	r = request_percpu_irq(swi0, loongson_do_swi, "SWI0", &irq_stat);
+	irq_set_percpu_devid(swi);
+	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
 	if (r < 0)
 		panic("SWI0 IRQ request failed\n");
 }
-
-static void pv_disable_steal_time(void)
-{
-	if (has_steal_clock)
-		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
-}
-
-static int pv_cpu_online(unsigned int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	pv_register_steal_time();
-	local_irq_restore(flags);
-	return 0;
-}
-
-static int pv_cpu_down_prepare(unsigned int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	pv_disable_steal_time();
-	local_irq_restore(flags);
-	return 0;
-}
 #endif
 
 bool kvm_para_available(void)
 {
-	static int hypervisor_type;
 	int config;
+	static int hypervisor_type;
 
 	if (!cpu_has_hypervisor)
 		return false;
@@ -299,7 +196,7 @@ int __init pv_ipi_init(void)
 		return 0;
 
 #ifdef CONFIG_SMP
-	native_ops = mp_ops;
+	native_ops = smp_ops;
 	smp_ops.init_ipi = pv_init_ipi;
 	smp_ops.send_ipi_single = pv_send_ipi_single;
 	smp_ops.send_ipi_mask = pv_send_ipi_mask;
@@ -308,56 +205,6 @@ int __init pv_ipi_init(void)
 	return 0;
 }
 
-static void pv_cpu_reboot(void *unused)
-{
-	pv_disable_steal_time();
-}
-
-static int pv_reboot_notify(struct notifier_block *nb, unsigned long code,
-		void *unused)
-{
-	on_each_cpu(pv_cpu_reboot, NULL, 1);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block pv_reboot_nb = {
-	.notifier_call = pv_reboot_notify,
-};
-
-int __init pv_time_init(void)
-{
-	int feature;
-
-	if (!cpu_has_hypervisor)
-		return 0;
-	if (!kvm_para_available())
-		return 0;
-
-	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
-	if (!(feature & KVM_FEATURE_STEAL_TIME))
-		return 0;
-
-	has_steal_clock = 1;
-	if (pv_register_steal_time()) {
-		has_steal_clock = 0;
-		return 0;
-	}
-
-	register_reboot_notifier(&pv_reboot_nb);
-	static_call_update(pv_steal_clock, para_steal_clock);
-	static_key_slow_inc(&paravirt_steal_enabled);
-	if (steal_acc)
-		static_key_slow_inc(&paravirt_steal_rq_enabled);
-
-#ifdef CONFIG_SMP
-	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "loongarch/pv:online",
-			pv_cpu_online, pv_cpu_down_prepare) < 0)
-		pr_err("Failed to install cpu hotplug callbacks\n");
-#endif
-	pr_info("Using stolen time PV\n");
-	return 0;
-}
-
 static int pv_enable_steal_time(void)
 {
 	int cpu = smp_processor_id();
@@ -466,120 +313,6 @@ int __init pv_time_init(void)
 	return 0;
 }
 
-static int pv_enable_steal_time(void)
-{
-	int cpu = smp_processor_id();
-	unsigned long addr;
-	struct kvm_steal_time *st;
-
-	if (!has_steal_clock)
-		return -EPERM;
-
-	st = &per_cpu(steal_time, cpu);
-	addr = per_cpu_ptr_to_phys(st);
-
-	/* The whole structure kvm_steal_time should be in one page */
-	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
-		pr_warn("Illegal PV steal time addr %lx\n", addr);
-		return -EFAULT;
-	}
-
-	addr |= KVM_STEAL_PHYS_VALID;
-	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);
-
-	return 0;
-}
-
-static void pv_disable_steal_time(void)
-{
-	if (has_steal_clock)
-		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
-}
-
-#ifdef CONFIG_SMP
-static int pv_time_cpu_online(unsigned int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	pv_enable_steal_time();
-	local_irq_restore(flags);
-
-	return 0;
-}
-
-static int pv_time_cpu_down_prepare(unsigned int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	pv_disable_steal_time();
-	local_irq_restore(flags);
-
-	return 0;
-}
-#endif
-
-static void pv_cpu_reboot(void *unused)
-{
-	pv_disable_steal_time();
-}
-
-static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
-{
-	on_each_cpu(pv_cpu_reboot, NULL, 1);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block pv_reboot_nb = {
-	.notifier_call = pv_reboot_notify,
-};
-
-int __init pv_time_init(void)
-{
-	int r, feature;
-
-	if (!cpu_has_hypervisor)
-		return 0;
-	if (!kvm_para_available())
-		return 0;
-
-	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
-	if (!(feature & KVM_FEATURE_STEAL_TIME))
-		return 0;
-
-	has_steal_clock = 1;
-	r = pv_enable_steal_time();
-	if (r < 0) {
-		has_steal_clock = 0;
-		return 0;
-	}
-	register_reboot_notifier(&pv_reboot_nb);
-
-#ifdef CONFIG_SMP
-	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-				      "loongarch/pv_time:online",
-				      pv_time_cpu_online, pv_time_cpu_down_prepare);
-	if (r < 0) {
-		has_steal_clock = 0;
-		pr_err("Failed to install cpu hotplug callbacks\n");
-		return r;
-	}
-#endif
-
-	static_call_update(pv_steal_clock, paravt_steal_clock);
-
-	static_key_slow_inc(&paravirt_steal_enabled);
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	if (steal_acc)
-		static_key_slow_inc(&paravirt_steal_rq_enabled);
-#endif
-
-	pr_info("Using paravirt steal-time\n");
-
-	return 0;
-}
-
 int __init pv_spinlock_init(void)
 {
 	if (!cpu_has_hypervisor)
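A user-space model (not kernel code) of the pv_send_ipi_mask() windowing logic above: physical cpuids are accumulated into a 128-bit bitmap anchored at `min`, and an id that no longer fits the 128-bit window forces an immediate "hypercall" (here just a printf) before a new window is started. KVM_IPI_CLUSTER_SIZE is 128 because BITS_PER_LONG is 64 on LoongArch:

```c
#include <stdio.h>

#define KVM_IPI_CLUSTER_SIZE 128

static void flush(unsigned __int128 bitmap, int min)
{
	/* stands in for kvm_hypercall3(KVM_HCALL_FUNC_IPI, ...) */
	printf("hypercall: base=%d lo=%#llx hi=%#llx\n", min,
	       (unsigned long long)bitmap,
	       (unsigned long long)(bitmap >> 64));
}

int main(void)
{
	int cpus[] = { 3, 5, 130, 131 };	/* sorted physical cpuids */
	unsigned __int128 bitmap = 0;
	int i, cpu, min = 0, max = 0;

	for (i = 0; i < 4; i++) {
		cpu = cpus[i];
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			bitmap <<= min - cpu;	/* window slides down */
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			max = cpu > max ? cpu : max;
		} else {
			flush(bitmap, min);	/* window full: send now */
			min = max = cpu;
			bitmap = 0;
		}
		bitmap |= (unsigned __int128)1 << (cpu - min);
	}
	if (bitmap)
		flush(bitmap, min);
	return 0;
}
```

With this input, cpus 3, 5 and 130 share one hypercall (130 - 3 < 128) and 131 triggers a second one, which is exactly the batching the hypercall interface is designed for.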
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 68b81254dd9a5a1c190c337c7a7cc2d217ca4a2d..77b79825d44aa6f00f2420965a6e024538179084 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -72,8 +72,6 @@ EXPORT_SYMBOL(cpu_data);
 
 struct loongson_board_info b_info;
 static const char dmi_empty_string[] = " ";
-static int possible_cpus;
-static bool bsp_added;
 
 /*
  * Setup information
@@ -376,55 +374,10 @@ static void __init bootcmdline_init(char **cmdline_p)
 	*cmdline_p = boot_command_line;
 }
 
-int topo_get_cpu(int physid)
-{
-	int i;
-
-	for (i = 0; i < possible_cpus; i++)
-		if (cpu_logical_map(i) == physid)
-			break;
-
-	if (i == possible_cpus)
-		return -ENOENT;
-
-	return i;
-}
-
-int topo_add_cpu(int physid)
-{
-	int cpu;
-
-	if (!bsp_added && (physid == loongson_sysconf.boot_cpu_id)) {
-		bsp_added = true;
-		return 0;
-	}
-
-	cpu = topo_get_cpu(physid);
-	if (cpu >= 0) {
-		pr_warn("Adding duplicated physical cpuid 0x%x\n", physid);
-		return -EEXIST;
-	}
-
-	if (possible_cpus >= nr_cpu_ids)
-		return -ERANGE;
-
-	__cpu_logical_map[possible_cpus] = physid;
-	cpu = possible_cpus++;
-	return cpu;
-}
-
-static void __init topo_init(void)
-{
-	loongson_sysconf.boot_cpu_id = read_csr_cpuid();
-	__cpu_logical_map[0] = loongson_sysconf.boot_cpu_id;
-	possible_cpus++;
-}
-
 void __init platform_init(void)
 {
 	arch_reserve_vmcore();
 	arch_parse_crashkernel();
-	topo_init();
 
 #ifdef CONFIG_ACPI_TABLE_UPGRADE
 	acpi_table_upgrade();
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 5d02bf5126b7177c9f79a4d38bfe74f01e6a3bff..7f693a24687d1a364dc7eeae85dc02bad637a88a 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -291,9 +291,10 @@ static void __init fdt_smp_setup(void)
 		if (cpuid >= nr_cpu_ids)
 			continue;
 
-		cpu = topo_add_cpu(cpuid);
-		if (cpu < 0)
-			continue;
+		if (cpuid == loongson_sysconf.boot_cpu_id)
+			cpu = 0;
+		else
+			cpu = cpumask_next_zero(-1, cpu_present_mask);
 
 		num_processors++;
 		set_cpu_possible(cpu, true);
@@ -301,7 +302,7 @@ static void __init fdt_smp_setup(void)
 		__cpu_number_map[cpuid] = cpu;
 		__cpu_logical_map[cpu] = cpuid;
 
-		early_numa_add_cpu(cpuid, 0);
+		early_numa_add_cpu(cpu, 0);
 		set_cpuid_to_node(cpuid, 0);
 	}
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index 19cb22da35de7a79265269362dbf462182bea882..5579ee8afedc6a9a95e96ff247f70e43bbd2d1c2 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -44,6 +44,8 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 	switch (index) {
 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
 		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
+		if (cpu_has_ptw && (index == LOONGARCH_CPUCFG2))
+			vcpu->arch.gprs[rd] |= CPUCFG2_PTW;
 		break;
 	case CPUCFG_KVM_SIG:
 		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c
index 5327066f16aec95ca383f127e1aa1f048fa73a10..801ac861c876d1893208d39d313dc2bcaecad176 100644
--- a/arch/loongarch/kvm/intc/extioi.c
+++ b/arch/loongarch/kvm/intc/extioi.c
@@ -17,8 +17,10 @@ static void extioi_update_irq(struct loongarch_extioi *s, int irq, int level)
 	struct kvm_vcpu *vcpu;
 
 	ipnum = s->ipmap.reg_u8[irq / 32];
-	ipnum = count_trailing_zeros(ipnum);
-	ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
+		ipnum = count_trailing_zeros(ipnum);
+		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+	}
 
 	cpu = s->sw_coremap[irq];
 	vcpu = kvm_get_vcpu(s->kvm, cpu);
@@ -53,8 +55,10 @@ static void extioi_set_sw_coreisr(struct loongarch_extioi *s)
 
 	for (irq = 0; irq < EXTIOI_IRQS; irq++) {
 		ipnum = s->ipmap.reg_u8[irq / 32];
-		ipnum = count_trailing_zeros(ipnum);
-		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
+			ipnum = count_trailing_zeros(ipnum);
+			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+		}
 
 		irq_index = irq / 32;
 		/* length of accessing core isr is 4 bytes */
 		irq_mask = 1 << (irq & 0x1f);
@@ -99,6 +103,35 @@ static inline void extioi_enable_irq(struct kvm_vcpu *vcpu, struct loongarch_ext
 	}
 }
 
+static inline void extioi_update_sw_coremap(struct loongarch_extioi *s,
+					int irq, void *pvalue, u32 len, bool notify)
+{
+	int i, cpu;
+	u64 val = *(u64 *)pvalue;
+
+	for (i = 0; i < len; i++) {
+		cpu = val & 0xff;
+		val = val >> 8;
+
+		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
+			cpu = ffs(cpu) - 1;
+			cpu = (cpu >= 4) ? 0 : cpu;
+		}
+
+		if (s->sw_coremap[irq + i] == cpu)
+			continue;
+
+		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
+			/* lower irq at old cpu and raise irq at new cpu */
+			extioi_update_irq(s, irq + i, 0);
+			s->sw_coremap[irq + i] = cpu;
+			extioi_update_irq(s, irq + i, 1);
+		} else {
+			s->sw_coremap[irq + i] = cpu;
+		}
+	}
+}
+
 static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu,
 				struct loongarch_extioi *s,
 				gpa_t addr, int len, const void *val)
@@ -167,24 +200,7 @@ static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu,
 		irq = offset - EXTIOI_COREMAP_START;
 		index = irq;
 		s->coremap.reg_u8[index] = data;
-
-		cpu = data & 0xff;
-		cpu = ffs(cpu) - 1;
-		cpu = (cpu >= 4) ? 0 : cpu;
-
-		if (s->sw_coremap[irq] == cpu)
-			break;
-
-		if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) {
-			/*
-			 * lower irq at old cpu and raise irq at new cpu
-			 */
-			extioi_update_irq(s, irq, 0);
-			s->sw_coremap[irq] = cpu;
-			extioi_update_irq(s, irq, 1);
-		} else
-			s->sw_coremap[irq] = cpu;
-
+		extioi_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
 		break;
 	default:
 		ret = -EINVAL;
@@ -270,28 +286,8 @@ static int loongarch_extioi_writew(struct kvm_vcpu *vcpu,
 	case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
 		irq = offset - EXTIOI_COREMAP_START;
 		index = irq >> 2;
 		s->coremap.reg_u32[index] = data;
-
-		for (i = 0; i < sizeof(data); i++) {
-			cpu = data & 0xff;
-			cpu = ffs(cpu) - 1;
-			cpu = (cpu >= 4) ? 0 : cpu;
-			data = data >> 8;
-
-			if (s->sw_coremap[irq + i] == cpu)
-				continue;
-
-			if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) {
-				/*
-				 * lower irq at old cpu and raise irq at new cpu
-				 */
-				extioi_update_irq(s, irq + i, 0);
-				s->sw_coremap[irq + i] = cpu;
-				extioi_update_irq(s, irq + i, 1);
-			} else
-				s->sw_coremap[irq + i] = cpu;
-		}
+		extioi_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
 		break;
 	default:
 		ret = -EINVAL;
@@ -379,28 +375,8 @@ static int loongarch_extioi_writel(struct kvm_vcpu *vcpu,
 	case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END:
 		irq = offset - EXTIOI_COREMAP_START;
 		index = irq >> 3;
 		s->coremap.reg_u64[index] = data;
-
-		for (i = 0; i < sizeof(data); i++) {
-			cpu = data & 0xff;
-			cpu = ffs(cpu) - 1;
-			cpu = (cpu >= 4) ? 0 : cpu;
-			data = data >> 8;
-
-			if (s->sw_coremap[irq + i] == cpu)
-				continue;
-
-			if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) {
-				/*
-				 * lower irq at old cpu and raise irq at new cpu
-				 */
-				extioi_update_irq(s, irq + i, 0);
-				s->sw_coremap[irq + i] = cpu;
-				extioi_update_irq(s, irq + i, 1);
-			} else
-				s->sw_coremap[irq + i] = cpu;
-		}
+		extioi_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
 		break;
 	default:
 		ret = -EINVAL;
@@ -692,24 +668,190 @@ static int kvm_loongarch_extioi_regs_access(struct kvm_device *dev,
 	return 0;
 }
 
+static int kvm_extioi_ctrl_access(struct kvm_device *dev,
+				struct kvm_device_attr *attr)
+{
+	int ret = 0;
+	unsigned long flags;
+	unsigned long type = (unsigned long)attr->attr;
+	u32 i, start_irq;
+	void __user *data;
+	struct loongarch_extioi *s = dev->kvm->arch.extioi;
+
+	data = (void __user *)attr->addr;
+	spin_lock_irqsave(&s->lock, flags);
+	switch (type) {
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
+		if (copy_from_user(&s->num_cpu, data, 4))
+			ret = -EFAULT;
+		break;
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
+		if (copy_from_user(&s->features, data, 4))
+			ret = -EFAULT;
+		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
+			s->status |= BIT(EIOINTC_ENABLE);
+		break;
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
+		extioi_set_sw_coreisr(s);
+		for (i = 0; i < (EXTIOI_IRQS / 4); i++) {
+			start_irq = i * 4;
+			extioi_update_sw_coremap(s, start_irq,
+					(void *)&s->coremap.reg_u32[i], sizeof(u32), false);
+		}
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&s->lock, flags);
+
+	return ret;
+}
+
+static int kvm_extioi_sw_status_access(struct kvm_device *dev,
+				struct kvm_device_attr *attr,
+				bool is_write)
+{
+	int addr, ret = 0;
+	unsigned long flags;
+	void *p = NULL;
+	void __user *data;
+	struct loongarch_extioi *s;
+
+	s = dev->kvm->arch.extioi;
+	addr = attr->attr;
+	addr &= 0xffff;
+
+	data = (void __user *)attr->addr;
+	switch (addr) {
+	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
+		p = &s->num_cpu;
+		break;
+	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
+		p = &s->features;
+		break;
+	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
+		p = &s->status;
+		break;
+	default:
+		kvm_err("%s: unknown extioi register, addr = %d\n", __func__, addr);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&s->lock, flags);
+	if (is_write) {
+		if (copy_from_user(p, data, 4))
+			ret = -EFAULT;
+	} else {
+		if (copy_to_user(data, p, 4))
+			ret = -EFAULT;
+	}
+	spin_unlock_irqrestore(&s->lock, flags);
+
+	return ret;
+}
+
 static int kvm_loongarch_extioi_get_attr(struct kvm_device *dev,
 				struct kvm_device_attr *attr)
 {
-	if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS)
+	switch (attr->group) {
+	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
+		return kvm_loongarch_extioi_regs_access(dev, attr, false);
-
-	return -EINVAL;
+	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
+		return kvm_extioi_sw_status_access(dev, attr, false);
+	default:
+		return -EINVAL;
+	}
 }
 
 static int kvm_loongarch_extioi_set_attr(struct kvm_device *dev,
 				struct kvm_device_attr *attr)
 {
-	if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS)
+	switch (attr->group) {
+	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
+		return kvm_extioi_ctrl_access(dev, attr);
+	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
 		return kvm_loongarch_extioi_regs_access(dev, attr, true);
+	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
+		return kvm_extioi_sw_status_access(dev, attr, true);
+	}
 
 	return -EINVAL;
 }
 
+static int kvm_extioi_virt_read(struct kvm_vcpu *vcpu,
+				struct kvm_io_device *dev,
+				gpa_t addr, int len, void *val)
+{
+	unsigned long flags;
+	u32 *data = val;
+	struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi;
+
+	if (!extioi) {
+		kvm_err("%s: extioi irqchip not valid!\n", __func__);
+		return -EINVAL;
+	}
+
+	addr -= EIOINTC_VIRT_BASE;
+	spin_lock_irqsave(&extioi->lock, flags);
+	switch (addr) {
+	case EIOINTC_VIRT_FEATURES:
+		*data = extioi->features;
+		break;
+	case EIOINTC_VIRT_CONFIG:
+		*data = extioi->status;
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&extioi->lock, flags);
+
+	return 0;
+}
+
+static int kvm_extioi_virt_write(struct kvm_vcpu *vcpu,
+				struct kvm_io_device *dev,
+				gpa_t addr, int len, const void *val)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 value = *(u32 *)val;
+	struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi;
+
+	if (!extioi) {
+		kvm_err("%s: extioi irqchip not valid!\n", __func__);
+		return -EINVAL;
+	}
+
+	addr -= EIOINTC_VIRT_BASE;
+	spin_lock_irqsave(&extioi->lock, flags);
+	switch (addr) {
+	case EIOINTC_VIRT_FEATURES:
+		ret = -EPERM;
+		break;
+	case EIOINTC_VIRT_CONFIG:
+		/*
+		 * extioi features can only be set while the extioi is
+		 * still disabled
+		 */
+		if ((extioi->status & BIT(EIOINTC_ENABLE)) && value) {
+			ret = -EPERM;
+			break;
+		}
+		extioi->status = value & extioi->features;
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&extioi->lock, flags);
+
+	return ret;
+}
+
+static const struct kvm_io_device_ops kvm_extioi_virt_ops = {
+	.read	= kvm_extioi_virt_read,
+	.write	= kvm_extioi_virt_write,
+};
+
 static void kvm_loongarch_extioi_destroy(struct kvm_device *dev)
 {
 	struct kvm *kvm;
@@ -729,6 +871,7 @@ static void kvm_loongarch_extioi_destroy(struct kvm_device *dev)
 	device = &extioi->device;
 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device);
+	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &extioi->device_vext);
 	kfree(extioi);
 }
 
@@ -736,7 +879,7 @@ static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type)
 {
 	int ret;
 	struct loongarch_extioi *s;
-	struct kvm_io_device *device;
+	struct kvm_io_device *device, *device1;
 	struct kvm *kvm = dev->kvm;
 
 	/* extioi has been created */
@@ -762,6 +905,15 @@ static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type)
 		return -EFAULT;
 	}
 
+	device1 = &s->device_vext;
+	kvm_iodevice_init(device1, &kvm_extioi_virt_ops);
+	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
+			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
+	if (ret < 0) {
+		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
+		kfree(s);
+		return ret;
+	}
 	kvm->arch.extioi = s;
 
 	kvm_info("create extioi device successfully\n");
@@ -786,7 +938,7 @@ int kvm_loongarch_reset_extioi(struct kvm *kvm)
 {
 	struct loongarch_extioi *extioi = kvm->arch.extioi;
 	unsigned long flags;
-	u8 offset, size;
+	unsigned long offset, size;
 	u8 *pstart;
 
 	if (!extioi)
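A VMM-side sketch (not from the patch) of driving the new EXTIOI control group; it assumes a device fd previously obtained with KVM_CREATE_DEVICE, and error handling is elided. The group/attr constants are the uapi values added in this series; KVM_SET_DEVICE_ATTR and struct kvm_device_attr are the standard KVM device-attr interface:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL		0x40000007
#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU	0x0
#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE	0x1

static int extioi_set_u32(int dev_fd, uint32_t group, uint64_t attr, uint32_t val)
{
	struct kvm_device_attr da = {
		.group = group,
		.attr  = attr,
		/* the kernel side copies exactly 4 bytes from this address */
		.addr  = (uint64_t)(unsigned long)&val,
	};

	return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &da);
}

/* hypothetical helper: configure cpu count and features before boot */
void extioi_setup(int dev_fd, uint32_t num_cpu, uint32_t features)
{
	extioi_set_u32(dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL,
		       KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU, num_cpu);
	extioi_set_u32(dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL,
		       KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE, features);
}
```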
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
index 12024d9fdd0b5a751474cedbc45bab6bf624c760..4459ec42eb40e4eb528cd85257f1b969eb2380b5 100644
--- a/arch/loongarch/kvm/intc/ipi.c
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -536,3 +536,23 @@ int kvm_loongarch_register_ipi_device(void)
 	return kvm_register_device_ops(&kvm_loongarch_ipi_dev_ops, KVM_DEV_TYPE_LA_IPI);
 }
+
+int kvm_loongarch_reset_ipi(struct kvm_vcpu *vcpu)
+{
+	struct ipi_state *s = &vcpu->arch.ipi_state;
+	unsigned long offset, size;
+	u8 *pstart;
+
+	if (!s)
+		return -EINVAL;
+
+	pstart = (char *)&s->status;
+	offset = (char *)&s->status - (char *)s;
+	size = sizeof(struct ipi_state) - offset;
+
+	spin_lock(&vcpu->arch.ipi_state.lock);
+	memset(pstart, 0, size);
+	spin_unlock(&vcpu->arch.ipi_state.lock);
+
+	return 0;
+}
diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c
index 7d053dbcd5c066829fa6c12ab15a6aac4d7957be..9f287b41991015c2f98fd9971f02d9ba1b73ad23 100644
--- a/arch/loongarch/kvm/intc/pch_pic.c
+++ b/arch/loongarch/kvm/intc/pch_pic.c
@@ -538,3 +538,23 @@ int kvm_loongarch_register_pch_pic_device(void)
 	return kvm_register_device_ops(&kvm_loongarch_pch_pic_dev_ops, KVM_DEV_TYPE_LA_IOAPIC);
 }
+
+int kvm_loongarch_reset_pch(struct kvm *kvm)
+{
+	struct loongarch_pch_pic *s = kvm->arch.pch_pic;
+	unsigned long offset, size;
+	u8 *pstart;
+
+	if (!s)
+		return -EINVAL;
+
+	pstart = (char *)&s->mask;
+	offset = (char *)&s->mask - (char *)s;
+	size = sizeof(struct loongarch_pch_pic) - offset;
+
+	spin_lock(&s->lock);
+	memset(pstart, 0, size);
+	spin_unlock(&s->lock);
+
+	return 0;
+}
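Both reset helpers use the same pattern: zero everything in the state structure from one designated member to the end in a single memset, while earlier members (the lock, back-pointers) survive. This is also why pch_pic_base was moved above mask in struct loongarch_pch_pic: it must not be wiped by the reset. A minimal standalone model of the pattern, with made-up member names:

```c
#include <string.h>
#include <assert.h>

struct demo_state {
	void *kvm;           /* must survive reset */
	unsigned long lock;  /* must survive reset */
	unsigned int status; /* first member that gets cleared */
	unsigned int isr;
	unsigned int en;
};

static void demo_reset(struct demo_state *s)
{
	char *pstart = (char *)&s->status;
	size_t size = sizeof(*s) - ((char *)&s->status - (char *)s);

	memset(pstart, 0, size);	/* clears status..en in one shot */
}

int main(void)
{
	struct demo_state s = { (void *)0x1, 2, 3, 4, 5 };

	demo_reset(&s);
	assert(s.kvm == (void *)0x1 && s.lock == 2);
	assert(s.status == 0 && s.isr == 0 && s.en == 0);
	return 0;
}
```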
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 1f50f6723739cde230b961ed429cee62b693db44..ef2bad612c00309a59937a1b59e63cb9d802ca81 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -245,6 +245,21 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
 			trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
 		vcpu->cpu = cpu;
 		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+
+		/*
+		 * LLBCTL_WCLLB is a separate guest CSR register from the
+		 * host: the eret instruction clears the host LLBCTL_WCLLB
+		 * register in host mode, and the guest register in guest
+		 * mode.
+		 *
+		 * When a gpa --> hpa mapping is changed, the guest does not
+		 * know it, even though the contents at the new address may
+		 * differ.
+		 *
+		 * So clear the guest LLBCTL_WCLLB register here whenever the
+		 * mapping changes. Otherwise, if the mapping changes while
+		 * the guest is executing an LL/SC pair, LL loads from the
+		 * old address but SC stores to the new address successfully
+		 * (since LLBCTL_WCLLB is still on), even if memory at the
+		 * new address has been changed by other vCPUs.
+		 */
+		set_gcsr_llbctl(LOONGARCH_CSR_LLBCTL);
 	}
 
 	/* Restore GSTAT(0x50).vpid */
@@ -299,6 +314,12 @@ int kvm_arch_hardware_enable(void)
 	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
 		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
 
+	/*
+	 * HW guest CSR registers are lost across CPU suspend and resume.
+	 * Clear last_vcpu so that guest CSR registers are forced to be
+	 * reloaded from the vCPU software state.
+	 */
+	this_cpu_ptr(vmcs)->last_vcpu = NULL;
 	return 0;
 }
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index cb701649f56fe43efc5ebc1f72d4c5fdeabe0a53..b4b1464fc4fbde1c8c0c1beccbca7d8f0abe7893 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -538,6 +538,8 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	 * _PAGE_DIRTY since gpa has already recorded as dirty page
 	 */
 	prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte);
+	if (prot_bits & _PAGE_WRITE)
+		prot_bits |= KVM_RECORD_PAGE_WRITE_ABLE;
 	kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits)));
 
 	return true;
@@ -602,6 +604,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 
 		/* Track access to pages marked old */
 		new = kvm_pte_mkyoung(*ptep);
+
+		/*
+		 * Restore the write attribute of the page table entry
+		 * according to KVM_RECORD_PAGE_WRITE_ABLE.
+		 */
+		if (kvm_record_pte_write_able(new))
+			new |= _PAGE_WRITE;
+
 		/* call kvm_set_pfn_accessed() after unlock */
 
 		if (write && !kvm_pte_dirty(new)) {
@@ -903,7 +913,12 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 		prot_bits |= _CACHE_SUC;
 
 	if (writeable) {
-		prot_bits |= _PAGE_WRITE;
+		/*
+		 * The mapping is writable: record that in bit 50
+		 * (KVM_RECORD_PAGE_WRITE_ABLE) so that the write attribute
+		 * can be restored in the fast path kvm_map_page_fast()
+		 * after it has been stripped by kvm_pte_mkclean().
+		 */
+		prot_bits |= _PAGE_WRITE | KVM_RECORD_PAGE_WRITE_ABLE;
 		if (write)
 			prot_bits |= __WRITEABLE;
 	}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 8c5cadd9821d5d713164e9ccbbe6a52574af2444..aaed344ec8d237a141dd2aa6f3fad182a2301a30 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -309,7 +309,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int ret = RESUME_GUEST;
 	unsigned long estat = vcpu->arch.host_estat;
-	u32 intr = estat & 0x1fff; /* Ignore NMI */
+	u32 intr = estat & CSR_ESTAT_IS;
 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -870,8 +870,18 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
 		break;
 	case KVM_REG_LOONGARCH_VCPU_RESET:
 		vcpu->arch.st.guest_addr = 0;
-		if (vcpu->vcpu_id == 0)
+		if (vcpu->vcpu_id == 0) {
 			kvm_loongarch_reset_extioi(vcpu->kvm);
+			kvm_loongarch_reset_pch(vcpu->kvm);
+		}
+
+		kvm_loongarch_reset_ipi(vcpu);
+		/*
+		 * When the vCPU resets, clear the ESTAT and GINTC registers
+		 * here; other CSR registers are cleared through _kvm_set_csr.
+		 */
+		kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+		kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
 		memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
 		memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
 		break;
diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c
index 26ce45ea46c2952abb5eafdfefb370033c25f02b..3ad54cf0f3c2bd16ec857aff38bfd84cfa4f106a 100644
--- a/drivers/iommu/loongarch_iommu.c
+++ b/drivers/iommu/loongarch_iommu.c
@@ -109,7 +109,7 @@ u16 la_iommu_last_bdf;	/* largest PCI device id
 					 * we have to handle
 					 */
-int loongarch_iommu_disable;
+int loongarch_iommu_disable = 1;
 
 #define iommu_write_regl(iommu, off, val) \
 		writel(val, iommu->confbase + off)
@@ -199,6 +199,22 @@ static void flush_iotlb_by_domain_id(struct loongarch_iommu *iommu, u16 domain_i
 	iommu_write_regl(iommu, LA_IOMMU_VBTC, val);
 }
 
+static void flush_iotlb(struct loongarch_iommu *iommu)
+{
+	u32 val;
+
+	if (iommu == NULL) {
+		pr_err("%s iommu is NULL", __func__);
+		return;
+	}
+
+	/* Flush all tlb */
+	val = iommu_read_regl(iommu, LA_IOMMU_VBTC);
+	val &= ~0x1f;
+	val |= 0x5;
+	iommu_write_regl(iommu, LA_IOMMU_VBTC, val);
+}
+
 static int flush_pgtable_is_busy(struct loongarch_iommu *iommu)
 {
 	u32 val;
@@ -336,22 +352,6 @@ static int update_dev_table(struct la_iommu_dev_data *dev_data, int flag)
 	return 0;
 }
 
-static void flush_iotlb(struct loongarch_iommu *iommu)
-{
-	u32 val;
-
-	if (iommu == NULL) {
-		pr_err("%s iommu is NULL", __func__);
-		return;
-	}
-
-	/* Flush all tlb */
-	val = iommu_read_regl(iommu, LA_IOMMU_VBTC);
-	val &= ~0x1f;
-	val |= 0x5;
-	iommu_write_regl(iommu, LA_IOMMU_VBTC, val);
-}
-
 static int iommu_flush_iotlb(struct loongarch_iommu *iommu)
 {
 	u32 retry = 0;
@@ -443,8 +443,10 @@ static int domain_id_alloc(struct loongarch_iommu *iommu)
 	if (id < MAX_DOMAIN_ID)
 		__set_bit(id, iommu->domain_bitmap);
 	spin_unlock(&iommu->domain_bitmap_lock);
-	if (id >= MAX_DOMAIN_ID)
+	if (id >= MAX_DOMAIN_ID) {
+		id = -1;
 		pr_err("LA-IOMMU: Alloc domain id over max domain id\n");
+	}
 
 	return id;
 }
@@ -595,8 +597,8 @@ static struct iommu_domain *la_iommu_domain_alloc(unsigned int type)
 	struct dom_info *info;
 
 	switch (type) {
+	case IOMMU_DOMAIN_BLOCKED:
 	case IOMMU_DOMAIN_UNMANAGED:
-	case IOMMU_DOMAIN_IDENTITY:
 		info = alloc_dom_info();
 		if (info == NULL)
 			return NULL;
@@ -833,11 +835,11 @@ static int la_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	struct iommu_info *info;
 	unsigned short bdf;
 
-	if (domain->type == IOMMU_DOMAIN_IDENTITY)
-		domain = NULL;
-
 	la_iommu_detach_dev(dev);
 
+	if (domain != NULL && domain->type == IOMMU_DOMAIN_BLOCKED)
+		return 0;
+
 	if (domain == NULL)
 		return 0;
 
@@ -920,6 +922,7 @@ static void la_iommu_detach_dev(struct device *dev)
 	spin_lock(&iommu_entry->devlock);
 	do_detach(dev_data);
 	spin_unlock(&iommu_entry->devlock);
+	dev_data->domain = NULL;
 
 	pci_info(pdev, "%s iommu devid %x sigment %x\n", __func__,
 			iommu->devid, iommu->segment);
diff --git a/drivers/iommu/loongarch_iommu.h b/drivers/iommu/loongarch_iommu.h
index cf5640d95900bde0c2c4cd72832f5e1d184e16c6..a411d2b34d01d70991cfeda572ba65d1f9a70403 100644
--- a/drivers/iommu/loongarch_iommu.h
+++ b/drivers/iommu/loongarch_iommu.h
@@ -169,7 +169,7 @@ struct dom_entry {
 struct la_iommu_dev_data {
 	struct list_head list;	/* for iommu_entry->dev_list */
 	struct loongarch_iommu *iommu;
-	struct iommu_info *iommu_entry;
+	struct iommu_info *iommu_entry;
 	struct iommu_domain *domain;
 	struct device *dev;
 	unsigned short bdf;
diff --git a/drivers/pci/quirks.c
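An illustrative re-implementation (not the driver code) of the fixed domain_id_alloc() contract: search a bitmap for a free id, and return -1 consistently on exhaustion so the caller can test for failure without comparing against MAX_DOMAIN_ID itself. A uint64_t bitmap and the value of MAX_DOMAIN_ID are assumptions for the demo:

```c
#include <stdio.h>
#include <stdint.h>

#define MAX_DOMAIN_ID 64

static uint64_t domain_bitmap;

static int domain_id_alloc(void)
{
	int id;

	for (id = 0; id < MAX_DOMAIN_ID; id++) {
		if (!(domain_bitmap & (1ULL << id))) {
			domain_bitmap |= 1ULL << id;	/* like __set_bit() */
			return id;
		}
	}
	return -1;	/* over max domain id: report failure, not a bogus id */
}

int main(void)
{
	domain_bitmap = ~0ULL;				/* simulate exhaustion */
	printf("alloc -> %d\n", domain_id_alloc());	/* prints -1 */
	return 0;
}
```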
b/drivers/pci/quirks.c
index 708e86c4fff6bf02b09f7b139ec57a97552a19b5..a46123dcbe5aaf8d7c18eab0cfaec8b6f675e9bc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4514,8 +4514,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
 			 quirk_bridge_cavm_thrx2_pcie_root);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
 			 quirk_bridge_cavm_thrx2_pcie_root);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, 0x3c09,
-			 quirk_bridge_cavm_thrx2_pcie_root);
 
 /*
  * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
@@ -5085,6 +5083,18 @@ static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
 	return false;
 }
 
+static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
+{
+	/*
+	 * Loongson PCIe Root Ports don't advertise an ACS capability, but
+	 * they do not allow peer-to-peer transactions between Root Ports.
+	 * Allow each Root Port to be in a separate IOMMU group by masking
+	 * the SV/RR/CR/UF bits.
+	 */
+	return pci_acs_ctrl_enabled(acs_flags,
+		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
 static const struct pci_dev_acs_enabled {
 	u16 vendor;
 	u16 device;
@@ -5204,6 +5214,17 @@ static const struct pci_dev_acs_enabled {
 	{ PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+	/* Loongson PCIe Root Ports */
+	{ PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
 	/* Amazon Annapurna Labs */
 	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
 	/* Zhaoxin multi-function devices */
@@ -5268,8 +5289,6 @@ static const struct pci_dev_acs_enabled {
 	{ PCI_VENDOR_ID_MUCSE, 0x1c61, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_MUCSE, 0x1083, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_MUCSE, 0x1c83, pci_quirk_mf_endpoint_acs },
-	{ PCI_VENDOR_ID_LOONGSON, 0x3c09, pci_quirk_xgene_acs},
-	{ PCI_VENDOR_ID_LOONGSON, 0x3c19, pci_quirk_xgene_acs},
 	{ 0 }
 };
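For context, a hedged sketch of the check the quirk feeds into: an ACS quirk reports which ACS controls the hardware effectively provides, and the core treats ACS as enabled only if every control the caller asked about is covered. This is modeled on the pci_acs_ctrl_enabled() helper in quirks.c; the standalone definitions below are for illustration only:

```c
#include <stdio.h>

#define PCI_ACS_SV 0x0001	/* Source Validation */
#define PCI_ACS_RR 0x0004	/* P2P Request Redirect */
#define PCI_ACS_CR 0x0008	/* P2P Completion Redirect */
#define PCI_ACS_UF 0x0010	/* Upstream Forwarding */

static int acs_ctrl_enabled(unsigned short requested, unsigned short provided)
{
	/* enabled only if every requested control is provided */
	return (requested & provided) == requested;
}

int main(void)
{
	/* what pci_quirk_loongson_acs() claims these Root Ports provide */
	unsigned short provided = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	printf("RR|UF ok: %d\n",
	       acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_UF, provided));	/* 1 */
	return 0;
}
```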