diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 097356603e13fa0036f77fff1500e2e1b6de96c0..23fe0fd3fe74b912592ae3d8374f8c2ad8ee5002 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -234,6 +234,7 @@ config ARM64
 	select HAVE_KRETPROBES
 	select HAVE_GENERIC_VDSO
 	select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+	select HOTPLUG_SMT if (SMP && HOTPLUG_CPU)
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 597c09d37bf8dc2c36f192a1d70775681b3dec9d..b80368a02d838e3f350bf80a21fb5e2314d7d8fb 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -708,6 +708,7 @@ CONFIG_KVM=y
 #
 # General architecture-dependent options
 #
+CONFIG_HOTPLUG_SMT=y
 CONFIG_HOTPLUG_CORE_SYNC=y
 CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
 CONFIG_KPROBES=y
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 817d788cd86669439cf4b55af45214f016cc5efb..0dc360c32ec82d68d68c6b741588fb9089b43ac9 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/xarray.h>
 #include
 #include
@@ -43,11 +44,16 @@ static bool __init acpi_cpu_is_threaded(int cpu)
  */
 int __init parse_acpi_topology(void)
 {
+	int thread_num, max_smt_thread_num = 1;
+	struct xarray core_threads;
 	int cpu, topology_id;
+	void *entry;
 
 	if (acpi_disabled)
 		return 0;
 
+	xa_init(&core_threads);
+
 	for_each_possible_cpu(cpu) {
 		topology_id = find_acpi_cpu_topology(cpu, 0);
 		if (topology_id < 0)
@@ -57,6 +63,20 @@ int __init parse_acpi_topology(void)
 			cpu_topology[cpu].thread_id = topology_id;
 			topology_id = find_acpi_cpu_topology(cpu, 1);
 			cpu_topology[cpu].core_id = topology_id;
+
+			entry = xa_load(&core_threads, topology_id);
+			if (!entry) {
+				xa_store(&core_threads, topology_id,
+					 xa_mk_value(1), GFP_KERNEL);
+			} else {
+				thread_num = xa_to_value(entry);
+				thread_num++;
+				xa_store(&core_threads, topology_id,
+					 xa_mk_value(thread_num), GFP_KERNEL);
+
+				if (thread_num > max_smt_thread_num)
+					max_smt_thread_num = thread_num;
+			}
 		} else {
 			cpu_topology[cpu].thread_id = -1;
 			cpu_topology[cpu].core_id = topology_id;
@@ -67,6 +87,9 @@ int __init parse_acpi_topology(void)
 		cpu_topology[cpu].package_id = topology_id;
 	}
 
+	topology_smt_set_num_threads(max_smt_thread_num);
+
+	xa_destroy(&core_threads);
 	return 0;
 }
 #endif
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index b741b5ba82bd6e53c216e9338cec8bfdd0dd3e27..252a436e0c1b7382a5351115012f7bb6415e98e0 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -526,6 +526,13 @@ static int __init parse_core(struct device_node *core, int package_id,
 		i++;
 	} while (t);
 
+	/*
+	 * We now know the number of threads in this core; update the
+	 * SMT thread count when necessary.
+	 */
+	if (i > topology_smt_get_num_threads())
+		topology_smt_set_num_threads(i);
+
 	cpu = get_cpu_for_node(core);
 	if (cpu >= 0) {
 		if (!leaf) {
@@ -729,6 +736,36 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
 	return &cpu_topology[cpu].cluster_sibling;
 }
 
+#ifdef CONFIG_HOTPLUG_SMT
+
+/* Maximum number of threads per core */
+static unsigned int topology_smt_num_threads = 1;
+
+void __init topology_smt_set_num_threads(unsigned int num_threads)
+{
+	topology_smt_num_threads = num_threads;
+}
+
+unsigned int __init topology_smt_get_num_threads(void)
+{
+	return topology_smt_num_threads;
+}
+
+/*
+ * During SMT hotplug the primary thread of an SMT core won't be disabled.
+ * On x86 the primary thread appears to serve special purposes. Other
+ * architectures such as arm64 have no such restriction on the primary
+ * thread, so treat the first thread in the core as the primary thread.
+ */
+bool topology_is_primary_thread(unsigned int cpu)
+{
+	if (cpu == cpumask_first(topology_sibling_cpumask(cpu)))
+		return true;
+
+	return false;
+}
+#endif
+
 void update_siblings_masks(unsigned int cpuid)
 {
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -841,6 +878,14 @@ void __init init_cpu_topology(void)
 		reset_cpu_topology();
 	}
 
+	/*
+	 * By this stage we know whether SMT is supported, so update the
+	 * core's SMT information. CONFIG_SMT_NUM_THREADS_DYNAMIC is not
+	 * supported, so pass num_threads as max_threads as well.
+	 */
+	cpu_smt_set_num_threads(topology_smt_get_num_threads(),
+				topology_smt_get_num_threads());
+
 	for_each_possible_cpu(cpu) {
 		ret = fetch_cache_info(cpu);
 		if (!ret)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index a07b510e7dc55960e83dca5542f9e937925c66d9..0367f3a61838a1104fac125fabc01ec9d59a745b 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -92,6 +92,20 @@ void update_siblings_masks(unsigned int cpu);
 void remove_cpu_topology(unsigned int cpuid);
 void reset_cpu_topology(void);
 int parse_acpi_topology(void);
+
+#ifdef CONFIG_HOTPLUG_SMT
+bool topology_is_primary_thread(unsigned int cpu);
+void topology_smt_set_num_threads(unsigned int num_threads);
+unsigned int topology_smt_get_num_threads(void);
+#else
+static inline bool topology_is_primary_thread(unsigned int cpu) { return false; }
+static inline void topology_smt_set_num_threads(unsigned int num_threads) { }
+static inline unsigned int topology_smt_get_num_threads(void)
+{
+	return 1;
+}
+#endif
+
 #endif
 
 #endif /* _LINUX_ARCH_TOPOLOGY_H_ */
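
For illustration only, not part of the patch: a minimal userspace C sketch of the per-core thread counting that parse_acpi_topology() performs above. The kernel keys an xarray by core_id and records the largest per-core thread count before handing it to topology_smt_set_num_threads(); the sketch uses a flat array and a made-up cpu-to-core_id table in place of the ACPI topology.

/*
 * Userspace sketch of the per-core SMT thread counting: count how many
 * CPUs share each core_id and take the maximum. The kernel code uses an
 * xarray keyed by core_id; this sketch uses a flat array and a
 * hypothetical topology table instead of parsing ACPI.
 */
#include <stdio.h>

#define NR_CPUS		8
#define MAX_CORES	16

/* Hypothetical mapping: CPUs 0-3 form two 2-thread cores, CPUs 4-7 are single-thread cores */
static const int core_id[NR_CPUS] = { 0, 0, 1, 1, 2, 3, 4, 5 };

int main(void)
{
	int threads_per_core[MAX_CORES] = { 0 };
	int max_smt_thread_num = 1;
	int cpu, n;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		n = ++threads_per_core[core_id[cpu]];
		if (n > max_smt_thread_num)
			max_smt_thread_num = n;
	}

	/* The kernel would pass this value to topology_smt_set_num_threads() */
	printf("max SMT threads per core: %d\n", max_smt_thread_num);
	return 0;
}

With the hypothetical table above the sketch prints 2, mirroring what the patch would report for a system with 2-way SMT cores; init_cpu_topology() then forwards that value to cpu_smt_set_num_threads() as both num_threads and max_threads.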