diff --git a/Documentation/arch/x86/buslock.rst b/Documentation/arch/x86/buslock.rst
index 31f1bfdff16f8881fef8e041cf63ae2da82fc94c..a5daebfa0a5817457eba5ed7cc47052b7ab32c30 100644
--- a/Documentation/arch/x86/buslock.rst
+++ b/Documentation/arch/x86/buslock.rst
@@ -27,7 +27,8 @@ Detection
 
 Intel processors may support either or both of the following hardware
 mechanisms to detect split locks and bus locks. Some AMD processors also
-support bus lock detect.
+support bus lock detect. Some Zhaoxin processors also support split lock
+detect.
 
 #AC exception for split lock detection
 --------------------------------------
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 436c87b4cc4ad68db5b63fdedd2f6e521b6dbfff..a3c144c29c813b38b95778e563311a3069e49199 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2459,7 +2459,7 @@ source "kernel/livepatch/Kconfig"
 
 config X86_BUS_LOCK_DETECT
 	bool "Split Lock Detect and Bus Lock Detect support"
-	depends on CPU_SUP_INTEL || CPU_SUP_AMD
+	depends on CPU_SUP_INTEL || CPU_SUP_AMD || CPU_SUP_ZHAOXIN || CPU_SUP_CENTAUR
 	default y
 	help
 	  Enable Split Lock Detect and Bus Lock Detect functionalities.
diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 30d18d3876ca106d938de895b03c599c20583aaf..842a357358abd2e066abaed43aca47090ed50621 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -202,6 +202,26 @@ static void __split_lock_reenable(struct work_struct *work)
  */
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
+/*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+		INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
+	return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
@@ -221,15 +241,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work = NULL;
+	struct delayed_work *work;
 	int cpu;
+	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
 	if (!current->reported_split_lock)
 		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 				    current->comm, current->pid, ip);
 	current->reported_split_lock = 1;
 
-	if (sysctl_sld_mitigate) {
+	if (saved_sld_mitigate) {
 		/*
 		 * misery factor #1:
 		 * sleep 10ms before trying to execute split lock.
@@ -242,18 +263,10 @@ static void split_lock_warn(unsigned long ip)
 		 */
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
-		work = &sl_reenable_unlock;
 	}
 
 	cpu = get_cpu();
-
-	if (!work) {
-		work = this_cpu_ptr(&sl_reenable);
-		/* Deferred initialization of per-CPU struct */
-		if (!work->work.func)
-			INIT_DELAYED_WORK(work, __split_lock_reenable);
-	}
-
+	work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index b15bcf21ac7b32704e21ffb79fba0b2a2dfe7f43..6c191b3a239292195b0784f7bc9b895e6adc75aa 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -231,6 +231,8 @@ static void init_centaur(struct cpuinfo_x86 *c)
 #endif
 
 	init_ia32_feat_ctl(c);
+
+	split_lock_init();
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
index 80b3791240e49dcc8b1a37aa5e66b4bcfe5aeb6f..0c42baff9b1cc7fa32a4b146c4626957f9725e08 100644
--- a/arch/x86/kernel/cpu/zhaoxin.c
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -111,6 +111,8 @@ static void init_zhaoxin(struct cpuinfo_x86 *c)
 #endif
 
 	init_ia32_feat_ctl(c);
+
+	split_lock_init();
 }
 
 #ifdef CONFIG_X86_32