diff --git a/0001-apply-preempt-RT-patch.patch b/0001-apply-preempt-RT-patch.patch index 053afc5bd2ff890bea65c2fc1f054f8a1ea64a04..315603ccf8fdba1e70bd4da05c10dea3b45b4ddf 100644 --- a/0001-apply-preempt-RT-patch.patch +++ b/0001-apply-preempt-RT-patch.patch @@ -1,7 +1,7 @@ -From 069d7f9ad6878ef306e0e15efb50cede4e996d0a Mon Sep 17 00:00:00 2001 +From 0679ad535fd2acf89c8d49794aa8e8a040829af1 Mon Sep 17 00:00:00 2001 From: zhangyu -Date: Tue, 31 Oct 2023 14:16:34 +0800 -Subject: [PATCH] apply-preempt-RT-patch +Date: Tue, 18 Jun 2024 14:23:29 +0800 +Subject: [PATCH] oooooooo --- .../Expedited-Grace-Periods.rst | 4 +- @@ -243,11 +243,11 @@ Subject: [PATCH] apply-preempt-RT-patch include/linux/debug_locks.h | 3 +- include/linux/delay.h | 6 + include/linux/entry-common.h | 3 +- - include/linux/eventfd.h | 11 +- + include/linux/eventfd.h | 14 +- include/linux/fs.h | 2 +- include/linux/hardirq.h | 7 +- include/linux/highmem-internal.h | 222 +++ - include/linux/highmem.h | 294 ++- + include/linux/highmem.h | 294 +-- include/linux/interrupt.h | 34 +- include/linux/io-mapping.h | 28 +- include/linux/irq_cpustat.h | 28 - @@ -274,7 +274,7 @@ Subject: [PATCH] apply-preempt-RT-patch include/linux/rwlock_types_rt.h | 56 + include/linux/rwsem-rt.h | 70 + include/linux/rwsem.h | 12 + - include/linux/sched.h | 123 +- + include/linux/sched.h | 131 +- include/linux/sched/hotplug.h | 2 + include/linux/sched/mm.h | 11 + include/linux/sched/rt.h | 8 - @@ -328,7 +328,7 @@ Subject: [PATCH] apply-preempt-RT-patch kernel/locking/mutex-rt.c | 224 +++ kernel/locking/rtmutex-debug.c | 102 - kernel/locking/rtmutex-debug.h | 11 - - kernel/locking/rtmutex.c | 939 +++++++-- + kernel/locking/rtmutex.c | 940 +++++++-- kernel/locking/rtmutex.h | 7 - kernel/locking/rtmutex_common.h | 36 +- kernel/locking/rwlock-rt.c | 334 ++++ @@ -340,7 +340,7 @@ Subject: [PATCH] apply-preempt-RT-patch kernel/panic.c | 32 +- kernel/printk/Makefile | 1 - kernel/printk/internal.h | 4 - - kernel/printk/printk.c | 1708 +++++++++-------- + kernel/printk/printk.c | 1729 +++++++++-------- kernel/printk/printk_safe.c | 349 +--- kernel/ptrace.c | 32 +- kernel/rcu/Kconfig | 4 +- @@ -358,7 +358,7 @@ Subject: [PATCH] apply-preempt-RT-patch kernel/sched/swait.c | 1 + kernel/signal.c | 105 +- kernel/smp.c | 14 +- - kernel/softirq.c | 431 ++++- + kernel/softirq.c | 431 +++- kernel/stop_machine.c | 27 +- kernel/time/hrtimer.c | 30 + kernel/time/tick-sched.c | 2 +- @@ -401,7 +401,7 @@ Subject: [PATCH] apply-preempt-RT-patch net/sched/sch_generic.c | 10 + net/sunrpc/svc_xprt.c | 4 +- net/xfrm/xfrm_state.c | 3 +- - 397 files changed, 8930 insertions(+), 4789 deletions(-) + 397 files changed, 8936 insertions(+), 4816 deletions(-) delete mode 100644 arch/alpha/include/asm/kmap_types.h delete mode 100644 arch/arc/include/asm/kmap_types.h delete mode 100644 arch/arm/include/asm/kmap_types.h @@ -597,7 +597,7 @@ index f64f4413a..3b4a24877 100644 Therefore, on_each_cpu() disables preemption across its call to smp_call_function() and also across the local call to diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst -index c9ab6af4d..e97d1b487 100644 +index d53856cc6..7148e9be0 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -25,7 +25,7 @@ warnings: @@ -663,10 +663,10 @@ index fb3ff76c3..3b2b1479f 100644 read-side critical sections. 
It also permits spinlocks blocking while in RCU read-side critical diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 71567aa7e..ee4911a95 100644 +index 1bcc53b0c..b5249966e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -4397,6 +4397,10 @@ +@@ -4431,6 +4431,10 @@ value, meaning that RCU_SOFTIRQ is used by default. Specify rcutree.use_softirq=0 to use rcuc kthreads. @@ -677,7 +677,7 @@ index 71567aa7e..ee4911a95 100644 rcutree.rcu_fanout_exact= [KNL] Disable autobalancing of the rcu_node combining tree. This is used by rcutorture, and might -@@ -4775,6 +4779,13 @@ +@@ -4815,6 +4819,13 @@ only normal grace-period primitives. No effect on CONFIG_TINY_RCU kernels. @@ -814,7 +814,7 @@ index a966239f0..a7830c594 100644 -performs an IPI to inform all processors about the new mapping. This results -in a significant performance penalty. diff --git a/arch/Kconfig b/arch/Kconfig -index b0319fa3c..32694b49d 100644 +index 0fc9c6d59..0a371be43 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -50,6 +50,7 @@ config OPROFILE @@ -825,7 +825,7 @@ index b0319fa3c..32694b49d 100644 select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help -@@ -683,6 +684,12 @@ config HAVE_TIF_NOHZ +@@ -686,6 +687,12 @@ config HAVE_TIF_NOHZ config HAVE_VIRT_CPU_ACCOUNTING bool @@ -838,7 +838,7 @@ index b0319fa3c..32694b49d 100644 config ARCH_HAS_SCALED_CPUTIME bool -@@ -697,7 +704,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN +@@ -700,7 +707,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN some 32-bit arches may require multiple accesses, so proper locking is needed to protect against concurrent accesses. @@ -1048,10 +1048,10 @@ index 1b9f473c6..c79912a6b 100644 + alloc_kmap_pgtable(FIXMAP_BASE); } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 400d53736..220c116cd 100644 +index 370b9048e..0b80f42f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -31,6 +31,7 @@ config ARM +@@ -32,6 +32,7 @@ config ARM select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW @@ -1059,7 +1059,7 @@ index 400d53736..220c116cd 100644 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU -@@ -66,7 +67,7 @@ config ARM +@@ -67,7 +68,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 @@ -1068,7 +1068,7 @@ index 400d53736..220c116cd 100644 select HAVE_ARCH_KFENCE if MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL -@@ -109,6 +110,7 @@ config ARM +@@ -110,6 +111,7 @@ config ARM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -1076,7 +1076,7 @@ index 400d53736..220c116cd 100644 select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ -@@ -124,6 +126,7 @@ config ARM +@@ -125,6 +127,7 @@ config ARM select OLD_SIGSUSPEND3 select PCI_SYSCALL if PCI select PERF_USE_VMALLOC @@ -1084,7 +1084,7 @@ index 400d53736..220c116cd 100644 select RTC_LIB select SET_FS select SYS_SUPPORTS_APM_EMULATION -@@ -1520,6 +1523,7 @@ config HAVE_ARCH_PFN_VALID +@@ -1521,6 +1524,7 @@ config HAVE_ARCH_PFN_VALID config HIGHMEM bool "High Memory Support" depends on MMU @@ -1556,7 +1556,7 @@ index 187fab227..000000000 - return (void *)vaddr; -} diff --git 
a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 30c747321..9b3118068 100644 +index cae54a9bf..c044ff3a0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -78,6 +78,7 @@ config ARM64 @@ -1567,7 +1567,7 @@ index 30c747321..9b3118068 100644 select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT select ARCH_WANT_DEFAULT_BPF_JIT -@@ -184,6 +185,7 @@ config ARM64 +@@ -186,6 +187,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -1575,7 +1575,7 @@ index 30c747321..9b3118068 100644 select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUTEX_CMPXCHG if FUTEX -@@ -207,6 +209,7 @@ config ARM64 +@@ -209,6 +211,7 @@ config ARM64 select PCI_DOMAINS_GENERIC if PCI select PCI_ECAM if (ACPI && PCI) select PCI_SYSCALL if PCI @@ -1666,7 +1666,7 @@ index 18782f0c4..6672b0535 100644 #include diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h -index dd8d27ea7..6464a3224 100644 +index 390d96125..531420502 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -27,6 +27,7 @@ struct thread_info { @@ -1685,7 +1685,7 @@ index dd8d27ea7..6464a3224 100644 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ -@@ -102,6 +104,7 @@ void arch_release_task_struct(struct task_struct *tsk); +@@ -104,6 +106,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) @@ -1693,7 +1693,7 @@ index dd8d27ea7..6464a3224 100644 #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64) #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) -@@ -109,9 +112,12 @@ void arch_release_task_struct(struct task_struct *tsk); +@@ -111,9 +114,12 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ @@ -1708,7 +1708,7 @@ index dd8d27ea7..6464a3224 100644 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_EMU) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c -index 5f59e24c9..4f522206c 100644 +index c247e1113..a15c83111 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -31,6 +31,7 @@ int main(void) @@ -1720,10 +1720,10 @@ index 5f59e24c9..4f522206c 100644 DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); #endif diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S -index 64145bfab..4cdbba720 100644 +index ab8ed1b62..02c180547 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S -@@ -521,9 +521,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING +@@ -517,9 +517,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING orr x24, x24, x0 alternative_else_nop_endif #endif @@ -1745,12 +1745,12 @@ index 64145bfab..4cdbba720 100644 mov x0, sp diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index 5335a6bd1..84520f116 100644 +index c2489a72b..44746f0e6 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c -@@ -226,6 +226,16 @@ static void sve_free(struct task_struct *task) - __sve_free(task); - } +@@ -313,6 +313,15 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, + * may disable 
TIF_SME and reenable traps. + */ +static void *sve_free_atomic(struct task_struct *task) +{ @@ -1761,32 +1761,39 @@ index 5335a6bd1..84520f116 100644 + task->thread.sve_state = NULL; + return sve_state; +} -+ + /* * TIF_SVE controls whether a task can use SVE without trapping while - * in userspace, and also the way a task's FPSIMD/SVE state is stored -@@ -1022,6 +1032,7 @@ void fpsimd_thread_switch(struct task_struct *next) - void fpsimd_flush_thread(void) +@@ -1522,7 +1531,6 @@ void fpsimd_thread_switch(struct task_struct *next) + static void fpsimd_flush_thread_vl(enum vec_type type) { int vl, supported_vl; -+ void *mem = NULL; +- + /* + * Reset the task vector length as required. This is where we + * ensure that all user tasks have a valid vector length +@@ -1556,6 +1564,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type) + void fpsimd_flush_thread(void) + { ++ void *mem = NULL; if (!system_supports_fpsimd()) return; -@@ -1034,7 +1045,7 @@ void fpsimd_flush_thread(void) + +@@ -1567,7 +1576,7 @@ void fpsimd_flush_thread(void) if (system_supports_sve()) { clear_thread_flag(TIF_SVE); - sve_free(current); + mem = sve_free_atomic(current); + fpsimd_flush_thread_vl(ARM64_VEC_SVE); + } - /* - * Reset the task vector length as required. -@@ -1068,6 +1079,7 @@ void fpsimd_flush_thread(void) +@@ -1579,6 +1588,7 @@ void fpsimd_flush_thread(void) } put_cpu_fpsimd_context(); -+ kfree(mem); ++ kfree(mem); } /* @@ -1805,10 +1812,10 @@ index 9a8f7c256..c0753dcdb 100644 static DEFINE_PER_CPU(call_single_data_t, cpu_backtrace_csd) = diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c -index 17cb54d1e..7f4a03453 100644 +index 7437291ff..d84d24413 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c -@@ -694,7 +694,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, +@@ -861,7 +861,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { do { @@ -1818,10 +1825,10 @@ index 17cb54d1e..7f4a03453 100644 local_daif_restore(DAIF_PROCCTX_NOIRQ); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c -index bc5a91d17..b757d28ab 100644 +index 11a2c3b0e..2683be020 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c -@@ -855,7 +855,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -957,7 +957,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * involves poking the GIC, which must be done in a * non-preemptible context. 
*/ @@ -1829,8 +1836,8 @@ index bc5a91d17..b757d28ab 100644 + migrate_disable(); kvm_pmu_flush_hwstate(vcpu); - -@@ -879,7 +879,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) + #ifdef CONFIG_CVM_HOST +@@ -985,7 +985,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_timer_sync_user(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); @@ -1839,15 +1846,15 @@ index bc5a91d17..b757d28ab 100644 continue; } -@@ -958,7 +958,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) - /* Exit types that need handling before we can be preempted */ - handle_exit_early(vcpu, ret); - +@@ -1073,7 +1073,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) + #ifdef CONFIG_CVM_HOST + } + #endif - preempt_enable(); + migrate_enable(); - - /* - * The ARMv8 architecture doesn't give the hypervisor + #ifdef CONFIG_CVM_HOST + if (pmu_stopped) + arm_pmu_set_phys_irq(true); diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 7bf0a617e..c9f2533cc 100644 --- a/arch/csky/Kconfig @@ -2268,10 +2275,10 @@ index 4a0c30ced..498eaa4d3 100644 static void highmem_setup(void) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig -index 896a29df1..1b3593d53 100644 +index 0b87c1819..94b679ba0 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig -@@ -2727,6 +2727,7 @@ config WAR_MIPS34K_MISSED_ITLB +@@ -2728,6 +2728,7 @@ config WAR_MIPS34K_MISSED_ITLB config HIGHMEM bool "High Memory Support" depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA @@ -3226,7 +3233,7 @@ index 310bcd768..ae3212dcf 100644 } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c -index 1d20f0f77..7e0a497a3 100644 +index ba9b54d35..588e081ca 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -312,12 +312,11 @@ static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct, @@ -3482,7 +3489,7 @@ index 624b4438a..000000000 -} -EXPORT_SYMBOL(kunmap_atomic_high); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c -index 1ed276d23..ae7c136ed 100644 +index 08e3422eb..1faa07898 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -63,11 +63,6 @@ @@ -3616,10 +3623,10 @@ index 245f1f8df..f05555dde 100644 } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c -index 2872b66d9..0918ab137 100644 +index 3de2adc0a..2eef587bf 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c -@@ -3002,7 +3002,7 @@ print_address(unsigned long addr) +@@ -3003,7 +3003,7 @@ print_address(unsigned long addr) static void dump_log_buf(void) { @@ -3628,7 +3635,7 @@ index 2872b66d9..0918ab137 100644 unsigned char buf[128]; size_t len; -@@ -3014,9 +3014,9 @@ dump_log_buf(void) +@@ -3015,9 +3015,9 @@ dump_log_buf(void) catch_memory_errors = 1; sync(); @@ -3908,10 +3915,10 @@ index 3348e0c4d..0db6919af 100644 (VMALLOC_END - VMALLOC_START) >> 20, diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig -index 530b7ec5d..a38d00d8b 100644 +index 7e2ce4c8d..1a6e4b187 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig -@@ -139,6 +139,7 @@ config MMU +@@ -140,6 +140,7 @@ config MMU config HIGHMEM bool default y if SPARC32 @@ -4257,7 +4264,7 @@ index e4abac6c9..173999422 100644 static struct kmsg_dumper kmsg_dumper = { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 236b510b2..403764738 100644 +index 5d1efac90..bbc313958 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -16,6 +16,7 @@ config X86_32 @@ -4268,7 +4275,7 @@ index 236b510b2..403764738 100644 select MODULES_USE_ELF_REL select OLD_SIGACTION select GENERIC_VDSO_32 -@@ 
-95,6 +96,7 @@ config X86 +@@ -96,6 +97,7 @@ config X86 select ARCH_SUPPORTS_ACPI select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 @@ -4276,7 +4283,7 @@ index 236b510b2..403764738 100644 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS -@@ -219,6 +221,7 @@ config X86 +@@ -220,6 +222,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -4603,7 +4610,7 @@ index 77217bd29..8eba66a33 100644 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h -index 1b37f1d3a..2eb9b9f94 100644 +index e34d4b508..4b049abe2 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -30,6 +30,7 @@ extern void kernel_fpu_begin_mask(unsigned int kfpu_mask); @@ -4991,7 +4998,7 @@ index 33ee47670..5fcac46aa 100644 } -arch_initcall(kdump_buf_page_init); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c -index 114e87e8d..3935532b1 100644 +index 49a56bac0..0ae1b836a 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -82,6 +82,18 @@ bool irq_fpu_usable(void) @@ -5048,10 +5055,10 @@ index 440eed558..7cfc4e6b7 100644 } +#endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 5afb7bfda..b60a41928 100644 +index 0edb7ff39..cddb4d188 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -8347,6 +8347,14 @@ int kvm_arch_init(void *opaque) +@@ -8571,6 +8571,14 @@ int kvm_arch_init(void *opaque) goto out; } @@ -5401,10 +5408,10 @@ index 673196fe8..0735ca5e8 100644 kmap_waitqueues_init(); } diff --git a/block/blk-mq.c b/block/blk-mq.c -index 1eab99166..dea9f38b4 100644 +index a28957dfb..aa6ef04db 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -44,7 +44,7 @@ +@@ -47,7 +47,7 @@ bool mq_unfair_dtag = true; module_param_named(unfair_dtag, mq_unfair_dtag, bool, 0444); @@ -5413,7 +5420,7 @@ index 1eab99166..dea9f38b4 100644 static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); -@@ -650,80 +650,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) +@@ -654,80 +654,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); @@ -5502,7 +5509,7 @@ index 1eab99166..dea9f38b4 100644 } static inline bool blk_mq_complete_need_ipi(struct request *rq) -@@ -733,6 +682,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) +@@ -737,6 +686,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) if (!IS_ENABLED(CONFIG_SMP) || !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) return false; @@ -5517,7 +5524,7 @@ index 1eab99166..dea9f38b4 100644 /* same CPU or cache domain? 
Complete locally */ if (cpu == rq->mq_ctx->cpu || -@@ -744,6 +701,31 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) +@@ -748,6 +705,31 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) return cpu_online(rq->mq_ctx->cpu); } @@ -5549,7 +5556,7 @@ index 1eab99166..dea9f38b4 100644 bool blk_mq_complete_request_remote(struct request *rq) { WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); -@@ -756,15 +738,15 @@ bool blk_mq_complete_request_remote(struct request *rq) +@@ -760,15 +742,15 @@ bool blk_mq_complete_request_remote(struct request *rq) return false; if (blk_mq_complete_need_ipi(rq)) { @@ -5572,7 +5579,7 @@ index 1eab99166..dea9f38b4 100644 } EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); -@@ -1679,14 +1661,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, +@@ -1701,14 +1683,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { @@ -5590,7 +5597,7 @@ index 1eab99166..dea9f38b4 100644 } /* -@@ -4215,7 +4197,7 @@ static int __init blk_mq_init(void) +@@ -4261,7 +4243,7 @@ static int __init blk_mq_init(void) int i; for_each_possible_cpu(i) @@ -5754,7 +5761,7 @@ index a9e7f5a82..30b4c288c 100644 static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, u8 *buf, size_t bufsiz) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c -index 14fad16d3..f9a4ac20f 100644 +index 3e1bb28b7..c26ed0ce6 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da @@ -5789,7 +5796,7 @@ index 14fad16d3..f9a4ac20f 100644 static int interrupts = -1; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); -@@ -170,7 +195,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, +@@ -186,7 +211,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); while (len--) @@ -5798,7 +5805,7 @@ index 14fad16d3..f9a4ac20f 100644 return 0; } -@@ -197,7 +222,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) +@@ -213,7 +238,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); @@ -5808,10 +5815,10 @@ index 14fad16d3..f9a4ac20f 100644 return 0; } diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c -index 9811c4095..17c9d8251 100644 +index 45d19cc0a..667ff40f3 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c -@@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) +@@ -2593,7 +2593,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) struct driver_data *driver_data = packet->driver_data; int ret = -ENOENT; @@ -5820,7 +5827,7 @@ index 9811c4095..17c9d8251 100644 if (packet->ack != 0) goto out; -@@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) +@@ -3513,7 +3513,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) struct iso_context *ctx = container_of(base, struct iso_context, base); int ret = 0; @@ -5952,7 +5959,7 @@ index 0040b4765..3f4f85478 100644 GEM_BUG_ON(!list_empty(&b->signalers)); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c -index f7b2e07e2..313d8a28e 
100644 +index f9fdbd79c..fe7118415 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -60,9 +60,10 @@ static int __engine_unpark(struct intel_wakeref *wf) @@ -6237,10 +6244,10 @@ index 60ab7151b..93f92ccd4 100644 return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c -index 5cea6eea7..785023081 100644 +index 9a02c4871..758c484c7 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c -@@ -89,11 +89,11 @@ apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) +@@ -88,11 +88,11 @@ apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) { void *reloc_page; @@ -6254,7 +6261,7 @@ index 5cea6eea7..785023081 100644 } static void -@@ -105,9 +105,9 @@ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) +@@ -104,9 +104,9 @@ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) if (info->src_bo && !info->src_bo->is_primary) id = info->src_bo->surface_id; @@ -6266,7 +6273,7 @@ index 5cea6eea7..785023081 100644 } /* return holding the reference to this object */ -@@ -149,7 +149,6 @@ static int qxl_process_single_command(struct qxl_device *qdev, +@@ -148,7 +148,6 @@ static int qxl_process_single_command(struct qxl_device *qdev, struct qxl_bo *cmd_bo; void *fb_cmd; int i, ret, num_relocs; @@ -6274,7 +6281,7 @@ index 5cea6eea7..785023081 100644 switch (cmd->type) { case QXL_CMD_DRAW: -@@ -185,21 +184,21 @@ static int qxl_process_single_command(struct qxl_device *qdev, +@@ -184,21 +183,21 @@ static int qxl_process_single_command(struct qxl_device *qdev, goto out_free_reloc; /* TODO copy slow path code from i915 */ @@ -6575,7 +6582,7 @@ index e99400f3a..396ec97f1 100644 if (bytes_written) hyperv_report_panic_msg(panic_pa, bytes_written); diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig -index ce9429ca6..29ccbd6ac 100644 +index d45aba3e1..6e890131d 100644 --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig @@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT @@ -6587,7 +6594,7 @@ index ce9429ca6..29ccbd6ac 100644 This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index 5af25898b..c40e960cd 100644 +index c4938b1a5..f30f1b03b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2218,8 +2218,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) @@ -6620,7 +6627,7 @@ index 5af25898b..c40e960cd 100644 } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h -index 5c05acf20..665fe138a 100644 +index d1780d086..7a5f4eba8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -635,6 +635,7 @@ struct r5conf { @@ -7106,10 +7113,10 @@ index 71e2ada86..72e2e71aa 100644 /* Find first taken slot. */ for (slot = 0; slot < ATH_BCBUF; slot++) { diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c -index 4353443b8..03e2569da 100644 +index 2d6c77dcc..7daba964f 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c -@@ -1522,7 +1522,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +@@ -1518,7 +1518,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Prevents hv_pci_onchannelcallback() from running concurrently * in the tasklet. 
*/ @@ -7177,10 +7184,10 @@ index 30afcbbe1..4ae5b8152 100644 } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c -index 5ea426eff..0d6b9acc7 100644 +index 53fa29c80..1b8410181 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c -@@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) +@@ -830,7 +830,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) INIT_LIST_HEAD(&del_list); @@ -7189,7 +7196,7 @@ index 5ea426eff..0d6b9acc7 100644 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; -@@ -864,7 +864,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) +@@ -866,7 +866,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) sel_time = fcf->time; } } @@ -7216,10 +7223,10 @@ index 4261380af..65160eaaa 100644 /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h -index b6dc9003b..d5cf70ead 100644 +index 61b11490a..32c534b87 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h -@@ -153,12 +153,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) +@@ -152,12 +152,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) up->dl_write(up, value); } @@ -7276,7 +7283,7 @@ index b6dc9003b..d5cf70ead 100644 return true; } -@@ -167,7 +210,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up) +@@ -166,7 +209,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up) if (!(up->ier & UART_IER_THRI)) return false; up->ier &= ~UART_IER_THRI; @@ -7286,7 +7293,7 @@ index b6dc9003b..d5cf70ead 100644 } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c -index 0a7e9491b..83536b159 100644 +index 43f2eed6d..687119fe2 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -275,10 +275,8 @@ static void serial8250_backup_timeout(struct timer_list *t) @@ -7428,10 +7435,10 @@ index de48a5846..d246f2755 100644 static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index 1f231fcda..0901c5bae 100644 +index 8b49ac485..947737d0e 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -729,7 +729,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) +@@ -730,7 +730,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) serial_out(p, UART_EFR, UART_EFR_ECB); serial_out(p, UART_LCR, 0); } @@ -7440,7 +7447,7 @@ index 1f231fcda..0901c5bae 100644 if (p->capabilities & UART_CAP_EFR) { serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(p, UART_EFR, efr); -@@ -1404,7 +1404,7 @@ static void serial8250_stop_rx(struct uart_port *port) +@@ -1405,7 +1405,7 @@ static void serial8250_stop_rx(struct uart_port *port) up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); up->port.read_status_mask &= ~UART_LSR_DR; @@ -7449,7 +7456,7 @@ index 1f231fcda..0901c5bae 100644 serial8250_rpm_put(up); } -@@ -1434,7 +1434,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) +@@ -1435,7 +1435,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; @@ -7458,7 +7465,7 @@ index 1f231fcda..0901c5bae 100644 } } 
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); -@@ -1676,7 +1676,7 @@ static void serial8250_disable_ms(struct uart_port *port) +@@ -1677,7 +1677,7 @@ static void serial8250_disable_ms(struct uart_port *port) mctrl_gpio_disable_ms(up->gpios); up->ier &= ~UART_IER_MSI; @@ -7467,7 +7474,7 @@ index 1f231fcda..0901c5bae 100644 } static void serial8250_enable_ms(struct uart_port *port) -@@ -1692,7 +1692,7 @@ static void serial8250_enable_ms(struct uart_port *port) +@@ -1693,7 +1693,7 @@ static void serial8250_enable_ms(struct uart_port *port) up->ier |= UART_IER_MSI; serial8250_rpm_get(up); @@ -7476,7 +7483,7 @@ index 1f231fcda..0901c5bae 100644 serial8250_rpm_put(up); } -@@ -2116,14 +2116,7 @@ static void serial8250_put_poll_char(struct uart_port *port, +@@ -2129,14 +2129,7 @@ static void serial8250_put_poll_char(struct uart_port *port, struct uart_8250_port *up = up_to_u8250p(port); serial8250_rpm_get(up); @@ -7492,7 +7499,7 @@ index 1f231fcda..0901c5bae 100644 wait_for_xmitr(up, BOTH_EMPTY); /* -@@ -2136,7 +2129,7 @@ static void serial8250_put_poll_char(struct uart_port *port, +@@ -2149,7 +2142,7 @@ static void serial8250_put_poll_char(struct uart_port *port, * and restore the IER */ wait_for_xmitr(up, BOTH_EMPTY); @@ -7501,7 +7508,7 @@ index 1f231fcda..0901c5bae 100644 serial8250_rpm_put(up); } -@@ -2441,7 +2434,7 @@ void serial8250_do_shutdown(struct uart_port *port) +@@ -2454,7 +2447,7 @@ void serial8250_do_shutdown(struct uart_port *port) */ spin_lock_irqsave(&port->lock, flags); up->ier = 0; @@ -7510,7 +7517,7 @@ index 1f231fcda..0901c5bae 100644 spin_unlock_irqrestore(&port->lock, flags); synchronize_irq(port->irq); -@@ -2797,7 +2790,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, +@@ -2806,7 +2799,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, if (up->capabilities & UART_CAP_RTOIE) up->ier |= UART_IER_RTOIE; @@ -7519,7 +7526,7 @@ index 1f231fcda..0901c5bae 100644 if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; -@@ -3262,7 +3255,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); +@@ -3271,7 +3264,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); #ifdef CONFIG_SERIAL_8250_CONSOLE @@ -7528,7 +7535,7 @@ index 1f231fcda..0901c5bae 100644 { struct uart_8250_port *up = up_to_u8250p(port); -@@ -3270,6 +3263,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch) +@@ -3279,6 +3272,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch) serial_port_out(port, UART_TX, ch); } @@ -7547,7 +7554,7 @@ index 1f231fcda..0901c5bae 100644 /* * Restore serial console when h/w power-off detected */ -@@ -3296,6 +3301,32 @@ static void serial8250_console_restore(struct uart_8250_port *up) +@@ -3305,6 +3310,32 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } @@ -7580,7 +7587,7 @@ index 1f231fcda..0901c5bae 100644 /* * Print a string to the serial port trying not to disturb * any possible real use of the port... 
-@@ -3312,24 +3343,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3321,24 +3352,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, struct uart_port *port = &up->port; unsigned long flags; unsigned int ier; @@ -7607,7 +7614,7 @@ index 1f231fcda..0901c5bae 100644 /* check scratch reg to see if port powered off during system sleep */ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { -@@ -3343,7 +3362,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3352,7 +3371,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, mdelay(port->rs485.delay_rts_before_send); } @@ -7617,7 +7624,7 @@ index 1f231fcda..0901c5bae 100644 /* * Finally, wait for transmitter to become empty -@@ -3356,8 +3377,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3365,8 +3386,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (em485->tx_stopped) up->rs485_stop_tx(up); } @@ -7627,7 +7634,7 @@ index 1f231fcda..0901c5bae 100644 /* * The receive handling will happen properly because the -@@ -3369,8 +3389,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3378,8 +3398,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (up->msr_saved_flags) serial8250_modem_status(up); @@ -7637,7 +7644,7 @@ index 1f231fcda..0901c5bae 100644 } static unsigned int probe_baud(struct uart_port *port) -@@ -3390,6 +3409,7 @@ static unsigned int probe_baud(struct uart_port *port) +@@ -3399,6 +3418,7 @@ static unsigned int probe_baud(struct uart_port *port) int serial8250_console_setup(struct uart_port *port, char *options, bool probe) { @@ -7645,7 +7652,7 @@ index 1f231fcda..0901c5bae 100644 int baud = 9600; int bits = 8; int parity = 'n'; -@@ -3399,6 +3419,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) +@@ -3408,6 +3428,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) if (!port->iobase && !port->membase) return -ENODEV; @@ -7655,10 +7662,10 @@ index 1f231fcda..0901c5bae 100644 uart_parse_options(options, &baud, &parity, &bits, &flow); else if (probe) diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c -index c9876040c..43ade1432 100644 +index 0a9f11fdf..2667652ee 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c -@@ -2280,18 +2280,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) +@@ -2329,18 +2329,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int old_cr = 0, new_cr; @@ -7687,7 +7694,7 @@ index c9876040c..43ade1432 100644 /* * First save the CR then disable the interrupts -@@ -2317,8 +2323,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) +@@ -2366,8 +2372,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) pl011_write(old_cr, uap, REG_CR); if (locked) @@ -7729,7 +7736,7 @@ index 84e815808..342005ed5 100644 static int __init diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c -index c3abcd043..2479ea4c8 100644 +index 89e3e220f..30e549b5e 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -172,10 +172,10 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) @@ -7759,7 +7766,7 @@ index dae9a57d7..9a6a0ec4d 100644 
_enter("%p{%pd},%llx", dentry, dentry, vnode->fid.vnode); diff --git a/fs/aio.c b/fs/aio.c -index b2396cd4a..a8e678d29 100644 +index 78aaeaf35..d3229b5ca 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -43,7 +43,6 @@ @@ -7770,7 +7777,7 @@ index b2396cd4a..a8e678d29 100644 #include #include -@@ -1766,7 +1765,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, +@@ -1777,7 +1776,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, list_del_init(&req->wait.entry); list_del(&iocb->ki_list); iocb->ki_res.res = mangle_poll(mask); @@ -7780,7 +7787,7 @@ index b2396cd4a..a8e678d29 100644 INIT_WORK(&req->work, aio_poll_put_work); schedule_work(&req->work); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index bcc6848bb..fabbf6cc4 100644 +index 8092c8197..19cc5bdbc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -17,7 +17,6 @@ @@ -7805,10 +7812,10 @@ index 799be3a5d..d5165a7da 100644 cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); diff --git a/fs/dcache.c b/fs/dcache.c -index f5b78cc80..b2e0d1a07 100644 +index cc5ba31d9..9e9b2cf26 100644 --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2566,9 +2566,10 @@ EXPORT_SYMBOL(d_rehash); +@@ -2596,9 +2596,10 @@ EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { @@ -7821,7 +7828,7 @@ index f5b78cc80..b2e0d1a07 100644 return n; cpu_relax(); } -@@ -2576,26 +2577,30 @@ static inline unsigned start_dir_add(struct inode *dir) +@@ -2606,26 +2607,30 @@ static inline unsigned start_dir_add(struct inode *dir) static inline void end_dir_add(struct inode *dir, unsigned n) { @@ -7864,7 +7871,7 @@ index f5b78cc80..b2e0d1a07 100644 { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); -@@ -2609,7 +2614,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, +@@ -2639,7 +2644,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, retry: rcu_read_lock(); @@ -7873,7 +7880,7 @@ index f5b78cc80..b2e0d1a07 100644 r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { -@@ -2637,7 +2642,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, +@@ -2667,7 +2672,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, } hlist_bl_lock(b); @@ -7882,7 +7889,7 @@ index f5b78cc80..b2e0d1a07 100644 hlist_bl_unlock(b); rcu_read_unlock(); goto retry; -@@ -2710,7 +2715,7 @@ void __d_lookup_done(struct dentry *dentry) +@@ -2740,7 +2745,7 @@ void __d_lookup_done(struct dentry *dentry) hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); @@ -7892,7 +7899,7 @@ index f5b78cc80..b2e0d1a07 100644 hlist_bl_unlock(b); INIT_HLIST_NODE(&dentry->d_u.d_alias); diff --git a/fs/eventfd.c b/fs/eventfd.c -index 4a14295cf..cdaff4ddb 100644 +index 3673eb8de..5ff8cc554 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -25,8 +25,6 @@ @@ -8019,7 +8026,7 @@ index cb2146e02..fb9794dce 100644 return fscache_object_congested(); } diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c -index d5294e663..ee8846818 100644 +index 14e99ffa5..eb899feaf 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -160,7 +160,7 @@ static int fuse_direntplus_link(struct file *file, @@ -8032,7 +8039,7 @@ index d5294e663..ee8846818 100644 if (!o->nodeid) { /* diff --git a/fs/inode.c b/fs/inode.c -index 7436a17a2..45a821a8c 100644 +index bec790bde..8528c6e32 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -158,7 +158,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) @@ 
-8045,7 +8052,7 @@ index 7436a17a2..45a821a8c 100644 inode->dirtied_when = 0; diff --git a/fs/namei.c b/fs/namei.c -index f08e14d6d..14d27fe95 100644 +index 3588e12d6..7a171441c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1538,7 +1538,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, @@ -8057,7 +8064,7 @@ index f08e14d6d..14d27fe95 100644 /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) -@@ -3035,7 +3035,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, +@@ -3092,7 +3092,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; @@ -8067,7 +8074,7 @@ index f08e14d6d..14d27fe95 100644 if (unlikely(IS_DEADDIR(dir_inode))) return ERR_PTR(-ENOENT); diff --git a/fs/namespace.c b/fs/namespace.c -index 6e76f2a72..dbd1119a5 100644 +index 61c88343c..4750c6fc5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -14,6 +14,7 @@ @@ -8137,7 +8144,7 @@ index b27ebdcce..f86c98a7e 100644 status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/proc/array.c b/fs/proc/array.c -index 18a4588c3..decaa7768 100644 +index d5fed9281..614a21a55 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -384,9 +384,9 @@ static inline void task_context_switch_counts(struct seq_file *m, @@ -8151,20 +8158,20 @@ index 18a4588c3..decaa7768 100644 + cpumask_pr_args(&task->cpus_mask)); } - static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) + #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY diff --git a/fs/proc/base.c b/fs/proc/base.c -index 24c70ff92..6c8156c4c 100644 +index 4e0054a37..92d6818fc 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -97,6 +97,7 @@ - #include +@@ -102,6 +102,7 @@ #include #include -+#include #include ++#include + #include #include #include "internal.h" -@@ -2164,7 +2165,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, +@@ -2275,7 +2276,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); if (!child) { @@ -8174,7 +8181,7 @@ index 24c70ff92..6c8156c4c 100644 if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index df435cd91..eb19a3429 100644 +index 1b9bc52ba..54f690765 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -684,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file, @@ -8187,7 +8194,7 @@ index df435cd91..eb19a3429 100644 if (IS_ERR(child)) return false; diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c -index ce03c3dbb..5c2c14d5f 100644 +index d59f13b1f..64f3f85d2 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -384,7 +384,8 @@ void pstore_record_init(struct pstore_record *record, @@ -8291,7 +8298,7 @@ index b4d43a4af..ac255e889 100644 #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 50b4fd0a0..f38b1bd30 100644 +index f27a0916a..5ee68b9ce 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -166,7 +166,7 @@ struct request { @@ -8385,7 +8392,7 @@ index b540e5a60..dacf87c92 100644 CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h -index 7cdec529b..3292c7ba0 100644 +index 2e50162d9..d39f4c9a7 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -210,6 +210,11 @@ static inline int 
cpumask_any_and_distribute(const struct cpumask *src1p, @@ -8476,7 +8483,7 @@ index de029656d..6f262f3d6 100644 * arch_check_user_regs - Architecture specific sanity check for user mode regs * @regs: Pointer to currents pt_regs diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h -index 6cd2a92da..ab602b95d 100644 +index c1bd4883e..4a126ebf9 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -14,6 +14,7 @@ @@ -8487,21 +8494,24 @@ index 6cd2a92da..ab602b95d 100644 /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining -@@ -43,11 +44,9 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask); - int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, +@@ -44,12 +45,11 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w __u64 *cnt); + void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); -DECLARE_PER_CPU(int, eventfd_wake_count); -- + -static inline bool eventfd_signal_count(void) -+static inline bool eventfd_signal_allowed(void) - { +-{ - return this_cpu_read(eventfd_wake_count); +-} ++static inline bool eventfd_signal_allowed(void) ++ { + return !current->in_eventfd_signal; - } ++ } #else /* CONFIG_EVENTFD */ -@@ -84,9 +83,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, + +@@ -85,9 +85,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, return -ENOSYS; } @@ -8512,12 +8522,12 @@ index 6cd2a92da..ab602b95d 100644 + return true; } - #endif + static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) diff --git a/include/linux/fs.h b/include/linux/fs.h -index f6bb20f6f..e7d79fdf4 100644 +index 9d7e901b7..66f16043d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -724,7 +724,7 @@ struct inode { +@@ -730,7 +730,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; @@ -8804,7 +8814,7 @@ index 000000000..f9bc6acd3 + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index cc5fe6c62..77be3e318 100644 +index ebfee2b67..5ebb6facc 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -11,217 +11,137 @@ @@ -8878,19 +8888,19 @@ index cc5fe6c62..77be3e318 100644 + * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping ++ * ++ * Can only be invoked from preemptible task context because on 32bit ++ * systems with CONFIG_HIGHMEM enabled this function might sleep. * - * However when holding an atomic kmap it is not legal to sleep, so atomic - * kmaps are appropriate for short, tight code paths only. -+ * Can only be invoked from preemptible task context because on 32bit -+ * systems with CONFIG_HIGHMEM enabled this function might sleep. ++ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area ++ * this returns the virtual address of the direct kernel mapping. * - * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap - * gives a more generic (and caching) interface. But kmap_atomic can - * be used in IRQ contexts, so in some (very limited) cases we need - * it. -+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area -+ * this returns the virtual address of the direct kernel mapping. -+ * + * The returned virtual address is globally visible and valid up to the + * point where it is unmapped via kunmap(). The pointer can be handed to + * other contexts. 
@@ -9072,13 +9082,13 @@ index cc5fe6c62..77be3e318 100644 - kunmap_flush_on_unmap(addr); #endif -} -- + -#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) - -#define kmap_flush_unused() do {} while(0) - -#endif /* CONFIG_HIGHMEM */ - +- -#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -DECLARE_PER_CPU(int, __kmap_atomic_idx); @@ -9342,7 +9352,7 @@ index ec2a47a81..9448e2bfc 100644 bool irq_work_queue_on(struct irq_work *work, int cpu); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h -index dc1b213ae..9bbcd8cba 100644 +index 8710502b3..db4c4d3f8 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -68,6 +68,7 @@ struct irq_desc { @@ -9395,10 +9405,10 @@ index fef2d43a7..741aa2008 100644 defined(CONFIG_PREEMPT_TRACER) extern void stop_critical_timings(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h -index e1d66cc50..727913a6f 100644 +index 2023afa63..42ce73380 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -220,6 +220,7 @@ static __always_inline void might_resched(void) +@@ -223,6 +223,7 @@ static __always_inline void might_resched(void) extern void ___might_sleep(const char *file, int line, int preempt_offset); extern void __might_sleep(const char *file, int line, int preempt_offset); extern void __cant_sleep(const char *file, int line, int preempt_offset); @@ -9406,7 +9416,7 @@ index e1d66cc50..727913a6f 100644 /** * might_sleep - annotation for functions that can sleep -@@ -235,6 +236,10 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -238,6 +239,10 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) @@ -9417,7 +9427,7 @@ index e1d66cc50..727913a6f 100644 /** * cant_sleep - annotation for functions that cannot sleep * -@@ -243,6 +248,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -246,6 +251,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) @@ -9436,7 +9446,7 @@ index e1d66cc50..727913a6f 100644 /** * non_block_start - annotate the start of section where sleeping is prohibited * -@@ -266,7 +283,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -269,7 +286,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) @@ -9446,7 +9456,7 @@ index e1d66cc50..727913a6f 100644 # define sched_annotate_sleep() do { } while (0) # define non_block_start() do { } while (0) # define non_block_end() do { } while (0) -@@ -274,13 +293,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -277,13 +296,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) @@ -9718,7 +9728,7 @@ index 3f02b8186..1b8ae0349 100644 + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index d1be389c0..54b482514 100644 +index d1c5946ad..f8099b6c1 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -13,6 +13,7 @@ @@ -9940,7 +9950,7 @@ index 000000000..f0b2e07cd + +#endif diff --git a/include/linux/nfs_xdr.h 
b/include/linux/nfs_xdr.h -index 5491ad5f4..cd9e5b3f1 100644 +index 33442fd01..4612bb5be 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1675,7 +1675,7 @@ struct nfs_unlinkdata { @@ -10286,7 +10296,7 @@ index 89d5281e0..0386dd2ab 100644 #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h -index 7d787f91d..9331b131b 100644 +index 9bd33432d..26c70fd11 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -46,6 +46,12 @@ static inline const char *printk_skip_headers(const char *buffer) @@ -10439,7 +10449,7 @@ index 000000000..77a89dd2c + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 095b3b39b..1effcae06 100644 +index 8716a1706..23f5f07f1 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -54,6 +54,11 @@ void __rcu_read_unlock(void); @@ -10463,7 +10473,7 @@ index 095b3b39b..1effcae06 100644 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ -@@ -329,7 +336,8 @@ static inline void rcu_preempt_sleep_check(void) { } +@@ -338,7 +345,8 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ @@ -10877,10 +10887,10 @@ index 4c715be48..9323af8a9 100644 * lock for reading */ diff --git a/include/linux/sched.h b/include/linux/sched.h -index 5e413d309..d3329dc9a 100644 +index fa8301813..e2b535ce9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -36,6 +36,7 @@ +@@ -37,6 +37,7 @@ #include #include #include @@ -10888,7 +10898,7 @@ index 5e413d309..d3329dc9a 100644 /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; -@@ -114,12 +115,8 @@ struct io_uring_task; +@@ -115,12 +116,8 @@ struct io_uring_task; __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ TASK_PARKED) @@ -10901,7 +10911,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP /* -@@ -143,6 +140,9 @@ struct io_uring_task; +@@ -144,6 +141,9 @@ struct io_uring_task; smp_store_mb(current->state, (state_value)); \ } while (0) @@ -10911,7 +10921,7 @@ index 5e413d309..d3329dc9a 100644 #define set_special_state(state_value) \ do { \ unsigned long flags; /* may shadow */ \ -@@ -196,6 +196,9 @@ struct io_uring_task; +@@ -197,6 +197,9 @@ struct io_uring_task; #define set_current_state(state_value) \ smp_store_mb(current->state, (state_value)) @@ -10921,7 +10931,7 @@ index 5e413d309..d3329dc9a 100644 /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must -@@ -675,6 +678,13 @@ struct wake_q_node { +@@ -680,6 +683,13 @@ struct wake_q_node { struct wake_q_node *next; }; @@ -10935,7 +10945,7 @@ index 5e413d309..d3329dc9a 100644 /** * struct task_struct_resvd - KABI extension struct */ -@@ -700,6 +710,8 @@ struct task_struct { +@@ -705,6 +715,8 @@ struct task_struct { #endif /* -1 unrunnable, 0 runnable, >0 stopped: */ volatile long state; @@ -10944,7 +10954,7 @@ index 5e413d309..d3329dc9a 100644 /* * This begins the randomizable portion of task_struct. 
Only -@@ -772,6 +784,11 @@ struct task_struct { +@@ -777,6 +789,11 @@ struct task_struct { int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t cpus_mask; @@ -10956,7 +10966,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; -@@ -880,6 +897,10 @@ struct task_struct { +@@ -885,6 +902,10 @@ struct task_struct { #ifdef CONFIG_IOMMU_SVA KABI_FILL_HOLE(unsigned pasid_activated:1) #endif @@ -10967,7 +10977,7 @@ index 5e413d309..d3329dc9a 100644 unsigned long atomic_flags; /* Flags requiring atomic access. */ -@@ -1021,11 +1042,16 @@ struct task_struct { +@@ -1026,11 +1047,16 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct __rcu *sighand; @@ -10984,7 +10994,7 @@ index 5e413d309..d3329dc9a 100644 unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; -@@ -1052,6 +1078,7 @@ struct task_struct { +@@ -1057,6 +1083,7 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; @@ -10992,7 +11002,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task: */ -@@ -1079,6 +1106,9 @@ struct task_struct { +@@ -1084,6 +1111,9 @@ struct task_struct { int softirq_context; int irq_config; #endif @@ -11002,7 +11012,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL -@@ -1364,6 +1394,7 @@ struct task_struct { +@@ -1369,6 +1399,7 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif @@ -11010,7 +11020,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif -@@ -1851,6 +1882,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); +@@ -1879,6 +1910,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -11018,7 +11028,7 @@ index 5e413d309..d3329dc9a 100644 extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP -@@ -1952,6 +1984,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) +@@ -1980,6 +2012,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -11108,6 +11118,22 @@ index 5e413d309..d3329dc9a 100644 /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. 
The return +@@ -2070,11 +2185,11 @@ static inline int spin_needbreak(spinlock_t *lock) + */ + static inline int rwlock_needbreak(rwlock_t *lock) + { +-#ifdef CONFIG_PREEMPTION +- return rwlock_is_contended(lock); +-#else ++//#ifdef CONFIG_PREEMPTION ++// return rwlock_is_contended(lock); ++//#else + return 0; +-#endif ++//#endif + } + + static __always_inline bool need_resched(void) diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h index 9a62ffdd2..412cdaba3 100644 --- a/include/linux/sched/hotplug.h @@ -11194,7 +11220,7 @@ index 26a2013ac..6e2dff721 100644 #endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h -index 0180b3d06..68a2debc1 100644 +index de6322f91..35589e8c5 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -7,6 +7,7 @@ @@ -11205,7 +11231,7 @@ index 0180b3d06..68a2debc1 100644 #include #include #include -@@ -126,6 +127,8 @@ struct uart_8250_port { +@@ -125,6 +126,8 @@ struct uart_8250_port { #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA unsigned char msr_saved_flags; @@ -11214,7 +11240,7 @@ index 0180b3d06..68a2debc1 100644 struct uart_8250_dma *dma; const struct uart_8250_ops *ops; -@@ -181,6 +184,8 @@ void serial8250_init_port(struct uart_8250_port *up); +@@ -180,6 +183,8 @@ void serial8250_init_port(struct uart_8250_port *up); void serial8250_set_defaults(struct uart_8250_port *up); void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count); @@ -11249,10 +11275,10 @@ index 3038a0610..fff1656c6 100644 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index d16c8bd08..d7248f71d 100644 +index 95f0a6922..b9f4d556e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -298,6 +298,7 @@ struct sk_buff_head { +@@ -299,6 +299,7 @@ struct sk_buff_head { __u32 qlen; spinlock_t lock; @@ -11260,7 +11286,7 @@ index d16c8bd08..d7248f71d 100644 }; struct sk_buff; -@@ -1929,6 +1930,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) +@@ -1935,6 +1936,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } @@ -11274,7 +11300,7 @@ index d16c8bd08..d7248f71d 100644 struct lock_class_key *class) { diff --git a/include/linux/smp.h b/include/linux/smp.h -index 84a0b4828..8348fa412 100644 +index 812c26f61..dcf0b5d50 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -260,6 +260,9 @@ static inline int get_boot_cpu_id(void) @@ -11831,7 +11857,7 @@ index 19f76d87f..7c841bf0a 100644 #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h -index 409385b25..3b3c9de82 100644 +index aad99130c..def86e994 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -67,6 +67,8 @@ struct trace_entry { @@ -12138,7 +12164,7 @@ index 2cdeca062..041d6524d 100644 #endif /* _LINUX_KERNEL_VTIME_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h -index 1663e4768..20aae6938 100644 +index ba5ac7f29..c1b63739e 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -10,6 +10,7 @@ @@ -12250,7 +12276,7 @@ index 9144e0f09..464d14b2a 100644 spinlock_t xfrm_policy_lock; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h -index fad29c996..a0a8b6a3f 100644 +index 
efacdfee3..02871f48c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -10,6 +10,7 @@ @@ -12293,7 +12319,7 @@ index fad29c996..a0a8b6a3f 100644 + * Variant of write_seqcount_t_begin() telling lockdep that a + * trylock was attempted. + */ -+ raw_write_seqcount_t_begin(s); ++ raw_write_seqcount_begin(s); + seqcount_acquire(&s->dep_map, 0, 1, _RET_IP_); + return true; + } @@ -12351,10 +12377,10 @@ index eb5ec1fb6..122d96db9 100644 /* This part must be outside protection */ diff --git a/init/Kconfig b/init/Kconfig -index 31fff350a..7cc9ec6ef 100644 +index e552194ef..ef3222865 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -866,7 +866,7 @@ config NUMA_BALANCING +@@ -899,7 +899,7 @@ config NUMA_BALANCING bool "Memory placement aware NUMA scheduler" depends on ARCH_SUPPORTS_NUMA_BALANCING depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY @@ -12363,7 +12389,7 @@ index 31fff350a..7cc9ec6ef 100644 help This option adds support for automatic NUMA aware memory/task placement. The mechanism is quite primitive and is based on migrating memory when -@@ -1023,6 +1023,7 @@ config CFS_BANDWIDTH +@@ -1066,6 +1066,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED @@ -12371,7 +12397,7 @@ index 31fff350a..7cc9ec6ef 100644 default n help This feature lets you explicitly allocate real CPU bandwidth -@@ -2005,6 +2006,7 @@ choice +@@ -2096,6 +2097,7 @@ choice config SLAB bool "SLAB" @@ -12379,7 +12405,7 @@ index 31fff350a..7cc9ec6ef 100644 select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work -@@ -2025,6 +2027,7 @@ config SLUB +@@ -2116,6 +2118,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" @@ -12387,7 +12413,7 @@ index 31fff350a..7cc9ec6ef 100644 help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but -@@ -2091,7 +2094,7 @@ config SHUFFLE_PAGE_ALLOCATOR +@@ -2182,7 +2185,7 @@ config SHUFFLE_PAGE_ALLOCATOR config SLUB_CPU_PARTIAL default y @@ -12434,11 +12460,11 @@ index e62a62303..b95f8784c 100644 This option turns the kernel into a real-time kernel by replacing various locking primitives (spinlocks, rwlocks, etc.) with diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 90de01cc6..d3b1a03d8 100644 +index 038efca71..dc4b7da03 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c -@@ -358,7 +358,7 @@ void cpuset_read_unlock(void) - percpu_up_read(&cpuset_rwsem); +@@ -380,7 +380,7 @@ void cpuset_unlock(void) + mutex_unlock(&cpuset_mutex); } -static DEFINE_SPINLOCK(callback_lock); @@ -12446,7 +12472,7 @@ index 90de01cc6..d3b1a03d8 100644 static struct workqueue_struct *cpuset_migrate_mm_wq; -@@ -737,9 +737,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs, +@@ -759,9 +759,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (!cpumask_empty(trialcs->prefer_cpus)) dynamic_affinity_enable(); @@ -12458,7 +12484,7 @@ index 90de01cc6..d3b1a03d8 100644 return 0; } -@@ -1399,7 +1399,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, +@@ -1431,7 +1431,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, * Newly added CPUs will be removed from effective_cpus and * newly deleted ones will be added back to effective_cpus. 
*/ @@ -12467,7 +12493,7 @@ index 90de01cc6..d3b1a03d8 100644 if (adding) { cpumask_or(parent->subparts_cpus, parent->subparts_cpus, tmp->addmask); -@@ -1421,7 +1421,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, +@@ -1453,7 +1453,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, if (cpuset->partition_root_state != new_prs) cpuset->partition_root_state = new_prs; @@ -12476,7 +12502,7 @@ index 90de01cc6..d3b1a03d8 100644 return cmd == partcmd_update; } -@@ -1524,7 +1524,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) +@@ -1556,7 +1556,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) continue; rcu_read_unlock(); @@ -12485,7 +12511,7 @@ index 90de01cc6..d3b1a03d8 100644 cpumask_copy(cp->effective_cpus, tmp->new_cpus); if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) { -@@ -1558,7 +1558,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) +@@ -1590,7 +1590,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) if (new_prs != cp->partition_root_state) cp->partition_root_state = new_prs; @@ -12494,7 +12520,7 @@ index 90de01cc6..d3b1a03d8 100644 WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); -@@ -1686,7 +1686,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, +@@ -1718,7 +1718,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, return -EINVAL; } @@ -12503,7 +12529,7 @@ index 90de01cc6..d3b1a03d8 100644 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); /* -@@ -1696,7 +1696,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, +@@ -1728,7 +1728,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); } @@ -12512,7 +12538,7 @@ index 90de01cc6..d3b1a03d8 100644 update_cpumasks_hier(cs, &tmp); -@@ -1890,9 +1890,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) +@@ -1922,9 +1922,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) continue; rcu_read_unlock(); @@ -12524,7 +12550,7 @@ index 90de01cc6..d3b1a03d8 100644 WARN_ON(!is_in_v2_mode() && !nodes_equal(cp->mems_allowed, cp->effective_mems)); -@@ -1960,9 +1960,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, +@@ -1992,9 +1992,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) goto done; @@ -12536,7 +12562,7 @@ index 90de01cc6..d3b1a03d8 100644 /* use trialcs->mems_allowed as a temp variable */ update_nodemasks_hier(cs, &trialcs->mems_allowed); -@@ -2053,9 +2053,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, +@@ -2085,9 +2085,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs))); @@ -12548,7 +12574,7 @@ index 90de01cc6..d3b1a03d8 100644 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) rebuild_sched_domains_locked(); -@@ -2141,9 +2141,9 @@ static int update_prstate(struct cpuset *cs, int new_prs) +@@ -2173,9 +2173,9 @@ static int update_prstate(struct cpuset *cs, int new_prs) rebuild_sched_domains_locked(); out: if (!err) { @@ -12560,7 +12586,7 @@ index 90de01cc6..d3b1a03d8 100644 } free_cpumasks(NULL, &tmpmask); -@@ -2575,7 +2575,7 @@ 
static int cpuset_common_seq_show(struct seq_file *sf, void *v) +@@ -2657,7 +2657,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) cpuset_filetype_t type = seq_cft(sf)->private; int ret = 0; @@ -12569,7 +12595,7 @@ index 90de01cc6..d3b1a03d8 100644 switch (type) { case FILE_CPULIST: -@@ -2602,7 +2602,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) +@@ -2684,7 +2684,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) ret = -EINVAL; } @@ -12578,7 +12604,7 @@ index 90de01cc6..d3b1a03d8 100644 return ret; } -@@ -2923,14 +2923,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) +@@ -3005,14 +3005,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpuset_inc(); @@ -12595,7 +12621,7 @@ index 90de01cc6..d3b1a03d8 100644 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; -@@ -2957,7 +2957,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) +@@ -3039,7 +3039,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) } rcu_read_unlock(); @@ -12604,34 +12630,34 @@ index 90de01cc6..d3b1a03d8 100644 cs->mems_allowed = parent->mems_allowed; cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); -@@ -2965,7 +2965,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) +@@ -3047,7 +3047,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY cpumask_copy(cs->prefer_cpus, parent->prefer_cpus); #endif - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); put_online_cpus(); -@@ -3021,7 +3021,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) +@@ -3103,7 +3103,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); -@@ -3032,7 +3032,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) +@@ -3114,7 +3114,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) top_cpuset.mems_allowed = top_cpuset.effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } -@@ -3144,12 +3144,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, +@@ -3224,12 +3224,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, #endif bool is_empty; @@ -12646,7 +12672,7 @@ index 90de01cc6..d3b1a03d8 100644 /* * Don't call update_tasks_cpumask() if the cpuset becomes empty, -@@ -3193,10 +3193,10 @@ hotplug_update_tasks(struct cpuset *cs, +@@ -3273,10 +3273,10 @@ hotplug_update_tasks(struct cpuset *cs, if (nodes_empty(*new_mems)) *new_mems = parent_cs(cs)->effective_mems; @@ -12659,7 +12685,7 @@ index 90de01cc6..d3b1a03d8 100644 if (cpus_updated) update_tasks_cpumask(cs); -@@ -3263,10 +3263,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) +@@ -3343,10 +3343,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || (parent->partition_root_state == PRS_ERROR))) { if (cs->nr_subparts_cpus) { @@ -12672,7 +12698,7 @@ index 90de01cc6..d3b1a03d8 100644 
compute_effective_cpumask(&new_cpus, cs, parent); } -@@ -3280,9 +3280,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) +@@ -3360,9 +3360,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) cpumask_empty(&new_cpus)) { update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp); @@ -12684,7 +12710,7 @@ index 90de01cc6..d3b1a03d8 100644 } cpuset_force_rebuild(); } -@@ -3362,7 +3362,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) +@@ -3442,7 +3442,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* synchronize cpus_allowed to cpu_active_mask */ if (cpus_updated) { @@ -12693,7 +12719,7 @@ index 90de01cc6..d3b1a03d8 100644 if (!on_dfl) cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); /* -@@ -3382,17 +3382,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work) +@@ -3462,17 +3462,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work) } } cpumask_copy(top_cpuset.effective_cpus, &new_cpus); @@ -12714,7 +12740,7 @@ index 90de01cc6..d3b1a03d8 100644 update_tasks_nodemask(&top_cpuset); } -@@ -3496,11 +3496,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) +@@ -3576,11 +3576,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { unsigned long flags; @@ -12728,7 +12754,7 @@ index 90de01cc6..d3b1a03d8 100644 } /** -@@ -3561,11 +3561,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) +@@ -3641,11 +3641,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) nodemask_t mask; unsigned long flags; @@ -12742,7 +12768,7 @@ index 90de01cc6..d3b1a03d8 100644 return mask; } -@@ -3657,14 +3657,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) +@@ -3737,14 +3737,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) return true; /* Not hardwall and node outside mems_allowed: scan up cpusets */ @@ -12784,10 +12810,10 @@ index d2ae14d0b..7b3bea56d 100644 /* if @may_sleep, play nice and yield if necessary */ if (may_sleep && (need_resched() || diff --git a/kernel/cpu.c b/kernel/cpu.c -index 9eedba9ac..17f106ef2 100644 +index 870ac4283..73e8bf31f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c -@@ -1687,7 +1687,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { +@@ -1711,7 +1711,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { .name = "ap:online", }, /* @@ -12796,7 +12822,7 @@ index 9eedba9ac..17f106ef2 100644 * this itself. 
*/ [CPUHP_TEARDOWN_CPU] = { -@@ -1696,6 +1696,13 @@ static struct cpuhp_step cpuhp_hp_states[] = { +@@ -1720,6 +1720,13 @@ static struct cpuhp_step cpuhp_hp_states[] = { .teardown.single = takedown_cpu, .cant_stop = true, }, @@ -12811,10 +12837,10 @@ index 9eedba9ac..17f106ef2 100644 [CPUHP_AP_SMPBOOT_THREADS] = { .name = "smpboot/threads:online", diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c -index 4e09fab52..1f5c577b9 100644 +index c27b3dfa1..b97d05072 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c -@@ -2157,7 +2157,7 @@ static int kdb_dmesg(int argc, const char **argv) +@@ -2155,7 +2155,7 @@ static int kdb_dmesg(int argc, const char **argv) int adjust = 0; int n = 0; int skip = 0; @@ -12823,7 +12849,7 @@ index 4e09fab52..1f5c577b9 100644 size_t len; char buf[201]; -@@ -2182,8 +2182,8 @@ static int kdb_dmesg(int argc, const char **argv) +@@ -2180,8 +2180,8 @@ static int kdb_dmesg(int argc, const char **argv) kdb_set(2, setargs); } @@ -12834,7 +12860,7 @@ index 4e09fab52..1f5c577b9 100644 n++; if (lines < 0) { -@@ -2215,8 +2215,8 @@ static int kdb_dmesg(int argc, const char **argv) +@@ -2213,8 +2213,8 @@ static int kdb_dmesg(int argc, const char **argv) if (skip >= n || skip < 0) return 0; @@ -12907,7 +12933,7 @@ index 26a81ea63..c15ca5450 100644 spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c -index 2547c6a6e..fd07bda90 100644 +index 12db99751..9acac6447 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -42,6 +42,7 @@ @@ -12918,7 +12944,7 @@ index 2547c6a6e..fd07bda90 100644 #include #include #include -@@ -291,7 +292,7 @@ static inline void free_thread_stack(struct task_struct *tsk) +@@ -293,7 +294,7 @@ static inline void free_thread_stack(struct task_struct *tsk) return; } @@ -12927,7 +12953,7 @@ index 2547c6a6e..fd07bda90 100644 return; } #endif -@@ -699,6 +700,19 @@ void __mmdrop(struct mm_struct *mm) +@@ -706,6 +707,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); @@ -12947,7 +12973,7 @@ index 2547c6a6e..fd07bda90 100644 static void mmdrop_async_fn(struct work_struct *work) { struct mm_struct *mm; -@@ -740,6 +754,15 @@ void __put_task_struct(struct task_struct *tsk) +@@ -747,6 +761,15 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); @@ -12963,7 +12989,7 @@ index 2547c6a6e..fd07bda90 100644 io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); -@@ -961,11 +984,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) +@@ -981,11 +1004,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; tsk->pf_io_worker = NULL; @@ -12977,7 +13003,7 @@ index 2547c6a6e..fd07bda90 100644 #ifdef CONFIG_FAULT_INJECTION tsk->fail_nth = 0; #endif -@@ -2102,6 +2127,7 @@ static __latent_entropy struct task_struct *copy_process( +@@ -2134,6 +2159,7 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -12986,10 +13012,10 @@ index 2547c6a6e..fd07bda90 100644 p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME diff --git a/kernel/futex/core.c b/kernel/futex/core.c -index 8dd0bc50a..8056aa077 100644 +index cde0ca876..909dcd708 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c -@@ -1498,6 +1498,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ +@@ -1508,6 +1508,7 @@ static int 
wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ struct task_struct *new_owner; bool postunlock = false; DEFINE_WAKE_Q(wake_q); @@ -12997,7 +13023,7 @@ index 8dd0bc50a..8056aa077 100644 int ret = 0; new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); -@@ -1547,14 +1548,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ +@@ -1557,14 +1558,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ * not fail. */ pi_state_update_owner(pi_state, new_owner); @@ -13015,7 +13041,7 @@ index 8dd0bc50a..8056aa077 100644 return ret; } -@@ -2155,6 +2157,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, +@@ -2165,6 +2167,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, */ requeue_pi_wake_futex(this, &key2, hb2); continue; @@ -13032,7 +13058,7 @@ index 8dd0bc50a..8056aa077 100644 } else if (ret) { /* * rt_mutex_start_proxy_lock() detected a -@@ -2847,7 +2859,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, +@@ -2857,7 +2869,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, goto no_block; } @@ -13041,7 +13067,7 @@ index 8dd0bc50a..8056aa077 100644 /* * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not -@@ -3172,7 +3184,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3182,7 +3194,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, { struct hrtimer_sleeper timeout, *to; struct rt_mutex_waiter rt_waiter; @@ -13050,7 +13076,7 @@ index 8dd0bc50a..8056aa077 100644 union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; -@@ -3193,7 +3205,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3203,7 +3215,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ @@ -13059,7 +13085,7 @@ index 8dd0bc50a..8056aa077 100644 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); if (unlikely(ret != 0)) -@@ -3224,20 +3236,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3234,20 +3246,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); @@ -13126,7 +13152,7 @@ index 8dd0bc50a..8056aa077 100644 /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { -@@ -3246,14 +3293,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3256,14 +3303,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { @@ -13144,7 +13170,7 @@ index 8dd0bc50a..8056aa077 100644 /* * Adjust the return value. It's either -EFAULT or * success (1) but the caller expects 0 for success. 
-@@ -3272,7 +3320,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, +@@ -3282,7 +3330,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); @@ -13177,7 +13203,7 @@ index 8806444a6..acbce92f9 100644 if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 239f5084b..bc59cb61f 100644 +index 227787723..c7bf98810 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1302,6 +1302,8 @@ static int irq_thread(void *data) @@ -13454,7 +13480,7 @@ index fbff25adb..d3466e3ba 100644 +} +early_initcall(irq_work_init_threads); diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c -index b9a6f4658..c26219f34 100644 +index ca1a46960..dade489ea 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -984,7 +984,6 @@ void crash_kexec(struct pt_regs *regs) @@ -13466,10 +13492,10 @@ index b9a6f4658..c26219f34 100644 /* diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c -index 35859da8b..dfff31ed6 100644 +index e20c19e3b..777168d58 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c -@@ -138,6 +138,15 @@ KERNEL_ATTR_RO(vmcoreinfo); +@@ -143,6 +143,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_CRASH_CORE */ @@ -13485,7 +13511,7 @@ index 35859da8b..dfff31ed6 100644 /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -@@ -228,6 +237,9 @@ static struct attribute * kernel_attrs[] = { +@@ -233,6 +242,9 @@ static struct attribute * kernel_attrs[] = { #ifndef CONFIG_TINY_RCU &rcu_expedited_attr.attr, &rcu_normal_attr.attr, @@ -13583,10 +13609,10 @@ index 6d11cfb9b..c7fbf737e 100644 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c -index 6cbd2b444..f2f5defaf 100644 +index 7471d85f5..4fb9e6301 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c -@@ -5413,6 +5413,7 @@ static noinstr void check_flags(unsigned long flags) +@@ -5414,6 +5414,7 @@ static noinstr void check_flags(unsigned long flags) } } @@ -13594,7 +13620,7 @@ index 6cbd2b444..f2f5defaf 100644 /* * We dont accurately track softirq state in e.g. * hardirq contexts (such as on 4KSTACKS), so only -@@ -5427,6 +5428,7 @@ static noinstr void check_flags(unsigned long flags) +@@ -5428,6 +5429,7 @@ static noinstr void check_flags(unsigned long flags) DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } @@ -13997,7 +14023,7 @@ index fc549713b..659e93e25 100644 - debug_rt_mutex_print_deadlock(w); -} diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index a82d1176e..8fb866216 100644 +index 9d24d2263..9ff21acc0 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -8,6 +8,11 @@ @@ -14020,8 +14046,8 @@ index a82d1176e..8fb866216 100644 #include "rtmutex_common.h" -@@ -136,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) - WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); +@@ -165,6 +171,13 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock, bool acquire_lock) + } } +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) @@ -14029,11 +14055,12 @@ index a82d1176e..8fb866216 100644 + return waiter && waiter != PI_WAKEUP_INPROGRESS && + waiter != PI_REQUEUE_INPROGRESS; +} ++ + /* * We can speed up the acquire/release, if there's no debugging state to be * set up. 
-@@ -227,7 +239,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, +@@ -263,7 +276,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, * Only use with rt_mutex_waiter_{less,equal}() */ #define task_to_waiter(p) \ @@ -14042,7 +14069,7 @@ index a82d1176e..8fb866216 100644 static inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, -@@ -275,6 +287,27 @@ static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) +@@ -311,6 +324,27 @@ static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b)); } @@ -14070,7 +14097,7 @@ index a82d1176e..8fb866216 100644 static void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { -@@ -353,6 +386,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, +@@ -389,6 +423,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, return debug_rt_mutex_detect_deadlock(waiter, chwalk); } @@ -14085,7 +14112,7 @@ index a82d1176e..8fb866216 100644 /* * Max number of times we'll walk the boosting chain: */ -@@ -360,7 +401,8 @@ int max_lock_depth = 1024; +@@ -396,7 +438,8 @@ int max_lock_depth = 1024; static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { @@ -14095,7 +14122,7 @@ index a82d1176e..8fb866216 100644 } /* -@@ -496,7 +538,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -532,7 +575,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ @@ -14104,7 +14131,7 @@ index a82d1176e..8fb866216 100644 goto out_unlock_pi; /* -@@ -579,7 +621,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -615,7 +658,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * walk, we detected a deadlock. */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { @@ -14112,7 +14139,7 @@ index a82d1176e..8fb866216 100644 raw_spin_unlock(&lock->wait_lock); ret = -EDEADLK; goto out_unlock_pi; -@@ -676,13 +717,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -712,13 +754,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * follow here. This is the end of the chain we are walking. */ if (!rt_mutex_owner(lock)) { @@ -14131,7 +14158,7 @@ index a82d1176e..8fb866216 100644 raw_spin_unlock_irq(&lock->wait_lock); return 0; } -@@ -783,9 +827,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, +@@ -819,9 +864,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @task: The task which wants to acquire the lock * @waiter: The waiter that is queued to the lock's wait tree if the * callsite called task_blocked_on_lock(), otherwise NULL @@ -14145,7 +14172,7 @@ index a82d1176e..8fb866216 100644 { lockdep_assert_held(&lock->wait_lock); -@@ -821,12 +867,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +@@ -857,12 +904,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (waiter) { /* @@ -14161,7 +14188,7 @@ index a82d1176e..8fb866216 100644 /* * We can acquire the lock. Remove the waiter from the * lock waiters tree. 
-@@ -844,14 +889,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +@@ -880,14 +926,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (rt_mutex_has_waiters(lock)) { /* @@ -14180,7 +14207,7 @@ index a82d1176e..8fb866216 100644 /* * The current top waiter stays enqueued. We * don't have to change anything in the lock -@@ -898,6 +941,329 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +@@ -934,6 +978,329 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, return 1; } @@ -14317,7 +14344,7 @@ index a82d1176e..8fb866216 100644 + * try_to_take_rt_mutex() sets the waiter bit + * unconditionally. We might have to fix that up: + */ -+ fixup_rt_mutex_waiters(lock); ++ fixup_rt_mutex_waiters(lock, false); + + BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock)); + BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry)); @@ -14510,7 +14537,7 @@ index a82d1176e..8fb866216 100644 /* * Task blocks on lock. * -@@ -930,6 +1296,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +@@ -966,6 +1333,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, return -EDEADLK; raw_spin_lock(&task->pi_lock); @@ -14533,7 +14560,7 @@ index a82d1176e..8fb866216 100644 waiter->task = task; waiter->lock = lock; waiter->prio = task->prio; -@@ -953,7 +1335,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +@@ -989,7 +1372,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, rt_mutex_enqueue_pi(owner, waiter); rt_mutex_adjust_prio(owner); @@ -14542,7 +14569,7 @@ index a82d1176e..8fb866216 100644 chain_walk = 1; } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { chain_walk = 1; -@@ -995,6 +1377,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +@@ -1031,6 +1414,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * Called with lock->wait_lock held and interrupts disabled. 
*/ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, @@ -14550,7 +14577,7 @@ index a82d1176e..8fb866216 100644 struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; -@@ -1034,7 +1417,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, +@@ -1070,7 +1454,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, * Pairs with preempt_enable() in rt_mutex_postunlock(); */ preempt_disable(); @@ -14562,7 +14589,7 @@ index a82d1176e..8fb866216 100644 raw_spin_unlock(¤t->pi_lock); } -@@ -1049,7 +1435,7 @@ static void remove_waiter(struct rt_mutex *lock, +@@ -1085,7 +1472,7 @@ static void remove_waiter(struct rt_mutex *lock, { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); @@ -14571,7 +14598,7 @@ index a82d1176e..8fb866216 100644 lockdep_assert_held(&lock->wait_lock); -@@ -1075,7 +1461,8 @@ static void remove_waiter(struct rt_mutex *lock, +@@ -1111,7 +1498,8 @@ static void remove_waiter(struct rt_mutex *lock, rt_mutex_adjust_prio(owner); /* Store the lock on which owner is blocked or NULL */ @@ -14581,7 +14608,7 @@ index a82d1176e..8fb866216 100644 raw_spin_unlock(&owner->pi_lock); -@@ -1111,26 +1498,28 @@ void rt_mutex_adjust_pi(struct task_struct *task) +@@ -1147,26 +1535,28 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; @@ -14613,7 +14640,7 @@ index a82d1176e..8fb866216 100644 } /** -@@ -1146,7 +1535,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) +@@ -1182,7 +1572,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, @@ -14623,7 +14650,7 @@ index a82d1176e..8fb866216 100644 { int ret = 0; -@@ -1155,24 +1545,23 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, +@@ -1191,24 +1582,23 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, if (try_to_take_rt_mutex(lock, current, waiter)) break; @@ -14659,7 +14686,7 @@ index a82d1176e..8fb866216 100644 schedule(); raw_spin_lock_irq(&lock->wait_lock); -@@ -1193,43 +1582,110 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, +@@ -1229,43 +1619,110 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, if (res != -EDEADLOCK || detect_deadlock) return; @@ -14699,15 +14726,15 @@ index a82d1176e..8fb866216 100644 + * Not quite done after calling ww_acquire_done() ? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); - -- rt_mutex_init_waiter(&waiter); ++ + if (ww_ctx->contending_lock) { + /* + * After -EDEADLK you tried to + * acquire a different ww_mutex? Bad! + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); -+ + +- rt_mutex_init_waiter(&waiter); + /* + * You called ww_mutex_lock after receiving -EDEADLK, + * but 'forgot' to unlock everything else first? @@ -14793,7 +14820,7 @@ index a82d1176e..8fb866216 100644 return 0; } -@@ -1239,16 +1695,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, +@@ -1275,16 +1732,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, if (unlikely(timeout)) hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); @@ -14825,10 +14852,10 @@ index a82d1176e..8fb866216 100644 } /* -@@ -1256,6 +1722,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, +@@ -1292,7 +1759,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, * unconditionally. We might have to fix that up. 
*/ - fixup_rt_mutex_waiters(lock); + fixup_rt_mutex_waiters(lock, true); + return ret; +} + @@ -14846,7 +14873,7 @@ index a82d1176e..8fb866216 100644 + int ret = 0; + + rt_mutex_init_waiter(&waiter, false); -+ + + /* + * Technically we could use raw_spin_[un]lock_irq() here, but this can + * be called in early boot if the cmpxchg() fast path is disabled @@ -14859,10 +14886,10 @@ index a82d1176e..8fb866216 100644 + + ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx, + &waiter); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -@@ -1316,7 +1812,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) + /* Remove pending timer: */ +@@ -1352,7 +1848,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) * Return whether the current task needs to call rt_mutex_postunlock(). */ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, @@ -14872,7 +14899,7 @@ index a82d1176e..8fb866216 100644 { unsigned long flags; -@@ -1370,7 +1867,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +@@ -1406,7 +1903,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * * Queue the next waiter for wakeup once we release the wait_lock. */ @@ -14881,7 +14908,7 @@ index a82d1176e..8fb866216 100644 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); return true; /* call rt_mutex_postunlock() */ -@@ -1384,29 +1881,16 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +@@ -1420,29 +1917,16 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, */ static inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, @@ -14915,7 +14942,7 @@ index a82d1176e..8fb866216 100644 } static inline int -@@ -1422,9 +1906,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, +@@ -1458,9 +1942,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, /* * Performs the wakeup of the top-waiter and re-enables preemption. 
*/ @@ -14928,7 +14955,7 @@ index a82d1176e..8fb866216 100644 /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ preempt_enable(); -@@ -1433,23 +1919,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) +@@ -1469,23 +1955,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) static inline void rt_mutex_fastunlock(struct rt_mutex *lock, bool (*slowfn)(struct rt_mutex *lock, @@ -14980,7 +15007,7 @@ index a82d1176e..8fb866216 100644 } #ifdef CONFIG_DEBUG_LOCK_ALLOC -@@ -1490,16 +1999,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); +@@ -1526,16 +2035,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { @@ -14998,7 +15025,7 @@ index a82d1176e..8fb866216 100644 } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); -@@ -1516,36 +2016,17 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) +@@ -1552,36 +2052,17 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) return __rt_mutex_slowtrylock(lock); } @@ -15043,7 +15070,7 @@ index a82d1176e..8fb866216 100644 /** * rt_mutex_trylock - try to lock a rt_mutex -@@ -1562,10 +2043,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) +@@ -1598,10 +2079,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) { int ret; @@ -15055,7 +15082,7 @@ index a82d1176e..8fb866216 100644 if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); -@@ -1573,6 +2051,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) +@@ -1609,6 +2087,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) } EXPORT_SYMBOL_GPL(rt_mutex_trylock); @@ -15067,7 +15094,7 @@ index a82d1176e..8fb866216 100644 /** * rt_mutex_unlock - unlock a rt_mutex * -@@ -1581,16 +2064,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); +@@ -1617,16 +2100,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); void __sched rt_mutex_unlock(struct rt_mutex *lock) { mutex_release(&lock->dep_map, _RET_IP_); @@ -15088,7 +15115,7 @@ index a82d1176e..8fb866216 100644 { lockdep_assert_held(&lock->wait_lock); -@@ -1607,23 +2087,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, +@@ -1643,23 +2123,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, * avoid inversion prior to the wakeup. preempt_disable() * therein pairs with rt_mutex_postunlock(). 
*/ @@ -15127,7 +15154,7 @@ index a82d1176e..8fb866216 100644 } /** -@@ -1637,9 +2129,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) +@@ -1673,9 +2165,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) void rt_mutex_destroy(struct rt_mutex *lock) { WARN_ON(rt_mutex_is_locked(lock)); @@ -15137,7 +15164,7 @@ index a82d1176e..8fb866216 100644 } EXPORT_SYMBOL_GPL(rt_mutex_destroy); -@@ -1662,7 +2151,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, +@@ -1698,7 +2187,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, if (name && key) debug_rt_mutex_init(lock, name, key); } @@ -15146,7 +15173,7 @@ index a82d1176e..8fb866216 100644 /** * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a -@@ -1682,6 +2171,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, +@@ -1718,6 +2207,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL, NULL); @@ -15161,8 +15188,8 @@ index a82d1176e..8fb866216 100644 debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } -@@ -1704,6 +2201,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) - rt_mutex_set_owner(lock, NULL); +@@ -1740,6 +2237,27 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) + rt_mutex_clear_owner(lock); } +static void fixup_rt_mutex_blocked(struct rt_mutex *lock) @@ -15184,11 +15211,12 @@ index a82d1176e..8fb866216 100644 + tsk->pi_blocked_on = NULL; + raw_spin_unlock(&tsk->pi_lock); +} ++ + /** * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task * @lock: the rt_mutex to take -@@ -1734,6 +2251,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, +@@ -1770,6 +2288,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, if (try_to_take_rt_mutex(lock, task, NULL)) return 1; @@ -15223,7 +15251,7 @@ index a82d1176e..8fb866216 100644 /* We enforce deadlock detection for futexes */ ret = task_blocks_on_rt_mutex(lock, waiter, task, RT_MUTEX_FULL_CHAINWALK); -@@ -1748,7 +2293,8 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, +@@ -1784,7 +2330,8 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } @@ -15233,7 +15261,7 @@ index a82d1176e..8fb866216 100644 return ret; } -@@ -1833,12 +2379,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, +@@ -1869,12 +2416,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, raw_spin_lock_irq(&lock->wait_lock); /* sleep on the mutex */ set_current_state(TASK_INTERRUPTIBLE); @@ -15243,14 +15271,14 @@ index a82d1176e..8fb866216 100644 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might * have to fix that up. 
*/ - fixup_rt_mutex_waiters(lock); -+ if (ret) + fixup_rt_mutex_waiters(lock, true); ++ if (ret) + fixup_rt_mutex_blocked(lock); + raw_spin_unlock_irq(&lock->wait_lock); return ret; -@@ -1900,3 +2449,97 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, +@@ -1936,3 +2486,97 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, return cleanup; } @@ -16105,7 +16133,7 @@ index 000000000..b61edc4dc + __up_write_unlock(sem, WRITER_BIAS - 1, flags); +} diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c -index 976b20b2d..51e1085e4 100644 +index cc5cc889b..f7c909ef1 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -28,6 +28,7 @@ @@ -16116,7 +16144,7 @@ index 976b20b2d..51e1085e4 100644 #include "lock_events.h" /* -@@ -1512,6 +1513,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) +@@ -1494,6 +1495,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) if (tmp & RWSEM_FLAG_WAITERS) rwsem_downgrade_wake(sem); } @@ -16124,7 +16152,7 @@ index 976b20b2d..51e1085e4 100644 /* * lock for reading -@@ -1675,7 +1677,9 @@ void down_read_non_owner(struct rw_semaphore *sem) +@@ -1657,7 +1659,9 @@ void down_read_non_owner(struct rw_semaphore *sem) { might_sleep(); __down_read(sem); @@ -16134,7 +16162,7 @@ index 976b20b2d..51e1085e4 100644 } EXPORT_SYMBOL(down_read_non_owner); -@@ -1704,7 +1708,9 @@ EXPORT_SYMBOL(down_write_killable_nested); +@@ -1686,7 +1690,9 @@ EXPORT_SYMBOL(down_write_killable_nested); void up_read_non_owner(struct rw_semaphore *sem) { @@ -16384,7 +16412,7 @@ index b1c155328..059c3d876 100644 static inline bool printk_percpu_data_ready(void) { return false; } #endif /* CONFIG_PRINTK */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index ecd28d4fa..e95b00f24 100644 +index ffd7f90b8..f068738c7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -44,6 +44,9 @@ @@ -16405,7 +16433,7 @@ index ecd28d4fa..e95b00f24 100644 int console_printk[4] = { CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ -@@ -227,19 +229,7 @@ static int nr_ext_console_drivers; +@@ -233,19 +235,7 @@ static int nr_ext_console_drivers; static int __down_trylock_console_sem(unsigned long ip) { @@ -16426,7 +16454,7 @@ index ecd28d4fa..e95b00f24 100644 return 1; mutex_acquire(&console_lock_dep_map, 0, 1, ip); return 0; -@@ -248,13 +238,9 @@ static int __down_trylock_console_sem(unsigned long ip) +@@ -254,13 +244,9 @@ static int __down_trylock_console_sem(unsigned long ip) static void __up_console_sem(unsigned long ip) { @@ -16440,7 +16468,7 @@ index ecd28d4fa..e95b00f24 100644 } #define up_console_sem() __up_console_sem(_RET_IP_) -@@ -268,11 +254,6 @@ static void __up_console_sem(unsigned long ip) +@@ -279,11 +265,6 @@ static bool panic_in_progress(void) */ static int console_locked, console_suspended; @@ -16452,7 +16480,7 @@ index ecd28d4fa..e95b00f24 100644 /* * Array of consoles built from command line options (console=) */ -@@ -357,61 +338,43 @@ enum log_flags { +@@ -368,61 +349,43 @@ enum log_flags { LOG_CONT = 8, /* text is a fragment of a continuation line */ }; @@ -16536,7 +16564,7 @@ index ecd28d4fa..e95b00f24 100644 #define LOG_LINE_MAX (1024 - PREFIX_MAX) #define LOG_LEVEL(v) ((v) & 0x07) -@@ -449,11 +412,36 @@ static struct printk_ringbuffer *prb = &printk_rb_static; +@@ -460,11 +423,36 @@ static struct printk_ringbuffer *prb = &printk_rb_static; */ static bool __printk_percpu_data_ready __read_mostly; @@ -16574,7 +16602,7 @@ index ecd28d4fa..e95b00f24 100644 /* Return log buffer address */ char 
*log_buf_addr_get(void) { -@@ -495,52 +483,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) +@@ -506,52 +494,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) *trunc_msg_len = 0; } @@ -16627,7 +16655,7 @@ index ecd28d4fa..e95b00f24 100644 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); static int syslog_action_restricted(int type) -@@ -669,7 +611,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, +@@ -680,7 +622,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, /* /dev/kmsg - userspace message inject/listen interface */ struct devkmsg_user { @@ -16636,7 +16664,7 @@ index ecd28d4fa..e95b00f24 100644 struct ratelimit_state rs; struct mutex lock; char buf[CONSOLE_EXT_LOG_MAX]; -@@ -770,27 +712,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, +@@ -781,27 +723,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, if (ret) return ret; @@ -16668,7 +16696,7 @@ index ecd28d4fa..e95b00f24 100644 goto out; } -@@ -799,8 +736,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, +@@ -810,8 +747,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, &r->text_buf[0], r->info->text_len, &r->info->dev_info); @@ -16678,7 +16706,7 @@ index ecd28d4fa..e95b00f24 100644 if (len > count) { ret = -EINVAL; -@@ -835,11 +771,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) +@@ -846,11 +782,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) if (offset) return -ESPIPE; @@ -16691,7 +16719,7 @@ index ecd28d4fa..e95b00f24 100644 break; case SEEK_DATA: /* -@@ -847,16 +782,15 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) +@@ -858,16 +793,15 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) * like issued by 'dmesg -c'. Reading /dev/kmsg itself * changes no global state, and does not clear anything. 
*/ @@ -16710,7 +16738,7 @@ index ecd28d4fa..e95b00f24 100644 return ret; } -@@ -871,15 +805,13 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait) +@@ -882,15 +816,13 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait) poll_wait(file, &log_wait, wait); @@ -16728,7 +16756,7 @@ index ecd28d4fa..e95b00f24 100644 return ret; } -@@ -912,9 +844,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) +@@ -923,9 +855,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) prb_rec_init_rd(&user->record, &user->info, &user->text_buf[0], sizeof(user->text_buf)); @@ -16739,7 +16767,7 @@ index ecd28d4fa..e95b00f24 100644 file->private_data = user; return 0; -@@ -1006,6 +936,9 @@ void log_buf_vmcoreinfo_setup(void) +@@ -1017,6 +947,9 @@ void log_buf_vmcoreinfo_setup(void) VMCOREINFO_SIZE(atomic_long_t); VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter); @@ -16749,7 +16777,7 @@ index ecd28d4fa..e95b00f24 100644 } #endif -@@ -1077,9 +1010,6 @@ static inline void log_buf_add_cpu(void) {} +@@ -1088,9 +1021,6 @@ static inline void log_buf_add_cpu(void) {} static void __init set_percpu_data_ready(void) { @@ -16759,7 +16787,7 @@ index ecd28d4fa..e95b00f24 100644 __printk_percpu_data_ready = true; } -@@ -1119,7 +1049,6 @@ void __init setup_log_buf(int early) +@@ -1130,7 +1060,6 @@ void __init setup_log_buf(int early) struct printk_record r; size_t new_descs_size; size_t new_infos_size; @@ -16767,7 +16795,7 @@ index ecd28d4fa..e95b00f24 100644 char *new_log_buf; unsigned int free; u64 seq; -@@ -1177,8 +1106,6 @@ void __init setup_log_buf(int early) +@@ -1188,8 +1117,6 @@ void __init setup_log_buf(int early) new_descs, ilog2(new_descs_count), new_infos); @@ -16776,7 +16804,7 @@ index ecd28d4fa..e95b00f24 100644 log_buf_len = new_log_buf_len; log_buf = new_log_buf; new_log_buf_len = 0; -@@ -1194,8 +1121,6 @@ void __init setup_log_buf(int early) +@@ -1205,8 +1132,6 @@ void __init setup_log_buf(int early) */ prb = &printk_rb_dynamic; @@ -16785,7 +16813,7 @@ index ecd28d4fa..e95b00f24 100644 if (seq != prb_next_seq(&printk_rb_static)) { pr_err("dropped %llu messages\n", prb_next_seq(&printk_rb_static) - seq); -@@ -1472,6 +1397,50 @@ static size_t get_record_print_text_size(struct printk_info *info, +@@ -1483,6 +1408,50 @@ static size_t get_record_print_text_size(struct printk_info *info, return ((prefix_len * line_count) + info->text_len + 1); } @@ -16836,7 +16864,7 @@ index ecd28d4fa..e95b00f24 100644 static int syslog_print(char __user *buf, int size) { struct printk_info info; -@@ -1479,19 +1448,19 @@ static int syslog_print(char __user *buf, int size) +@@ -1490,19 +1459,19 @@ static int syslog_print(char __user *buf, int size) char *text; int len = 0; @@ -16860,7 +16888,7 @@ index ecd28d4fa..e95b00f24 100644 break; } if (r.info->seq != syslog_seq) { -@@ -1520,7 +1489,7 @@ static int syslog_print(char __user *buf, int size) +@@ -1531,7 +1500,7 @@ static int syslog_print(char __user *buf, int size) syslog_partial += n; } else n = 0; @@ -16869,7 +16897,7 @@ index ecd28d4fa..e95b00f24 100644 if (!n) break; -@@ -1543,34 +1512,25 @@ static int syslog_print(char __user *buf, int size) +@@ -1554,34 +1523,25 @@ static int syslog_print(char __user *buf, int size) static int syslog_print_all(char __user *buf, int size, bool clear) { struct printk_info info; @@ -16908,7 +16936,7 @@ index ecd28d4fa..e95b00f24 100644 len = 0; prb_for_each_record(seq, prb, seq, &r) { -@@ -1583,20 +1543,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear) +@@ -1594,20 
+1554,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear) break; } @@ -16934,7 +16962,7 @@ index ecd28d4fa..e95b00f24 100644 kfree(text); return len; -@@ -1604,9 +1564,21 @@ static int syslog_print_all(char __user *buf, int size, bool clear) +@@ -1615,9 +1575,21 @@ static int syslog_print_all(char __user *buf, int size, bool clear) static void syslog_clear(void) { @@ -16959,7 +16987,7 @@ index ecd28d4fa..e95b00f24 100644 } int do_syslog(int type, char __user *buf, int len, int source) -@@ -1632,8 +1604,9 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1643,8 +1615,9 @@ int do_syslog(int type, char __user *buf, int len, int source) return 0; if (!access_ok(buf, len)) return -EFAULT; @@ -16970,7 +16998,7 @@ index ecd28d4fa..e95b00f24 100644 if (error) return error; error = syslog_print(buf, len); -@@ -1681,10 +1654,10 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1692,10 +1665,10 @@ int do_syslog(int type, char __user *buf, int len, int source) break; /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: @@ -16983,7 +17011,7 @@ index ecd28d4fa..e95b00f24 100644 return 0; } if (info.seq != syslog_seq) { -@@ -1712,7 +1685,7 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1723,7 +1696,7 @@ int do_syslog(int type, char __user *buf, int len, int source) } error -= syslog_partial; } @@ -16992,7 +17020,7 @@ index ecd28d4fa..e95b00f24 100644 break; /* Size of the log buffer */ case SYSLOG_ACTION_SIZE_BUFFER: -@@ -1731,221 +1704,191 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) +@@ -1742,231 +1715,186 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) return do_syslog(type, buf, len, SYSLOG_FROM_READER); } @@ -17067,7 +17095,7 @@ index ecd28d4fa..e95b00f24 100644 + return false; - sema_init(&console_sem, 1); -+ return true; ++ return true; } -#endif @@ -17121,36 +17149,43 @@ index ecd28d4fa..e95b00f24 100644 + struct printk_info info; + struct printk_record r; + size_t text_len; ++ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf)); - raw_spin_lock(&console_owner_lock); - waiter = READ_ONCE(console_waiter); - console_owner = NULL; - raw_spin_unlock(&console_owner_lock); -+ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf)); ++ if (!prb_read_valid(prb, *seq, &r)) ++ return false; - if (!waiter) { - spin_release(&console_owner_dep_map, _THIS_IP_); - return 0; - } -+ if (!prb_read_valid(prb, *seq, &r)) -+ return false; ++ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); - /* The waiter is now free to continue */ - WRITE_ONCE(console_waiter, false); -+ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); - -- spin_release(&console_owner_dep_map, _THIS_IP_); + if (!call_sync_console_driver(con, &con->sync_buf[0], text_len)) + return false; +- spin_release(&console_owner_dep_map, _THIS_IP_); ++ *seq = r.info->seq; ++ touch_softlockup_watchdog_sync(); ++ clocksource_touch_watchdog(); ++ rcu_cpu_stall_reset(); ++ touch_nmi_watchdog(); + - /* - * Hand off console_lock to waiter. The waiter will perform - * the up(). After this, the waiter is the console_lock owner. 
- */ - mutex_release(&console_lock_dep_map, _THIS_IP_); - return 1; --} -+ *seq = r.info->seq; ++ if (text_len) ++ printk_delay(r.info->level); ++ return true; + } -/** - * console_trylock_spinning - try to get console_lock by busy waiting @@ -17163,25 +17198,48 @@ index ecd28d4fa..e95b00f24 100644 - * Return: 1 if we got the lock, 0 othrewise - */ -static int console_trylock_spinning(void) --{ ++static void print_sync_until(struct console *con, u64 seq) + { - struct task_struct *owner = NULL; - bool waiter; - bool spin = false; - unsigned long flags; -+ touch_softlockup_watchdog_sync(); -+ clocksource_touch_watchdog(); -+ rcu_cpu_stall_reset(); -+ touch_nmi_watchdog(); - +- - if (console_trylock()) - return 1; -+ if (text_len) -+ printk_delay(r.info->level); ++ unsigned int flags; ++ u64 printk_seq; -- printk_safe_enter_irqsave(flags); -+ return true; +- /* +- * It's unsafe to spin once a panic has begun. If we are the +- * panic CPU, we may have already halted the owner of the +- * console_sem. If we are not the panic CPU, then we should +- * avoid taking console_sem, so the panic CPU has a better +- * chance of cleanly acquiring it later. +- */ +- if (panic_in_progress()) +- return 0; ++ console_atomic_lock(&flags); ++ for (;;) { ++ printk_seq = atomic64_read(&con->printk_seq); ++ if (printk_seq >= seq) ++ break; ++ if (!print_sync(con, &printk_seq)) ++ break; ++ atomic64_set(&con->printk_seq, printk_seq + 1); ++ } ++ console_atomic_unlock(flags); +} +- printk_safe_enter_irqsave(flags); ++#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) ++void zap_locks(void) ++{ ++// if (raw_spin_is_locked(&logbuf_lock)) { ++// debug_locks_off(); ++// raw_spin_lock_init(&logbuf_lock); ++// } + - raw_spin_lock(&console_owner_lock); - owner = READ_ONCE(console_owner); - waiter = READ_ONCE(console_waiter); @@ -17190,10 +17248,11 @@ index ecd28d4fa..e95b00f24 100644 - spin = true; - } - raw_spin_unlock(&console_owner_lock); -+static void print_sync_until(struct console *con, u64 seq) -+{ -+ unsigned int flags; -+ u64 printk_seq; ++// if (raw_spin_is_locked(&console_owner_lock)) { ++// raw_spin_lock_init(&console_owner_lock); ++// } ++// console_owner = NULL; ++// console_waiter = false; - /* - * If there is an active printk() writing to the @@ -17207,17 +17266,10 @@ index ecd28d4fa..e95b00f24 100644 - if (!spin) { - printk_safe_exit_irqrestore(flags); - return 0; -+ console_atomic_lock(&flags); -+ for (;;) { -+ printk_seq = atomic64_read(&con->printk_seq); -+ if (printk_seq >= seq) -+ break; -+ if (!print_sync(con, &printk_seq)) -+ break; -+ atomic64_set(&con->printk_seq, printk_seq + 1); - } -+ console_atomic_unlock(flags); -+} +- } ++// sema_init(&console_sem, 1); ++ } ++#endif - /* We spin waiting for the owner to release us */ - spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); @@ -17225,13 +17277,11 @@ index ecd28d4fa..e95b00f24 100644 - while (READ_ONCE(console_waiter)) - cpu_relax(); - spin_release(&console_owner_dep_map, _THIS_IP_); -+#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) -+void zap_locks(void) -+{ -+// if (raw_spin_is_locked(&logbuf_lock)) { -+// debug_locks_off(); -+// raw_spin_lock_init(&logbuf_lock); -+// } ++#ifdef CONFIG_PRINTK_NMI ++#define NUM_RECURSION_CTX 2 ++#else ++#define NUM_RECURSION_CTX 1 ++#endif - printk_safe_exit_irqrestore(flags); - /* @@ -17241,17 +17291,14 @@ index ecd28d4fa..e95b00f24 100644 - * complain. 
- */ - mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); -+// if (raw_spin_is_locked(&console_owner_lock)) { -+// raw_spin_lock_init(&console_owner_lock); -+// } ++struct printk_recursion { ++ char count[NUM_RECURSION_CTX]; ++}; - return 1; -+// console_owner = NULL; -+// console_waiter = false; -+ -+// sema_init(&console_sem, 1); - } -+#endif +-} ++static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion); ++static char printk_recursion_count[NUM_RECURSION_CTX]; -/* - * Call the console drivers, asking them to write out @@ -17260,25 +17307,24 @@ index ecd28d4fa..e95b00f24 100644 - */ -static void call_console_drivers(const char *ext_text, size_t ext_len, - const char *text, size_t len) --{ ++static char *printk_recursion_counter(void) + { - static char dropped_text[64]; - size_t dropped_len = 0; - struct console *con; -+#ifdef CONFIG_PRINTK_NMI -+#define NUM_RECURSION_CTX 2 -+#else -+#define NUM_RECURSION_CTX 1 -+#endif ++ struct printk_recursion *rec; ++ char *count; - trace_console_rcuidle(text, len); -+struct printk_recursion { -+ char count[NUM_RECURSION_CTX]; -+}; ++ if (!printk_percpu_data_ready()) { ++ count = &printk_recursion_count[0]; ++ } else { ++ rec = this_cpu_ptr(&percpu_printk_recursion); - if (!console_drivers) - return; -+static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion); -+static char printk_recursion_count[NUM_RECURSION_CTX]; ++ count = &rec->count[0]; ++ } - if (console_dropped) { - dropped_len = snprintf(dropped_text, sizeof(dropped_text), @@ -17286,10 +17332,10 @@ index ecd28d4fa..e95b00f24 100644 - console_dropped); - console_dropped = 0; - } -+static char *printk_recursion_counter(void) -+{ -+ struct printk_recursion *rec; -+ char *count; ++#ifdef CONFIG_PRINTK_NMI ++ if (in_nmi()) ++ count++; ++#endif - for_each_console(con) { - if (exclusive_console && con != exclusive_console) @@ -17308,58 +17354,45 @@ index ecd28d4fa..e95b00f24 100644 - con->write(con, dropped_text, dropped_len); - con->write(con, text, len); - } -+ if (!printk_percpu_data_ready()) { -+ count = &printk_recursion_count[0]; -+ } else { -+ rec = this_cpu_ptr(&percpu_printk_recursion); -+ -+ count = &rec->count[0]; - } --} - --int printk_delay_msec __read_mostly; -+#ifdef CONFIG_PRINTK_NMI -+ if (in_nmi()) -+ count++; -+#endif -+ +- } + return count; -+} + } --static inline void printk_delay(void) +-int printk_delay_msec __read_mostly; +static bool printk_enter_irqsave(unsigned long *flags) - { -- if (unlikely(printk_delay_msec)) { -- int m = printk_delay_msec; ++{ + char *count; -- while (m--) { -- mdelay(1); -- touch_nmi_watchdog(); -- } +-static inline void printk_delay(void) + local_irq_save(*flags); + count = printk_recursion_counter(); + /* Only 1 level of recursion allowed. */ + if (*count > 1) { + local_irq_restore(*flags); + return false; - } ++ } + (*count)++; + + return true; +} -+ +static void printk_exit_irqrestore(unsigned long flags) -+{ + { +- if (unlikely(printk_delay_msec)) { +- int m = printk_delay_msec; + char *count; -+ + +- while (m--) { +- mdelay(1); +- touch_nmi_watchdog(); +- } +- } + count = printk_recursion_counter(); + (*count)--; + local_irq_restore(flags); } static inline u32 printk_caller_id(void) -@@ -1954,144 +1897,248 @@ static inline u32 printk_caller_id(void) +@@ -1975,93 +1903,214 @@ static inline u32 printk_caller_id(void) 0x80000000 + raw_smp_processor_id(); } @@ -17408,6 +17441,7 @@ index ecd28d4fa..e95b00f24 100644 - prb_commit(&e); - } - return text_len; +- } + switch (kern_level) { + case '0' ... 
'7': + if (level && *level == LOGLEVEL_DEFAULT) @@ -17416,7 +17450,7 @@ index ecd28d4fa..e95b00f24 100644 + case 'c': /* KERN_CONT */ + if (lflags) + *lflags |= LOG_CONT; - } ++ } + + prefix_len += 2; + text += 2; @@ -17425,7 +17459,7 @@ index ecd28d4fa..e95b00f24 100644 - /* Store it in the record log */ - return log_store(caller_id, facility, level, lflags, 0, - dev_info, text, text_len); -+ return prefix_len; ++return prefix_len; } -/* Must be called under logbuf_lock. */ @@ -17460,8 +17494,8 @@ index ecd28d4fa..e95b00f24 100644 - /* strip kernel syslog prefix and extract log level or control flags */ + /* Strip log level and control flags. */ if (facility == 0) { -- int kern_level; -- + int kern_level; + - while ((kern_level = printk_get_level(text)) != 0) { - switch (kern_level) { - case '0' ... '7': @@ -17493,11 +17527,17 @@ index ecd28d4fa..e95b00f24 100644 } -asmlinkage int vprintk_emit(int facility, int level, +- const struct dev_printk_info *dev_info, +- const char *fmt, va_list args) +__printf(4, 0) +static int vprintk_store(int facility, int level, + const struct dev_printk_info *dev_info, + const char *fmt, va_list args) -+{ + { +- int printed_len; +- bool in_sched = false; +- unsigned long flags; +- + const u32 caller_id = printk_caller_id(); + struct prb_reserved_entry e; + enum log_flags lflags = 0; @@ -17623,26 +17663,27 @@ index ecd28d4fa..e95b00f24 100644 +} + +asmlinkage int vprintk_emit(int facility, int level, - const struct dev_printk_info *dev_info, - const char *fmt, va_list args) - { - int printed_len; -- bool in_sched = false; -- unsigned long flags; - ++ const struct dev_printk_info *dev_info, ++ const char *fmt, va_list args) ++{ ++ int printed_len; ++ /* Suppress unimportant messages after panic happens */ if (unlikely(suppress_printk)) return 0; +@@ -2070,53 +2119,37 @@ asmlinkage int vprintk_emit(int facility, int level, + atomic_read(&panic_cpu) != raw_smp_processor_id()) + return 0; - if (level == LOGLEVEL_SCHED) { -+ if (level == LOGLEVEL_SCHED) ++ if (level == LOGLEVEL_SCHED) level = LOGLEVEL_DEFAULT; - in_sched = true; - } -- + - boot_delay_msec(level); - printk_delay(); - +- - /* This stops the holder of console_sem just where we want him */ - logbuf_lock_irqsave(flags); printed_len = vprintk_store(facility, level, dev_info, fmt, args); @@ -17677,14 +17718,10 @@ index ecd28d4fa..e95b00f24 100644 { - return vprintk_func(fmt, args); + return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); - } --EXPORT_SYMBOL(vprintk); - --int vprintk_default(const char *fmt, va_list args) ++} +__printf(1, 0) +static int vprintk_func(const char *fmt, va_list args) - { -- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); ++{ +#ifdef CONFIG_KGDB_KDB + /* Allow to pass printk() to kdb but avoid a recursion. */ + if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) @@ -17692,17 +17729,20 @@ index ecd28d4fa..e95b00f24 100644 +#endif + return vprintk_default(fmt, args); } --EXPORT_SYMBOL_GPL(vprintk_default); -+ +-EXPORT_SYMBOL(vprintk); + +-int vprintk_default(const char *fmt, va_list args) +asmlinkage int vprintk(const char *fmt, va_list args) -+{ + { +- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); + return vprintk_func(fmt, args); -+} + } +-EXPORT_SYMBOL_GPL(vprintk_default); +EXPORT_SYMBOL(vprintk); /** * printk - print a kernel message -@@ -2127,38 +2174,158 @@ asmlinkage __visible int printk(const char *fmt, ...) +@@ -2152,38 +2185,158 @@ asmlinkage __visible int printk(const char *fmt, ...) 
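
The storing path above strips any leading "\001<x>" prefix bytes before the message text is copied into the ring buffer: a digit selects the record's log level and 'c' marks a continuation of the previous record. A self-contained model of that parsing follows; the constants and struct below are local to the sketch and merely stand in for the kernel's KERN_SOH/LOG_CONT handling.

#include <stdbool.h>
#include <stdio.h>

#define MODEL_SOH	'\001'
#define LEVEL_DEFAULT	(-1)

struct parsed_msg {
	int level;		/* 0..7, or LEVEL_DEFAULT if none given */
	bool cont;		/* message continues the previous record */
	const char *text;	/* body with the prefix stripped */
};

static struct parsed_msg parse_prefix(const char *text)
{
	struct parsed_msg m = { .level = LEVEL_DEFAULT, .cont = false, .text = text };

	while (m.text[0] == MODEL_SOH && m.text[1] != '\0') {
		char c = m.text[1];

		if (c >= '0' && c <= '7') {
			if (m.level == LEVEL_DEFAULT)
				m.level = c - '0';	/* first level wins */
		} else if (c == 'c') {
			m.cont = true;			/* continuation line */
		} else {
			break;				/* not a recognised prefix */
		}
		m.text += 2;				/* drop the two prefix bytes */
	}
	return m;
}

int main(void)
{
	struct parsed_msg m = parse_prefix("\001" "4" "example warning\n");

	printf("level=%d cont=%d text=%s", m.level, m.cont, m.text);
	return 0;
}
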
} EXPORT_SYMBOL(printk); @@ -17751,16 +17791,16 @@ index ecd28d4fa..e95b00f24 100644 + seq = atomic64_read(&con->printk_seq); + + prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX); -+ + +-static size_t record_print_text(const struct printk_record *r, +- bool syslog, bool time) + for (;;) { + error = wait_event_interruptible(log_wait, + prb_read_valid(prb, seq, &r) || kthread_should_stop()); + + if (kthread_should_stop()) + break; - --static size_t record_print_text(const struct printk_record *r, -- bool syslog, bool time) ++ + if (error) + continue; + @@ -17885,7 +17925,7 @@ index ecd28d4fa..e95b00f24 100644 #endif /* CONFIG_PRINTK */ -@@ -2403,34 +2570,6 @@ int is_console_locked(void) +@@ -2428,21 +2581,7 @@ int is_console_locked(void) } EXPORT_SYMBOL(is_console_locked); @@ -17896,7 +17936,7 @@ index ecd28d4fa..e95b00f24 100644 -static int have_callable_console(void) -{ - struct console *con; -- + - for_each_console(con) - if ((con->flags & CON_ENABLED) && - (con->flags & CON_ANYTIME)) @@ -17904,7 +17944,13 @@ index ecd28d4fa..e95b00f24 100644 - - return 0; -} -- + + /* + * Return true when this CPU should unlock console_sem without pushing all +@@ -2463,17 +2602,7 @@ static bool abandon_console_lock_in_panic(void) + return atomic_read(&panic_cpu) != raw_smp_processor_id(); + } + -/* - * Can we actually use the console at this time on this cpu? - * @@ -17916,18 +17962,19 @@ index ecd28d4fa..e95b00f24 100644 -{ - return cpu_online(raw_smp_processor_id()) || have_callable_console(); -} -- ++ + /** * console_unlock - unlock the console system - * -@@ -2447,142 +2586,14 @@ static inline int can_use_console(void) +@@ -2491,154 +2620,15 @@ static inline int can_use_console(void) */ void console_unlock(void) { - static char ext_text[CONSOLE_EXT_LOG_MAX]; - static char text[LOG_LINE_MAX + PREFIX_MAX]; +- static int panic_console_dropped; - unsigned long flags; -- bool do_cond_resched, retry; +- bool do_cond_resched, retry, locked = false; - struct printk_info info; - struct printk_record r; - @@ -17973,6 +18020,7 @@ index ecd28d4fa..e95b00f24 100644 - - printk_safe_enter_irqsave(flags); - raw_spin_lock(&logbuf_lock); +- locked = true; -skip: - if (!prb_read_valid(prb, console_seq, &r)) - break; @@ -17980,6 +18028,10 @@ index ecd28d4fa..e95b00f24 100644 - if (console_seq != r.info->seq) { - console_dropped += r.info->seq - console_seq; - console_seq = r.info->seq; +- if (panic_in_progress() && panic_console_dropped++ > 10) { +- suppress_panic_printk = 1; +- pr_warn_once("Too many dropped messages. Supress message on non-panic CPUs to prevent livelock.\n"); +- } - } - - if (suppress_message_printing(r.info->level)) { @@ -18016,6 +18068,7 @@ index ecd28d4fa..e95b00f24 100644 - console_msg_format & MSG_FORMAT_SYSLOG, - printk_time); - console_seq++; +- locked = false; - raw_spin_unlock(&logbuf_lock); - - /* @@ -18035,6 +18088,10 @@ index ecd28d4fa..e95b00f24 100644 - return; - } - +- /* Allow panic_cpu to take over the consoles safely */ +- if (abandon_console_lock_in_panic()) +- break; +- - printk_safe_exit_irqrestore(flags); - - if (do_cond_resched) @@ -18043,10 +18100,11 @@ index ecd28d4fa..e95b00f24 100644 - console_locked = 0; -- raw_spin_unlock(&logbuf_lock); +- if (likely(locked)) +- raw_spin_unlock(&logbuf_lock); - up_console_sem(); -- + - /* - * Someone could have filled up the buffer again, so re-check if there's - * something to flush. 
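
The per-console printing thread added above sleeps on log_wait until a new record can be read, then formats it and hands it to the console driver while keeping a private sequence position. A rough pthread analogue of that producer/consumer shape is sketched below (build with -pthread); the fixed-size record array and all names are assumptions of the sketch, not the kernel's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RECORDS 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t log_wait = PTHREAD_COND_INITIALIZER;
static const char *records[MAX_RECORDS];
static unsigned int head;		/* next free slot */
static bool stop;

/* Producer side: store a record and wake any sleeping console threads. */
static void emit(const char *msg)
{
	pthread_mutex_lock(&lock);
	if (head < MAX_RECORDS)
		records[head++] = msg;
	pthread_cond_broadcast(&log_wait);
	pthread_mutex_unlock(&lock);
}

/* Consumer side: one thread per console, each with its own position. */
static void *console_thread(void *arg)
{
	unsigned int seq = 0;

	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (seq == head && !stop)
			pthread_cond_wait(&log_wait, &lock);
		if (stop && seq == head)
			break;
		printf("console: %s\n", records[seq++]);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, console_thread, NULL);
	emit("first record");
	emit("second record");

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_broadcast(&log_wait);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	return 0;
}
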
In case we cannot trylock the console_sem again, @@ -18058,12 +18116,12 @@ index ecd28d4fa..e95b00f24 100644 - raw_spin_unlock(&logbuf_lock); - printk_safe_exit_irqrestore(flags); - -- if (retry && console_trylock()) +- if (retry && !abandon_console_lock_in_panic() && console_trylock()) - goto again; } EXPORT_SYMBOL(console_unlock); -@@ -2632,23 +2643,20 @@ void console_unblank(void) +@@ -2688,23 +2678,20 @@ void console_unblank(void) */ void console_flush_on_panic(enum con_flush_mode mode) { @@ -18098,7 +18156,7 @@ index ecd28d4fa..e95b00f24 100644 console_unlock(); } EXPORT_SYMBOL(console_flush_on_panic); -@@ -2784,7 +2792,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified) +@@ -2840,7 +2827,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified) */ void register_console(struct console *newcon) { @@ -18106,7 +18164,7 @@ index ecd28d4fa..e95b00f24 100644 struct console *bcon = NULL; int err; -@@ -2808,6 +2815,8 @@ void register_console(struct console *newcon) +@@ -2864,6 +2850,8 @@ void register_console(struct console *newcon) } } @@ -18115,7 +18173,7 @@ index ecd28d4fa..e95b00f24 100644 if (console_drivers && console_drivers->flags & CON_BOOT) bcon = console_drivers; -@@ -2849,8 +2858,10 @@ void register_console(struct console *newcon) +@@ -2905,8 +2893,10 @@ void register_console(struct console *newcon) * the real console are the same physical device, it's annoying to * see the beginning boot messages twice */ @@ -18127,7 +18185,7 @@ index ecd28d4fa..e95b00f24 100644 /* * Put this console in the list - keep the -@@ -2872,26 +2883,12 @@ void register_console(struct console *newcon) +@@ -2928,26 +2918,12 @@ void register_console(struct console *newcon) if (newcon->flags & CON_EXTENDED) nr_ext_console_drivers++; @@ -18160,7 +18218,7 @@ index ecd28d4fa..e95b00f24 100644 console_unlock(); console_sysfs_notify(); -@@ -2965,6 +2962,9 @@ int unregister_console(struct console *console) +@@ -3021,6 +2997,9 @@ int unregister_console(struct console *console) console_unlock(); console_sysfs_notify(); @@ -18170,7 +18228,7 @@ index ecd28d4fa..e95b00f24 100644 if (console->exit) res = console->exit(console); -@@ -3047,6 +3047,15 @@ static int __init printk_late_init(void) +@@ -3103,6 +3082,15 @@ static int __init printk_late_init(void) unregister_console(con); } } @@ -18186,7 +18244,7 @@ index ecd28d4fa..e95b00f24 100644 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL, console_cpu_notify); WARN_ON(ret < 0); -@@ -3062,7 +3071,6 @@ late_initcall(printk_late_init); +@@ -3118,7 +3106,6 @@ late_initcall(printk_late_init); * Delayed printk version, for scheduler-internal messages: */ #define PRINTK_PENDING_WAKEUP 0x01 @@ -18194,7 +18252,7 @@ index ecd28d4fa..e95b00f24 100644 static DEFINE_PER_CPU(int, printk_pending); -@@ -3070,14 +3078,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) +@@ -3126,14 +3113,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) { int pending = __this_cpu_xchg(printk_pending, 0); @@ -18210,7 +18268,7 @@ index ecd28d4fa..e95b00f24 100644 } static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = -@@ -3096,25 +3098,10 @@ void wake_up_klogd(void) +@@ -3152,25 +3133,10 @@ void wake_up_klogd(void) preempt_enable(); } @@ -18239,7 +18297,7 @@ index ecd28d4fa..e95b00f24 100644 } int printk_deferred(const char *fmt, ...) 
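
wake_up_klogd() keeps the pattern visible above: a context that must not sleep only sets a per-CPU pending flag and queues irq_work, and the actual wakeup of log readers happens later from the irq_work handler. A minimal model of that deferral follows; the flag value and helper names are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>

#define PENDING_WAKEUP 0x01

static atomic_uint printk_pending;

/* May be called from a context that cannot take sleeping locks. */
static void request_klogd_wakeup(void)
{
	atomic_fetch_or(&printk_pending, PENDING_WAKEUP);
	/* the kernel would also queue an irq_work here */
}

/* Runs later from a safe context (the irq_work handler in the kernel). */
static void run_deferred_work(void)
{
	unsigned int pending = atomic_exchange(&printk_pending, 0);

	if (pending & PENDING_WAKEUP)
		printf("waking log readers\n");
}

int main(void)
{
	request_klogd_wakeup();
	run_deferred_work();	/* performs the wakeup once */
	run_deferred_work();	/* nothing pending, does nothing */
	return 0;
}
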
-@@ -3253,8 +3240,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); +@@ -3309,8 +3275,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); */ void kmsg_dump(enum kmsg_dump_reason reason) { @@ -18267,7 +18325,7 @@ index ecd28d4fa..e95b00f24 100644 rcu_read_lock(); list_for_each_entry_rcu(dumper, &dump_list, list) { -@@ -3272,25 +3277,18 @@ void kmsg_dump(enum kmsg_dump_reason reason) +@@ -3328,25 +3312,18 @@ void kmsg_dump(enum kmsg_dump_reason reason) continue; /* initialize iterator with data about the stored records */ @@ -18298,7 +18356,7 @@ index ecd28d4fa..e95b00f24 100644 * @syslog: include the "<4>" prefixes * @line: buffer to copy the line to * @size: maximum size of the buffer -@@ -3304,11 +3302,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) +@@ -3360,11 +3337,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) * * A return value of FALSE indicates that there are no more records to * read. @@ -18312,7 +18370,7 @@ index ecd28d4fa..e95b00f24 100644 { struct printk_info info; unsigned int line_count; -@@ -3318,16 +3314,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, +@@ -3374,16 +3349,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, prb_rec_init_rd(&r, &info, line, size); @@ -18332,7 +18390,7 @@ index ecd28d4fa..e95b00f24 100644 &info, &line_count)) { goto out; } -@@ -3336,48 +3332,18 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, +@@ -3392,48 +3367,18 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, } @@ -18383,7 +18441,7 @@ index ecd28d4fa..e95b00f24 100644 * @syslog: include the "<4>" prefixes * @buf: buffer to copy the line to * @size: maximum size of the buffer -@@ -3394,116 +3360,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line); +@@ -3450,116 +3395,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line); * A return value of FALSE indicates that there are no more records to * read. */ @@ -19174,10 +19232,10 @@ index b1d7aef10..5e11e30f4 100644 This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index e1bbde264..c77b9873c 100644 +index 81e3550ea..6b47b4627 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -100,8 +100,10 @@ static struct rcu_state rcu_state = { +@@ -101,8 +101,10 @@ static struct rcu_state rcu_state = { static bool dump_tree; module_param(dump_tree, bool, 0444); /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */ @@ -19190,7 +19248,7 @@ index e1bbde264..c77b9873c 100644 static bool rcu_fanout_exact; module_param(rcu_fanout_exact, bool, 0444); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c -index 849f0aa99..dd94a602a 100644 +index 0e3821783..2beba0dfd 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -56,8 +56,10 @@ @@ -19206,10 +19264,10 @@ index 849f0aa99..dd94a602a 100644 #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 468427a25..4f7610fd6 100644 +index 557b01680..7e83eda5e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -65,7 +65,11 @@ const_debug unsigned int sysctl_sched_features = +@@ -67,7 +67,11 @@ const_debug unsigned int sysctl_sched_features = * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ @@ -19221,7 +19279,7 @@ index 468427a25..4f7610fd6 100644 /* * period over which we measure -rt task CPU usage in us. 
-@@ -856,9 +860,15 @@ static bool set_nr_if_polling(struct task_struct *p) +@@ -860,9 +864,15 @@ static bool set_nr_if_polling(struct task_struct *p) #endif #endif @@ -19239,7 +19297,7 @@ index 468427a25..4f7610fd6 100644 /* * Atomically grab the task, if ->wake_q is !nil already it means -@@ -894,7 +904,13 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) +@@ -898,7 +908,13 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) */ void wake_q_add(struct wake_q_head *head, struct task_struct *task) { @@ -19254,7 +19312,7 @@ index 468427a25..4f7610fd6 100644 get_task_struct(task); } -@@ -917,28 +933,39 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) +@@ -921,28 +937,39 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) */ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) { @@ -19299,7 +19357,7 @@ index 468427a25..4f7610fd6 100644 put_task_struct(task); } } -@@ -974,6 +1001,48 @@ void resched_curr(struct rq *rq) +@@ -978,6 +1005,48 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } @@ -19348,7 +19406,7 @@ index 468427a25..4f7610fd6 100644 void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2063,6 +2132,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) +@@ -2070,6 +2139,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) #ifdef CONFIG_SMP @@ -19431,7 +19489,7 @@ index 468427a25..4f7610fd6 100644 /* * Per-CPU kthreads are allowed to run on !active && online CPUs, see * __set_cpus_allowed_ptr() and select_fallback_rq(). -@@ -2072,7 +2217,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) +@@ -2079,7 +2224,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; @@ -19440,7 +19498,7 @@ index 468427a25..4f7610fd6 100644 return cpu_online(cpu); return cpu_active(cpu); -@@ -2117,8 +2262,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, +@@ -2124,8 +2269,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, } struct migration_arg { @@ -19464,7 +19522,7 @@ index 468427a25..4f7610fd6 100644 }; /* -@@ -2151,15 +2309,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, +@@ -2158,15 +2316,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, static int migration_cpu_stop(void *data) { struct migration_arg *arg = data; @@ -19483,7 +19541,7 @@ index 468427a25..4f7610fd6 100644 /* * We need to explicitly wake pending tasks before running * __migrate_task() such that we will not miss enforcing cpus_ptr -@@ -2169,21 +2329,121 @@ static int migration_cpu_stop(void *data) +@@ -2176,21 +2336,121 @@ static int migration_cpu_stop(void *data) raw_spin_lock(&p->pi_lock); rq_lock(rq, &rf); @@ -19608,7 +19666,7 @@ index 468427a25..4f7610fd6 100644 return 0; } -@@ -2191,18 +2451,39 @@ static int migration_cpu_stop(void *data) +@@ -2198,18 +2458,39 @@ static int migration_cpu_stop(void *data) * sched_class::set_cpus_allowed must do the below, but is not required to * actually call this function. 
*/ @@ -19651,7 +19709,7 @@ index 468427a25..4f7610fd6 100644 queued = task_on_rq_queued(p); running = task_current(rq, p); -@@ -2218,7 +2499,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2225,7 +2506,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) if (running) put_prev_task(rq, p); @@ -19660,7 +19718,7 @@ index 468427a25..4f7610fd6 100644 if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); -@@ -2226,6 +2507,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2233,6 +2514,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) set_next_task(rq, p); } @@ -19883,7 +19941,7 @@ index 468427a25..4f7610fd6 100644 /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -2236,7 +2733,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2243,7 +2740,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) * call is not atomic; no spinlocks may be held. */ static int __set_cpus_allowed_ptr(struct task_struct *p, @@ -19893,7 +19951,7 @@ index 468427a25..4f7610fd6 100644 { const struct cpumask *cpu_valid_mask = cpu_active_mask; unsigned int dest_cpu; -@@ -2247,9 +2745,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2254,9 +2752,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, rq = task_rq_lock(p, &rf); update_rq_clock(rq); @@ -19910,7 +19968,7 @@ index 468427a25..4f7610fd6 100644 */ cpu_valid_mask = cpu_online_mask; } -@@ -2258,13 +2761,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2265,13 +2768,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, * Must re-check here, to close a race against __kthread_bind(), * sched_setaffinity() is not guaranteed to observe the flag. */ @@ -19936,7 +19994,7 @@ index 468427a25..4f7610fd6 100644 /* * Picking a ~random cpu helps in cases where we are changing affinity -@@ -2277,7 +2789,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2284,7 +2796,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } @@ -19945,7 +20003,7 @@ index 468427a25..4f7610fd6 100644 if (p->flags & PF_KTHREAD) { /* -@@ -2289,23 +2801,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2296,23 +2808,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, p->nr_cpus_allowed != 1); } @@ -19970,7 +20028,7 @@ index 468427a25..4f7610fd6 100644 out: task_rq_unlock(rq, p, &rf); -@@ -2314,7 +2811,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2321,7 +2818,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { @@ -19979,7 +20037,7 @@ index 468427a25..4f7610fd6 100644 } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); -@@ -2355,6 +2852,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) +@@ -2362,6 +2859,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
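
migrate_disable() and migrate_enable() as added above nest: only the outermost disable pins the task to the CPU it is currently running on, and only the matching outermost enable lets the scheduler move it again. A small counting model of that behaviour follows, with a plain int standing in for the task's allowed-CPU mask; the names are the sketch's own.

#include <assert.h>
#include <stdio.h>

struct model_task {
	int migration_disabled;	/* nesting depth */
	int pinned_cpu;		/* -1 while migration is allowed */
};

static void model_migrate_disable(struct model_task *t, int this_cpu)
{
	if (t->migration_disabled++ == 0)
		t->pinned_cpu = this_cpu;	/* outermost: pin to current CPU */
}

static void model_migrate_enable(struct model_task *t)
{
	assert(t->migration_disabled > 0);
	if (--t->migration_disabled == 0)
		t->pinned_cpu = -1;		/* outermost: allow migration again */
}

int main(void)
{
	struct model_task t = { 0, -1 };

	model_migrate_disable(&t, 2);
	model_migrate_disable(&t, 2);	/* nested call changes nothing */
	model_migrate_enable(&t);
	printf("still pinned to CPU %d\n", t.pinned_cpu);
	model_migrate_enable(&t);
	printf("pinned_cpu now %d (migration allowed)\n", t.pinned_cpu);
	return 0;
}
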
*/ WARN_ON_ONCE(!cpu_online(new_cpu)); @@ -19988,7 +20046,7 @@ index 468427a25..4f7610fd6 100644 #endif trace_sched_migrate_task(p, new_cpu); -@@ -2487,6 +2986,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, +@@ -2494,6 +2993,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, } #endif /* CONFIG_NUMA_BALANCING */ @@ -20007,7 +20065,7 @@ index 468427a25..4f7610fd6 100644 /* * wait_task_inactive - wait for a thread to unschedule. * -@@ -2531,7 +3042,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2538,7 +3049,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! */ while (task_running(rq, p)) { @@ -20016,7 +20074,7 @@ index 468427a25..4f7610fd6 100644 return 0; cpu_relax(); } -@@ -2546,7 +3057,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2553,7 +3064,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; @@ -20026,7 +20084,7 @@ index 468427a25..4f7610fd6 100644 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); -@@ -2580,7 +3092,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2587,7 +3099,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) ktime_t to = NSEC_PER_SEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); @@ -20035,7 +20093,7 @@ index 468427a25..4f7610fd6 100644 continue; } -@@ -2685,6 +3197,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) +@@ -2692,6 +3204,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) } fallthrough; case possible: @@ -20048,7 +20106,7 @@ index 468427a25..4f7610fd6 100644 do_set_cpus_allowed(p, cpu_possible_mask); state = fail; break; -@@ -2719,7 +3237,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -2726,7 +3244,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) { lockdep_assert_held(&p->pi_lock); @@ -20057,7 +20115,7 @@ index 468427a25..4f7610fd6 100644 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else cpu = cpumask_any(p->cpus_ptr); -@@ -2742,6 +3260,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -2749,6 +3267,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) void sched_set_stop_task(int cpu, struct task_struct *stop) { @@ -20065,7 +20123,7 @@ index 468427a25..4f7610fd6 100644 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; struct task_struct *old_stop = cpu_rq(cpu)->stop; -@@ -2757,6 +3276,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) +@@ -2764,6 +3283,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); stop->sched_class = &stop_sched_class; @@ -20086,7 +20144,7 @@ index 468427a25..4f7610fd6 100644 } cpu_rq(cpu)->stop = stop; -@@ -2770,15 +3303,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) +@@ -2777,15 +3310,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) } } @@ -20113,7 +20171,7 @@ index 468427a25..4f7610fd6 100644 static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags) -@@ -3220,7 +3761,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +@@ -3227,7 +3768,7 @@ try_to_wake_up(struct task_struct *p, unsigned int 
state, int wake_flags) int cpu, success = 0; preempt_disable(); @@ -20122,7 +20180,7 @@ index 468427a25..4f7610fd6 100644 /* * We're waking current, this means 'p->on_rq' and 'task_cpu(p) * == smp_processor_id()'. Together this means we can special -@@ -3250,8 +3791,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +@@ -3257,8 +3798,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -20150,7 +20208,7 @@ index 468427a25..4f7610fd6 100644 trace_sched_waking(p); -@@ -3440,6 +3999,18 @@ int wake_up_process(struct task_struct *p) +@@ -3447,6 +4006,18 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); @@ -20169,7 +20227,7 @@ index 468427a25..4f7610fd6 100644 int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); -@@ -3493,6 +4064,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) +@@ -3500,6 +4071,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) init_numa_balancing(clone_flags, p); #ifdef CONFIG_SMP p->wake_entry.u_flags = CSD_TYPE_TTWU; @@ -20177,7 +20235,7 @@ index 468427a25..4f7610fd6 100644 #endif #ifdef CONFIG_BPF_SCHED p->tag = 0; -@@ -3712,6 +4284,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) +@@ -3719,6 +4291,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -20187,7 +20245,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3882,60 +4457,145 @@ __fire_sched_out_preempt_notifiers(struct task_struct *curr, +@@ -3889,60 +4464,145 @@ __fire_sched_out_preempt_notifiers(struct task_struct *curr, notifier->ops->sched_out(notifier, next); } @@ -20371,7 +20429,7 @@ index 468427a25..4f7610fd6 100644 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) { -@@ -3976,6 +4636,22 @@ static inline void finish_lock_switch(struct rq *rq) +@@ -3983,6 +4643,22 @@ static inline void finish_lock_switch(struct rq *rq) # define finish_arch_post_lock_switch() do { } while (0) #endif @@ -20394,7 +20452,7 @@ index 468427a25..4f7610fd6 100644 /** * prepare_task_switch - prepare to switch tasks * @rq: the runqueue preparing to switch -@@ -3998,6 +4674,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, +@@ -4005,6 +4681,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, perf_event_task_sched_out(prev, next); rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); @@ -20402,7 +20460,7 @@ index 468427a25..4f7610fd6 100644 prepare_task(next); prepare_arch_switch(next); } -@@ -4065,6 +4742,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) +@@ -4073,6 +4750,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) finish_lock_switch(rq); finish_arch_post_lock_switch(); kcov_finish_switch(current); @@ -20410,7 +20468,7 @@ index 468427a25..4f7610fd6 100644 fire_sched_in_preempt_notifiers(current); /* -@@ -4081,63 +4759,19 @@ static struct rq *finish_task_switch(struct task_struct *prev) +@@ -4089,63 +4767,19 @@ static struct rq *finish_task_switch(struct task_struct *prev) */ if (mm) { membarrier_mm_sync_core_before_usermode(mm); @@ -20475,7 +20533,7 @@ index 468427a25..4f7610fd6 100644 /** * schedule_tail - first thing a freshly forked thread must call. 
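
The balance-callback helpers above queue deferred work on a singly linked list while the runqueue lock is held, detach the whole list in one splice, and invoke the callbacks only after the lock has been released. A user-space sketch of that pattern follows; struct callback, splice_callbacks() and the mutex are stand-ins for the kernel's types and the rq lock.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct callback {
	struct callback *next;
	void (*func)(struct callback *cb);
};

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct callback *pending;	/* protected by rq_lock */

static void queue_callback(struct callback *cb, void (*func)(struct callback *))
{
	/* caller holds rq_lock */
	cb->func = func;
	cb->next = pending;
	pending = cb;
}

static struct callback *splice_callbacks(void)
{
	/* caller holds rq_lock; detach the whole list at once */
	struct callback *head = pending;

	pending = NULL;
	return head;
}

static void run_callbacks(struct callback *head)
{
	/* called with rq_lock released */
	while (head) {
		struct callback *next = head->next;

		head->func(head);
		head = next;
	}
}

static void push_work(struct callback *cb)
{
	printf("running deferred balance work %p\n", (void *)cb);
}

int main(void)
{
	struct callback a, b;
	struct callback *head;

	pthread_mutex_lock(&rq_lock);
	queue_callback(&a, push_work);
	queue_callback(&b, push_work);
	head = splice_callbacks();
	pthread_mutex_unlock(&rq_lock);

	run_callbacks(head);	/* safe: the scheduler lock is no longer held */
	return 0;
}
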
-@@ -4158,7 +4792,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) +@@ -4166,7 +4800,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) */ rq = finish_task_switch(prev); @@ -20483,7 +20541,7 @@ index 468427a25..4f7610fd6 100644 preempt_enable(); if (current->set_child_tid) -@@ -5314,7 +5947,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +@@ -5327,7 +5960,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * * WARNING: must be called with preemption disabled! */ @@ -20492,7 +20550,7 @@ index 468427a25..4f7610fd6 100644 { struct task_struct *prev, *next; unsigned long *switch_count; -@@ -5367,7 +6000,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5380,7 +6013,7 @@ static void __sched notrace __schedule(bool preempt) * - ptrace_{,un}freeze_traced() can change ->state underneath us. */ prev_state = prev->state; @@ -20501,7 +20559,7 @@ index 468427a25..4f7610fd6 100644 if (signal_pending_state(prev_state, prev)) { prev->state = TASK_RUNNING; } else { -@@ -5402,6 +6035,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5415,6 +6048,7 @@ static void __sched notrace __schedule(bool preempt) next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -20509,7 +20567,7 @@ index 468427a25..4f7610fd6 100644 clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -5427,6 +6061,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5440,6 +6074,7 @@ static void __sched notrace __schedule(bool preempt) */ ++*switch_count; @@ -20517,7 +20575,7 @@ index 468427a25..4f7610fd6 100644 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); trace_sched_switch(preempt, prev, next); -@@ -5435,10 +6070,11 @@ static void __sched notrace __schedule(bool preempt) +@@ -5448,10 +6083,11 @@ static void __sched notrace __schedule(bool preempt) rq = context_switch(rq, prev, next, &rf); } else { rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); @@ -20532,7 +20590,7 @@ index 468427a25..4f7610fd6 100644 } void __noreturn do_task_dead(void) -@@ -5449,7 +6085,7 @@ void __noreturn do_task_dead(void) +@@ -5462,7 +6098,7 @@ void __noreturn do_task_dead(void) /* Tell freezer to ignore us: */ current->flags |= PF_NOFREEZE; @@ -20541,7 +20599,7 @@ index 468427a25..4f7610fd6 100644 BUG(); /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ -@@ -5482,9 +6118,6 @@ static inline void sched_submit_work(struct task_struct *tsk) +@@ -5495,9 +6131,6 @@ static inline void sched_submit_work(struct task_struct *tsk) preempt_enable_no_resched(); } @@ -20551,7 +20609,7 @@ index 468427a25..4f7610fd6 100644 /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. 
-@@ -5510,7 +6143,7 @@ asmlinkage __visible void __sched schedule(void) +@@ -5523,7 +6156,7 @@ asmlinkage __visible void __sched schedule(void) sched_submit_work(tsk); do { preempt_disable(); @@ -20560,7 +20618,7 @@ index 468427a25..4f7610fd6 100644 sched_preempt_enable_no_resched(); } while (need_resched()); sched_update_worker(tsk); -@@ -5538,7 +6171,7 @@ void __sched schedule_idle(void) +@@ -5551,7 +6184,7 @@ void __sched schedule_idle(void) */ WARN_ON_ONCE(current->state); do { @@ -20569,7 +20627,7 @@ index 468427a25..4f7610fd6 100644 } while (need_resched()); } -@@ -5591,7 +6224,7 @@ static void __sched notrace preempt_schedule_common(void) +@@ -5604,7 +6237,7 @@ static void __sched notrace preempt_schedule_common(void) */ preempt_disable_notrace(); preempt_latency_start(1); @@ -20578,7 +20636,7 @@ index 468427a25..4f7610fd6 100644 preempt_latency_stop(1); preempt_enable_no_resched_notrace(); -@@ -5602,6 +6235,30 @@ static void __sched notrace preempt_schedule_common(void) +@@ -5615,6 +6248,30 @@ static void __sched notrace preempt_schedule_common(void) } while (need_resched()); } @@ -20609,7 +20667,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_PREEMPTION /* * This is the entry point to schedule() from in-kernel preemption -@@ -5615,12 +6272,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) +@@ -5628,12 +6285,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) */ if (likely(!preemptible())) return; @@ -20637,7 +20695,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_PREEMPT_DYNAMIC DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); EXPORT_STATIC_CALL(preempt_schedule); -@@ -5648,6 +6319,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -5661,6 +6332,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) if (likely(!preemptible())) return; @@ -20647,7 +20705,7 @@ index 468427a25..4f7610fd6 100644 do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5670,7 +6344,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -5683,7 +6357,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) * an infinite recursion. 
*/ prev_ctx = exception_enter(); @@ -20656,7 +20714,7 @@ index 468427a25..4f7610fd6 100644 exception_exit(prev_ctx); preempt_latency_stop(1); -@@ -5888,7 +6562,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) +@@ -5901,7 +6575,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) do { preempt_disable(); local_irq_enable(); @@ -20665,7 +20723,7 @@ index 468427a25..4f7610fd6 100644 local_irq_disable(); sched_preempt_enable_no_resched(); } while (need_resched()); -@@ -6054,9 +6728,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) +@@ -6067,9 +6741,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) out_unlock: /* Avoid rq from going away on us: */ preempt_disable(); @@ -20679,7 +20737,7 @@ index 468427a25..4f7610fd6 100644 preempt_enable(); } #else -@@ -6299,6 +6975,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6312,6 +6988,7 @@ static int __sched_setscheduler(struct task_struct *p, int oldpolicy = -1, policy = attr->sched_policy; int retval, oldprio, newprio, queued, running; const struct sched_class *prev_class; @@ -20687,7 +20745,7 @@ index 468427a25..4f7610fd6 100644 struct rq_flags rf; int reset_on_fork; int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; -@@ -6553,6 +7230,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6573,6 +7250,7 @@ static int __sched_setscheduler(struct task_struct *p, /* Avoid rq from going away on us: */ preempt_disable(); @@ -20695,7 +20753,7 @@ index 468427a25..4f7610fd6 100644 task_rq_unlock(rq, p, &rf); if (pi) { -@@ -6561,7 +7239,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6582,7 +7260,7 @@ static int __sched_setscheduler(struct task_struct *p, } /* Run balance callbacks after we've adjusted the PI chain: */ @@ -20704,7 +20762,7 @@ index 468427a25..4f7610fd6 100644 preempt_enable(); return 0; -@@ -7056,7 +7734,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +@@ -7077,7 +7755,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) } #endif again: @@ -20713,7 +20771,7 @@ index 468427a25..4f7610fd6 100644 if (!retval) { cpuset_cpus_allowed(p, cpus_allowed); -@@ -7642,7 +8320,7 @@ void __init init_idle(struct task_struct *idle, int cpu) +@@ -7703,7 +8381,7 @@ void __init init_idle(struct task_struct *idle, int cpu) * * And since this is boot we can forgo the serialization. */ @@ -20722,7 +20780,7 @@ index 468427a25..4f7610fd6 100644 #endif /* * We're having a chicken and egg problem, even though we are -@@ -7669,7 +8347,9 @@ void __init init_idle(struct task_struct *idle, int cpu) +@@ -7730,7 +8408,9 @@ void __init init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); @@ -20733,7 +20791,7 @@ index 468427a25..4f7610fd6 100644 /* * The idle tasks have their own, simple scheduling class: */ -@@ -7779,6 +8459,7 @@ void sched_setnuma(struct task_struct *p, int nid) +@@ -7827,6 +8507,7 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -20741,7 +20799,7 @@ index 468427a25..4f7610fd6 100644 /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. 
-@@ -7798,119 +8479,126 @@ void idle_task_exit(void) +@@ -7846,119 +8527,126 @@ void idle_task_exit(void) /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } @@ -20958,7 +21016,7 @@ index 468427a25..4f7610fd6 100644 #endif /* CONFIG_HOTPLUG_CPU */ void set_rq_online(struct rq *rq) -@@ -7998,6 +8686,8 @@ int sched_cpu_activate(unsigned int cpu) +@@ -8046,6 +8734,8 @@ int sched_cpu_activate(unsigned int cpu) struct rq *rq = cpu_rq(cpu); struct rq_flags rf; @@ -20967,7 +21025,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_SCHED_SMT /* * When going up, increment the number of cores with SMT present. -@@ -8033,6 +8723,8 @@ int sched_cpu_activate(unsigned int cpu) +@@ -8084,6 +8774,8 @@ int sched_cpu_activate(unsigned int cpu) int sched_cpu_deactivate(unsigned int cpu) { @@ -20976,7 +21034,7 @@ index 468427a25..4f7610fd6 100644 int ret; set_cpu_active(cpu, false); -@@ -8045,6 +8737,16 @@ int sched_cpu_deactivate(unsigned int cpu) +@@ -8096,6 +8788,16 @@ int sched_cpu_deactivate(unsigned int cpu) */ synchronize_rcu(); @@ -20993,7 +21051,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_SCHED_SMT /* * When going down, decrement the number of cores with SMT present. -@@ -8088,6 +8790,41 @@ int sched_cpu_starting(unsigned int cpu) +@@ -8142,6 +8844,41 @@ int sched_cpu_starting(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU @@ -21035,7 +21093,7 @@ index 468427a25..4f7610fd6 100644 int sched_cpu_dying(unsigned int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -8097,12 +8834,7 @@ int sched_cpu_dying(unsigned int cpu) +@@ -8151,12 +8888,7 @@ int sched_cpu_dying(unsigned int cpu) sched_tick_stop(cpu); rq_lock_irqsave(rq, &rf); @@ -21049,7 +21107,7 @@ index 468427a25..4f7610fd6 100644 rq_unlock_irqrestore(rq, &rf); calc_load_migrate(rq); -@@ -8320,6 +9052,9 @@ void __init sched_init(void) +@@ -8387,6 +9119,9 @@ void __init sched_init(void) INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); #endif @@ -21059,7 +21117,7 @@ index 468427a25..4f7610fd6 100644 #endif /* CONFIG_SMP */ hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); -@@ -8370,7 +9105,7 @@ void __init sched_init(void) +@@ -8437,7 +9172,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { @@ -21068,7 +21126,7 @@ index 468427a25..4f7610fd6 100644 return (nested == preempt_offset); } -@@ -8467,6 +9202,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) +@@ -8534,6 +9269,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) add_taint(TAINT_WARN, LOCKDEP_STILL_OK); } EXPORT_SYMBOL_GPL(__cant_sleep); @@ -21228,10 +21286,10 @@ index ca0eef7d3..02a5aa60f 100644 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, u64 *ut, u64 *st) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index c4c0d760d..14252d5be 100644 +index 71b55d9de..e89bba62a 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -551,7 +551,7 @@ static int push_dl_task(struct rq *rq); +@@ -552,7 +552,7 @@ static int push_dl_task(struct rq *rq); static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) { @@ -21359,7 +21417,7 @@ index c4c0d760d..14252d5be 100644 } /* Assumes rq->lock is held */ -@@ -2532,6 +2548,7 @@ const struct sched_class dl_sched_class +@@ -2544,6 +2560,7 @@ const struct sched_class dl_sched_class .rq_online = rq_online_dl, .rq_offline = rq_offline_dl, .task_woken = task_woken_dl, @@ -21368,10 +21426,10 @@ index c4c0d760d..14252d5be 100644 .task_tick = task_tick_dl, diff 
--git a/kernel/sched/fair.c b/kernel/sched/fair.c -index b8bf7acb9..1dcf17497 100644 +index 273f6844b..bf5233b7d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4673,7 +4673,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4872,7 +4872,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) #endif if (delta_exec > ideal_runtime) { @@ -21380,7 +21438,7 @@ index b8bf7acb9..1dcf17497 100644 /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4697,7 +4697,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4896,7 +4896,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) @@ -21389,7 +21447,7 @@ index b8bf7acb9..1dcf17497 100644 } static void -@@ -4840,7 +4840,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +@@ -5039,7 +5039,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { @@ -21398,7 +21456,7 @@ index b8bf7acb9..1dcf17497 100644 return; } /* -@@ -4989,7 +4989,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) +@@ -5188,7 +5188,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -21407,7 +21465,7 @@ index b8bf7acb9..1dcf17497 100644 } static __always_inline -@@ -5783,7 +5783,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) +@@ -6479,7 +6479,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) @@ -21416,7 +21474,7 @@ index b8bf7acb9..1dcf17497 100644 return; } hrtick_start(rq, delta); -@@ -7705,7 +7705,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -8469,7 +8469,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: @@ -21425,7 +21483,7 @@ index b8bf7acb9..1dcf17497 100644 /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -12578,7 +12578,7 @@ static void task_fork_fair(struct task_struct *p) +@@ -13378,7 +13378,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); @@ -21434,7 +21492,7 @@ index b8bf7acb9..1dcf17497 100644 } se->vruntime -= cfs_rq->min_vruntime; -@@ -12605,7 +12605,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) +@@ -13405,7 +13405,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) @@ -21468,7 +21526,7 @@ index 76fade025..0a20427ef 100644 /* * When doing wakeups, attempt to limit superfluous scans of the LLC domain. 
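
The fair.c hunks above replace resched_curr() with resched_curr_lazy() in the tick and wakeup-preemption paths, so merely exhausting a timeslice raises only a "lazy" reschedule request. Conceptually there are two flags: the ordinary one still forces preemption in the involuntary paths, while the lazy one is honoured only at voluntary scheduling points and on the way back to user space. The sketch below models that distinction with flag names local to the example, not the kernel's TIF bits.

#include <stdbool.h>
#include <stdio.h>

struct model_thread {
	bool need_resched;	/* must preempt as soon as possible */
	bool need_resched_lazy;	/* reschedule at the next voluntary point */
};

/* Involuntary path (interrupt return, preempt_enable): lazy flag is ignored. */
static bool should_preempt_now(const struct model_thread *t)
{
	return t->need_resched;
}

/* Voluntary scheduling points (e.g. cond_resched-like checks). */
static bool should_resched_voluntarily(const struct model_thread *t)
{
	return t->need_resched || t->need_resched_lazy;
}

int main(void)
{
	struct model_thread t = { .need_resched = false, .need_resched_lazy = true };

	printf("preempt now:       %d\n", should_preempt_now(&t));		/* 0 */
	printf("resched at point:  %d\n", should_resched_voluntarily(&t));	/* 1 */
	return 0;
}
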
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index ca868c04f..9798149b5 100644 +index 52062b910..7488bcf38 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -270,7 +270,7 @@ static void pull_rt_task(struct rq *this_rq); @@ -21480,7 +21538,7 @@ index ca868c04f..9798149b5 100644 } static inline int rt_overloaded(struct rq *rq) -@@ -1679,7 +1679,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) +@@ -1681,7 +1681,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -21489,7 +21547,7 @@ index ca868c04f..9798149b5 100644 return 1; return 0; -@@ -1773,8 +1773,8 @@ static int find_lowest_rq(struct task_struct *task) +@@ -1775,8 +1775,8 @@ static int find_lowest_rq(struct task_struct *task) return this_cpu; } @@ -21500,7 +21558,7 @@ index ca868c04f..9798149b5 100644 if (best_cpu < nr_cpu_ids) { rcu_read_unlock(); return best_cpu; -@@ -1791,7 +1791,7 @@ static int find_lowest_rq(struct task_struct *task) +@@ -1793,7 +1793,7 @@ static int find_lowest_rq(struct task_struct *task) if (this_cpu != -1) return this_cpu; @@ -21509,7 +21567,7 @@ index ca868c04f..9798149b5 100644 if (cpu < nr_cpu_ids) return cpu; -@@ -1852,7 +1852,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) +@@ -1854,7 +1854,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) */ struct task_struct *next_task = pick_next_pushable_task(rq); if (unlikely(next_task != task || @@ -21518,7 +21576,7 @@ index ca868c04f..9798149b5 100644 double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; break; -@@ -1876,7 +1876,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) +@@ -1878,7 +1878,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) * running task can migrate over to a CPU that is running a task * of lesser priority. 
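
find_lowest_rq() above switches from cpumask_first_and()/cpumask_any() to the _distribute variants (cpumask_any_distribute() itself is added in the lib/cpumask.c hunk further down), so repeated picks rotate through the eligible CPUs instead of always landing on the lowest-numbered one. A simplified model of that rotation follows, using a 64-bit word as the mask; names and sizes are sketch assumptions.

#include <stdint.h>
#include <stdio.h>

static int pick_distributed_cpu(uint64_t mask, int *prev)
{
	int ncpus = 64;
	int start = (*prev + 1) % ncpus;	/* begin just after the last pick */

	for (int i = 0; i < ncpus; i++) {
		int cpu = (start + i) % ncpus;

		if (mask & (UINT64_C(1) << cpu)) {
			*prev = cpu;
			return cpu;
		}
	}
	return -1;	/* empty mask */
}

int main(void)
{
	uint64_t allowed = (1u << 1) | (1u << 3) | (1u << 5);
	int prev = -1;

	for (int i = 0; i < 4; i++)
		printf("picked CPU %d\n", pick_distributed_cpu(allowed, &prev));
	/* prints 1, 3, 5, 1 - successive calls rotate through the mask */
	return 0;
}
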
*/ @@ -21527,7 +21585,7 @@ index ca868c04f..9798149b5 100644 { struct task_struct *next_task; struct rq *lowest_rq; -@@ -1890,6 +1890,39 @@ static int push_rt_task(struct rq *rq) +@@ -1892,6 +1892,39 @@ static int push_rt_task(struct rq *rq) return 0; retry: @@ -21567,7 +21625,7 @@ index ca868c04f..9798149b5 100644 if (WARN_ON(next_task == rq->curr)) return 0; -@@ -1947,12 +1980,10 @@ static int push_rt_task(struct rq *rq) +@@ -1949,12 +1982,10 @@ static int push_rt_task(struct rq *rq) deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); @@ -21581,7 +21639,7 @@ index ca868c04f..9798149b5 100644 out: put_task_struct(next_task); -@@ -1962,7 +1993,7 @@ static int push_rt_task(struct rq *rq) +@@ -1964,7 +1995,7 @@ static int push_rt_task(struct rq *rq) static void push_rt_tasks(struct rq *rq) { /* push_rt_task will return true if it moved an RT */ @@ -21590,7 +21648,7 @@ index ca868c04f..9798149b5 100644 ; } -@@ -2115,7 +2146,10 @@ void rto_push_irq_work_func(struct irq_work *work) +@@ -2117,7 +2148,10 @@ void rto_push_irq_work_func(struct irq_work *work) */ if (has_pushable_tasks(rq)) { raw_spin_rq_lock(rq); @@ -21602,7 +21660,7 @@ index ca868c04f..9798149b5 100644 raw_spin_rq_unlock(rq); } -@@ -2140,7 +2174,7 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2142,7 +2176,7 @@ static void pull_rt_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, cpu; bool resched = false; @@ -21611,7 +21669,7 @@ index ca868c04f..9798149b5 100644 struct rq *src_rq; int rt_overload_count = rt_overloaded(this_rq); -@@ -2187,6 +2221,7 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2189,6 +2223,7 @@ static void pull_rt_task(struct rq *this_rq) * double_lock_balance, and another CPU could * alter this_rq */ @@ -21619,7 +21677,7 @@ index ca868c04f..9798149b5 100644 double_lock_balance(this_rq, src_rq); /* -@@ -2214,11 +2249,15 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2216,11 +2251,15 @@ static void pull_rt_task(struct rq *this_rq) if (p->prio < src_rq->curr->prio) goto skip; @@ -21640,7 +21698,7 @@ index ca868c04f..9798149b5 100644 /* * We continue with the search, just in * case there's an even higher prio task -@@ -2228,6 +2267,13 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2230,6 +2269,13 @@ static void pull_rt_task(struct rq *this_rq) } skip: double_unlock_balance(this_rq, src_rq); @@ -21654,7 +21712,7 @@ index ca868c04f..9798149b5 100644 } if (resched) -@@ -2477,6 +2523,7 @@ const struct sched_class rt_sched_class +@@ -2479,6 +2525,7 @@ const struct sched_class rt_sched_class .rq_offline = rq_offline_rt, .task_woken = task_woken_rt, .switched_from = switched_from_rt, @@ -21663,10 +21721,10 @@ index ca868c04f..9798149b5 100644 .task_tick = task_tick_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 0f871e9b1..9195d7dff 100644 +index 14d48f638..906b3aca9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1043,6 +1043,7 @@ struct rq { +@@ -1098,6 +1098,7 @@ struct rq { unsigned long cpu_capacity_orig; struct callback_head *balance_callback; @@ -21674,7 +21732,7 @@ index 0f871e9b1..9195d7dff 100644 unsigned char nohz_idle_balance; unsigned char idle_balance; -@@ -1073,6 +1074,10 @@ struct rq { +@@ -1128,6 +1129,10 @@ struct rq { /* This is used to determine avg_idle's max value */ u64 max_idle_balance_cost; @@ -21685,7 +21743,7 @@ index 0f871e9b1..9195d7dff 100644 #endif /* CONFIG_SMP */ #ifdef CONFIG_IRQ_TIME_ACCOUNTING -@@ -1146,6 +1151,11 @@ struct rq { +@@ -1201,6 +1206,11 @@ 
struct rq { unsigned char core_forceidle; unsigned int core_forceidle_seq; #endif @@ -21697,7 +21755,7 @@ index 0f871e9b1..9195d7dff 100644 #if defined(CONFIG_QOS_SCHED_PRIO_LB) && !defined(__GENKSYMS__) struct list_head cfs_offline_tasks; -@@ -1185,6 +1195,16 @@ static inline int cpu_of(struct rq *rq) +@@ -1240,6 +1250,16 @@ static inline int cpu_of(struct rq *rq) return 0; #endif } @@ -21714,7 +21772,7 @@ index 0f871e9b1..9195d7dff 100644 #ifdef CONFIG_QOS_SCHED #ifdef CONFIG_QOS_SCHED_MULTILEVEL -@@ -1602,6 +1622,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) +@@ -1657,6 +1677,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); rf->clock_update_flags = 0; #endif @@ -21724,7 +21782,7 @@ index 0f871e9b1..9195d7dff 100644 } static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) -@@ -1773,6 +1796,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) +@@ -1828,6 +1851,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) #ifdef CONFIG_SMP @@ -21734,7 +21792,7 @@ index 0f871e9b1..9195d7dff 100644 static inline void queue_balance_callback(struct rq *rq, struct callback_head *head, -@@ -1780,12 +1806,13 @@ queue_balance_callback(struct rq *rq, +@@ -1835,12 +1861,13 @@ queue_balance_callback(struct rq *rq, { lockdep_assert_rq_held(rq); @@ -21749,7 +21807,7 @@ index 0f871e9b1..9195d7dff 100644 } #define rcu_dereference_check_sched_domain(p) \ -@@ -2112,7 +2139,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) +@@ -2166,7 +2193,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x04 /* Internal use, task got migrated */ @@ -21758,7 +21816,7 @@ index 0f871e9b1..9195d7dff 100644 /* * To aid in avoiding the subversion of "niceness" due to uneven distribution * of tasks with abnormal "nice" values across CPUs the contribution that -@@ -2193,10 +2220,13 @@ struct sched_class { +@@ -2247,10 +2274,13 @@ struct sched_class { void (*task_woken)(struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, @@ -21773,7 +21831,7 @@ index 0f871e9b1..9195d7dff 100644 #endif void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); -@@ -2286,13 +2316,38 @@ static inline bool sched_fair_runnable(struct rq *rq) +@@ -2340,13 +2370,38 @@ static inline bool sched_fair_runnable(struct rq *rq) extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); extern struct task_struct *pick_next_task_idle(struct rq *rq); @@ -21813,7 +21871,7 @@ index 0f871e9b1..9195d7dff 100644 #endif -@@ -2336,6 +2391,15 @@ extern void reweight_task(struct task_struct *p, int prio); +@@ -2390,6 +2445,15 @@ extern void reweight_task(struct task_struct *p, int prio); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -22036,10 +22094,10 @@ index eed7a3a38..9769b462e 100644 cgroup_leave_frozen(true); } else { diff --git a/kernel/smp.c b/kernel/smp.c -index 114776d0d..6d35929a1 100644 +index d9ba6935f..cf034c161 100644 --- a/kernel/smp.c +++ b/kernel/smp.c -@@ -480,8 +480,18 @@ void flush_smp_call_function_from_idle(void) +@@ -488,8 +488,18 @@ void flush_smp_call_function_from_idle(void) local_irq_save(flags); flush_smp_call_function_queue(true); @@ -22712,10 +22770,10 @@ index d0bf6da49..7a74b501a 100644 
"cpu_stop: %ps(%p) leaked preempt count\n", fn, arg); goto repeat; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 544ce87ba..3db616aec 100644 +index ede09dda3..9dcc62155 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -2052,6 +2052,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, +@@ -2054,6 +2054,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, } #endif @@ -22753,10 +22811,10 @@ index 544ce87ba..3db616aec 100644 * Functions related to boot-time initialization: */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index aed5d6b6c..c26a7168f 100644 +index 801a7370b..d92080fa9 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -989,7 +989,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) +@@ -1031,7 +1031,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (unlikely(local_softirq_pending())) { static int ratelimit; @@ -22792,10 +22850,10 @@ index c1b52dab3..101a73eea 100644 do { ret = __try_to_del_timer_sync(timer, shutdown); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 6eead8a61..0cd86be3b 100644 +index 7d25b898f..773e151cd 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2607,60 +2607,43 @@ enum print_line_t trace_handle_return(struct trace_seq *s) +@@ -2589,60 +2589,43 @@ enum print_line_t trace_handle_return(struct trace_seq *s) } EXPORT_SYMBOL_GPL(trace_handle_return); @@ -22875,7 +22933,7 @@ index 6eead8a61..0cd86be3b 100644 } struct ring_buffer_event * -@@ -3863,14 +3846,17 @@ unsigned long trace_total_entries(struct trace_array *tr) +@@ -3844,14 +3827,17 @@ unsigned long trace_total_entries(struct trace_array *tr) static void print_lat_help_header(struct seq_file *m) { @@ -22901,7 +22959,7 @@ index 6eead8a61..0cd86be3b 100644 } static void print_event_info(struct array_buffer *buf, struct seq_file *m) -@@ -3904,13 +3890,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file +@@ -3885,13 +3871,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file print_event_info(buf, m); @@ -22925,7 +22983,7 @@ index 6eead8a61..0cd86be3b 100644 } void -@@ -9484,7 +9473,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) +@@ -9549,7 +9538,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) tracing_off(); local_irq_save(flags); @@ -22933,7 +22991,7 @@ index 6eead8a61..0cd86be3b 100644 /* Simulate the iterator */ trace_init_global_iter(&iter); -@@ -9564,7 +9552,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) +@@ -9629,7 +9617,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); @@ -22942,7 +23000,7 @@ index 6eead8a61..0cd86be3b 100644 } EXPORT_SYMBOL_GPL(ftrace_dump); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h -index 045cd3b14..b2a72a370 100644 +index ae3411a6c..987fe88b5 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -141,25 +141,6 @@ struct kretprobe_trace_entry_head { @@ -22972,7 +23030,7 @@ index 045cd3b14..b2a72a370 100644 struct trace_array; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index d2ca0ca09..30c82c785 100644 +index c9ee8b730..8f98de8da 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -184,6 +184,8 @@ static int trace_define_common_fields(void) @@ -22985,7 +23043,7 @@ index d2ca0ca09..30c82c785 100644 return 
ret; } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c -index 4778cecee..0c4cc486c 100644 +index 74adb873e..995a4caf4 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -441,6 +441,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) @@ -23035,10 +23093,10 @@ index 4778cecee..0c4cc486c 100644 } diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 3c40cf18d..d0870fc0d 100644 +index 687c4e61d..b3eda5091 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c -@@ -4847,9 +4847,7 @@ void show_workqueue_state(void) +@@ -4859,9 +4859,7 @@ void show_workqueue_state(void) * drivers that queue work while holding locks * also taken in their write paths. */ @@ -23048,15 +23106,15 @@ index 3c40cf18d..d0870fc0d 100644 } raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); /* -@@ -4873,7 +4871,6 @@ void show_workqueue_state(void) +@@ -4891,7 +4889,6 @@ void show_workqueue_state(void) * queue work while holding locks also taken in their write * paths. */ - printk_safe_enter(); pr_info("pool %d:", pool->id); pr_cont_pool_info(pool); - pr_cont(" hung=%us workers=%d", -@@ -4888,7 +4885,6 @@ void show_workqueue_state(void) + pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); +@@ -4904,7 +4901,6 @@ void show_workqueue_state(void) first = false; } pr_cont("\n"); @@ -23064,7 +23122,7 @@ index 3c40cf18d..d0870fc0d 100644 next_pool: raw_spin_unlock_irqrestore(&pool->lock, flags); /* -@@ -4980,6 +4976,10 @@ static void unbind_workers(int cpu) +@@ -4996,6 +4992,10 @@ static void unbind_workers(int cpu) pool->flags |= POOL_DISASSOCIATED; raw_spin_unlock_irq(&pool->lock); @@ -23076,10 +23134,10 @@ index 3c40cf18d..d0870fc0d 100644 /* diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 5839cfdc1..cd5433600 100644 +index d7ba1cde5..077a1b275 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -1420,7 +1420,7 @@ config DEBUG_ATOMIC_SLEEP +@@ -1423,7 +1423,7 @@ config DEBUG_ATOMIC_SLEEP config DEBUG_LOCKING_API_SELFTESTS bool "Locking API boot-time self-tests" @@ -23127,20 +23185,20 @@ index fb22fb266..c3c76b833 100644 +} +EXPORT_SYMBOL(cpumask_any_distribute); diff --git a/lib/debugobjects.c b/lib/debugobjects.c -index 71bdc167a..e5ab016ca 100644 +index 4dd9283f6..bb616c6ed 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c -@@ -564,7 +564,10 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack - struct debug_obj *obj; - unsigned long flags; - -- fill_pool(); +@@ -605,7 +605,10 @@ static void debug_objects_fill_pool(void) + * On RT enabled kernels the pool refill must happen in preemptible + * context: + */ +- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) ++ //if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) +#ifdef CONFIG_PREEMPT_RT + if (preempt_count() == 0 && !irqs_disabled()) +#endif -+ fill_pool(); - - db = get_bucket((unsigned long) addr); + fill_pool(); + } diff --git a/lib/dump_stack.c b/lib/dump_stack.c index a00ee6eed..f5a33b6f7 100644 @@ -23440,7 +23498,7 @@ index 78a630bbd..d27a80502 100644 if ((wait_state != TASK_RUNNING || diff --git a/mm/Kconfig b/mm/Kconfig -index 0fe459e79..46ef95511 100644 +index ccbad233f..09736233c 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -404,7 +404,7 @@ config NOMMU_INITIAL_TRIM_EXCESS @@ -23452,7 +23510,7 @@ index 0fe459e79..46ef95511 100644 select COMPACTION select XARRAY_MULTI help -@@ -952,6 +952,9 @@ config ARCH_HAS_HUGEPD +@@ -958,6 +958,9 @@ config ARCH_HAS_HUGEPD config MAPPING_DIRTY_HELPERS bool @@ -23771,7 +23829,7 @@ 
index efe38ab47..ad72e587c 100644 #if defined(HASHED_PAGE_VIRTUAL) diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 5082d0664..c574b293e 100644 +index c0d001f86..a24ad8dd1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -67,6 +67,7 @@ @@ -23797,7 +23855,7 @@ index 5082d0664..c574b293e 100644 /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { -@@ -764,6 +773,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +@@ -760,6 +769,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); memcg = pn->memcg; @@ -23805,7 +23863,7 @@ index 5082d0664..c574b293e 100644 /* Update memcg */ __this_cpu_add(memcg->vmstats_percpu->state[idx], val); -@@ -771,6 +781,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +@@ -767,6 +777,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); memcg_rstat_updated(memcg); @@ -23813,7 +23871,7 @@ index 5082d0664..c574b293e 100644 } /** -@@ -2174,6 +2185,7 @@ void unlock_page_memcg(struct page *page) +@@ -2180,6 +2191,7 @@ void unlock_page_memcg(struct page *page) EXPORT_SYMBOL(unlock_page_memcg); struct memcg_stock_pcp { @@ -23821,7 +23879,7 @@ index 5082d0664..c574b293e 100644 struct mem_cgroup *cached; /* this never be root cgroup */ unsigned int nr_pages; -@@ -2225,7 +2237,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2231,7 +2243,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (nr_pages > MEMCG_CHARGE_BATCH) return ret; @@ -23830,7 +23888,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (memcg == stock->cached && stock->nr_pages >= nr_pages) { -@@ -2233,7 +2245,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2239,7 +2251,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) ret = true; } @@ -23839,7 +23897,7 @@ index 5082d0664..c574b293e 100644 return ret; } -@@ -2268,14 +2280,14 @@ static void drain_local_stock(struct work_struct *dummy) +@@ -2274,14 +2286,14 @@ static void drain_local_stock(struct work_struct *dummy) * The only protection from memory hotplug vs. drain_stock races is * that we always operate on local CPU stock here with IRQ disabled */ @@ -23856,7 +23914,7 @@ index 5082d0664..c574b293e 100644 } /* -@@ -2287,7 +2299,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2293,7 +2305,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) struct memcg_stock_pcp *stock; unsigned long flags; @@ -23865,7 +23923,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (stock->cached != memcg) { /* reset if necessary */ -@@ -2300,7 +2312,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2306,7 +2318,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (stock->nr_pages > MEMCG_CHARGE_BATCH) drain_stock(stock); @@ -23874,7 +23932,7 @@ index 5082d0664..c574b293e 100644 } /* -@@ -2320,7 +2332,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2326,7 +2338,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all. 
*/ @@ -23883,7 +23941,7 @@ index 5082d0664..c574b293e 100644 for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; -@@ -2343,7 +2355,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2349,7 +2361,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) schedule_work_on(cpu, &stock->work); } } @@ -23892,7 +23950,7 @@ index 5082d0664..c574b293e 100644 mutex_unlock(&percpu_charge_mutex); } -@@ -3146,7 +3158,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3176,7 +3188,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) unsigned long flags; bool ret = false; @@ -23901,7 +23959,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { -@@ -3154,7 +3166,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3184,7 +3196,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) ret = true; } @@ -23910,7 +23968,7 @@ index 5082d0664..c574b293e 100644 return ret; } -@@ -3210,7 +3222,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3240,7 +3252,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) struct memcg_stock_pcp *stock; unsigned long flags; @@ -23919,7 +23977,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (stock->cached_objcg != objcg) { /* reset if necessary */ -@@ -3224,7 +3236,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3254,7 +3266,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) if (stock->nr_bytes > PAGE_SIZE) drain_obj_stock(stock); @@ -23928,7 +23986,7 @@ index 5082d0664..c574b293e 100644 } int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) -@@ -6827,12 +6839,12 @@ static int mem_cgroup_move_account(struct page *page, +@@ -6961,12 +6973,12 @@ static int mem_cgroup_move_account(struct page *page, ret = 0; @@ -23943,7 +24001,7 @@ index 5082d0664..c574b293e 100644 out_unlock: unlock_page(page); out: -@@ -7809,10 +7821,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) +@@ -7955,10 +7967,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) css_get(&memcg->css); commit_charge(page, memcg); @@ -23956,7 +24014,7 @@ index 5082d0664..c574b293e 100644 /* * Cgroup1's unified memory+swap counter has been charged with the -@@ -7868,11 +7880,11 @@ static void uncharge_batch(const struct uncharge_gather *ug) +@@ -8014,11 +8026,11 @@ static void uncharge_batch(const struct uncharge_gather *ug) memcg_oom_recover(ug->memcg); } @@ -23970,7 +24028,7 @@ index 5082d0664..c574b293e 100644 /* drop reference from uncharge_page */ css_put(&ug->memcg->css); -@@ -8044,10 +8056,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) +@@ -8190,10 +8202,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) css_get(&memcg->css); commit_charge(newpage, memcg); @@ -23983,7 +24041,7 @@ index 5082d0664..c574b293e 100644 } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -8177,9 +8189,13 @@ static int __init mem_cgroup_init(void) +@@ -8323,9 +8335,13 @@ static int __init mem_cgroup_init(void) cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); @@ -24000,7 +24058,7 @@ index 
5082d0664..c574b293e 100644 for_each_node(node) { struct mem_cgroup_tree_per_node *rtpn; -@@ -8230,6 +8246,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -8375,6 +8391,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; @@ -24008,7 +24066,7 @@ index 5082d0664..c574b293e 100644 VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); -@@ -8275,9 +8292,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -8420,9 +8437,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. */ @@ -24023,7 +24081,7 @@ index 5082d0664..c574b293e 100644 css_put(&memcg->css); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 9e85e8b52..8f0d4b6c3 100644 +index 14b05d7fa..1d0dcf441 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ @@ -24379,7 +24437,7 @@ index 9e85e8b52..8f0d4b6c3 100644 return NULL; } -@@ -9344,7 +9414,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -9410,7 +9480,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -24388,7 +24446,7 @@ index 9e85e8b52..8f0d4b6c3 100644 if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -9353,7 +9423,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -9419,7 +9489,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } @@ -24398,7 +24456,7 @@ index 9e85e8b52..8f0d4b6c3 100644 #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/shmem.c b/mm/shmem.c -index f7caf1dec..0ef372e7d 100644 +index 9cb612d11..22abbff47 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -307,10 +307,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) @@ -24465,7 +24523,7 @@ index f7caf1dec..0ef372e7d 100644 } return mpol; } -@@ -3575,9 +3576,10 @@ static int shmem_reconfigure(struct fs_context *fc) +@@ -3595,9 +3596,10 @@ static int shmem_reconfigure(struct fs_context *fc) struct shmem_options *ctx = fc->fs_private; struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); unsigned long inodes; @@ -24477,7 +24535,7 @@ index f7caf1dec..0ef372e7d 100644 inodes = sbinfo->max_inodes - sbinfo->free_inodes; if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { -@@ -3623,14 +3625,15 @@ static int shmem_reconfigure(struct fs_context *fc) +@@ -3643,14 +3645,15 @@ static int shmem_reconfigure(struct fs_context *fc) * Preserve previous mempolicy unless mpol remount option was specified. 
*/ if (ctx->mpol) { @@ -24496,7 +24554,7 @@ index f7caf1dec..0ef372e7d 100644 return invalfc(fc, "%s", err); } -@@ -3747,7 +3750,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) +@@ -3767,7 +3770,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) sbinfo->mpol = ctx->mpol; ctx->mpol = NULL; @@ -25363,10 +25421,10 @@ index ec1c3a376..559fcc2a3 100644 for (i = 0; i < t.count; i++) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c -index e27cd716c..1285e5cb0 100644 +index 6d802924d..a4ea22e5b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c -@@ -1889,7 +1889,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) +@@ -1894,7 +1894,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; @@ -25375,7 +25433,7 @@ index e27cd716c..1285e5cb0 100644 void *vaddr; node = numa_node_id(); -@@ -1926,11 +1926,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) +@@ -1931,11 +1931,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) return ERR_PTR(err); } @@ -25390,7 +25448,7 @@ index e27cd716c..1285e5cb0 100644 return vaddr; } -@@ -1995,6 +1996,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2000,6 +2001,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; void *vaddr = NULL; unsigned int order; @@ -25398,7 +25456,7 @@ index e27cd716c..1285e5cb0 100644 BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -2009,7 +2011,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2014,7 +2016,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) order = get_order(size); rcu_read_lock(); @@ -25408,7 +25466,7 @@ index e27cd716c..1285e5cb0 100644 list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; -@@ -2032,7 +2035,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2037,7 +2040,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) break; } @@ -25794,7 +25852,7 @@ index 6079f5625..a928c7136 100644 migrate_read_unlock(zspage); unpin_tag(handle); diff --git a/net/Kconfig b/net/Kconfig -index a22c3fb88..5a17bded7 100644 +index 51a934426..9db79194a 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -280,7 +280,7 @@ config CGROUP_NET_CLASSID @@ -25807,10 +25865,10 @@ index a22c3fb88..5a17bded7 100644 config BQL bool diff --git a/net/core/dev.c b/net/core/dev.c -index 5a1994be7..254a4221c 100644 +index 8e0f4690e..6850789d0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -221,14 +221,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) +@@ -222,14 +222,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS @@ -25827,7 +25885,7 @@ index 5a1994be7..254a4221c 100644 #endif } -@@ -3051,6 +3051,7 @@ static void __netif_reschedule(struct Qdisc *q) +@@ -3054,6 +3054,7 @@ static void __netif_reschedule(struct Qdisc *q) sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -25835,7 +25893,7 @@ index 5a1994be7..254a4221c 100644 } void __netif_schedule(struct Qdisc *q) -@@ -3113,6 +3114,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) +@@ -3116,6 +3117,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) __this_cpu_write(softnet_data.completion_queue, skb); 
raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -25843,7 +25901,7 @@ index 5a1994be7..254a4221c 100644 } EXPORT_SYMBOL(__dev_kfree_skb_irq); -@@ -3791,7 +3793,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, +@@ -3810,7 +3812,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. */ @@ -25855,7 +25913,7 @@ index 5a1994be7..254a4221c 100644 if (unlikely(contended)) spin_lock(&q->busylock); -@@ -4591,6 +4597,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, +@@ -4614,6 +4620,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, rps_unlock(sd); local_irq_restore(flags); @@ -25863,7 +25921,7 @@ index 5a1994be7..254a4221c 100644 atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); -@@ -4810,7 +4817,7 @@ static int netif_rx_internal(struct sk_buff *skb) +@@ -4833,7 +4840,7 @@ static int netif_rx_internal(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -25872,7 +25930,7 @@ index 5a1994be7..254a4221c 100644 rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -4820,14 +4827,14 @@ static int netif_rx_internal(struct sk_buff *skb) +@@ -4843,14 +4850,14 @@ static int netif_rx_internal(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); @@ -25890,7 +25948,7 @@ index 5a1994be7..254a4221c 100644 } return ret; } -@@ -4866,11 +4873,9 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4889,11 +4896,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); @@ -25904,7 +25962,7 @@ index 5a1994be7..254a4221c 100644 trace_netif_rx_ni_exit(err); return err; -@@ -6346,12 +6351,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) +@@ -6371,12 +6376,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) sd->rps_ipi_list = NULL; local_irq_enable(); @@ -25919,7 +25977,7 @@ index 5a1994be7..254a4221c 100644 } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -6429,6 +6436,7 @@ void __napi_schedule(struct napi_struct *n) +@@ -6454,6 +6461,7 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); @@ -25927,7 +25985,7 @@ index 5a1994be7..254a4221c 100644 } EXPORT_SYMBOL(__napi_schedule); -@@ -10987,6 +10995,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -11004,6 +11012,7 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); @@ -25935,7 +25993,7 @@ index 5a1994be7..254a4221c 100644 #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; -@@ -11000,7 +11009,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -11017,7 +11026,7 @@ static int dev_cpu_dead(unsigned int oldcpu) netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -25944,7 +26002,7 @@ index 5a1994be7..254a4221c 100644 netif_rx_ni(skb); input_queue_head_incr(oldsd); } -@@ -11316,7 +11325,7 @@ static int __init net_dev_init(void) +@@ -11333,7 +11342,7 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); @@ -26035,10 +26093,10 @@ index e491b083b..ef432cea2 100644 struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) diff --git a/net/core/sock.c b/net/core/sock.c -index d8d42ff15..d23b79afa 100644 +index 65745ec67..b95d402fd 100644 --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -3068,12 +3068,11 @@ void lock_sock_nested(struct sock *sk, int 
subclass) +@@ -3116,12 +3116,11 @@ void lock_sock_nested(struct sock *sk, int subclass) if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; @@ -26052,7 +26110,7 @@ index d8d42ff15..d23b79afa 100644 } EXPORT_SYMBOL(lock_sock_nested); -@@ -3122,12 +3121,11 @@ bool lock_sock_fast(struct sock *sk) +@@ -3170,12 +3169,11 @@ bool lock_sock_fast(struct sock *sk) __lock_sock(sk); sk->sk_lock.owned = 1; @@ -26067,10 +26125,10 @@ index d8d42ff15..d23b79afa 100644 } EXPORT_SYMBOL(lock_sock_fast); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index b8eecd670..070473934 100644 +index cfb4904ee..260c6ae49 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c -@@ -1271,7 +1271,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, +@@ -1276,7 +1276,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { @@ -26080,7 +26138,7 @@ index b8eecd670..070473934 100644 err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c -index ecdd9e83f..73b5aa797 100644 +index a3e9dd348..1a3b93750 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -578,7 +578,11 @@ struct Qdisc noop_qdisc = { @@ -26134,10 +26192,10 @@ index f56b4df6c..6cb833516 100644 } EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c -index ac2f1a733..84f421e6b 100644 +index 73f5cbae6..a976e2389 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c -@@ -2673,7 +2673,8 @@ int __net_init xfrm_state_init(struct net *net) +@@ -2670,7 +2670,8 @@ int __net_init xfrm_state_init(struct net *net) net->xfrm.state_num = 0; INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); spin_lock_init(&net->xfrm.xfrm_state_lock); @@ -26148,5 +26206,5 @@ index ac2f1a733..84f421e6b 100644 out_byspi: -- -2.33.0 +2.41.0 diff --git a/0001-modify-openeuler_defconfig-for-rt62.patch b/0001-modify-openeuler_defconfig-for-rt62.patch index 2156deb0ce6fe832faf865133d739462be1ffcc1..992308f0666a494ec063f9cad4969ac170649201 100644 --- a/0001-modify-openeuler_defconfig-for-rt62.patch +++ b/0001-modify-openeuler_defconfig-for-rt62.patch @@ -1,22 +1,22 @@ -From 0026e130f88770f45b00f52ba1374dd90b8be0f0 Mon Sep 17 00:00:00 2001 -From: liyulei -Date: Mon, 6 Feb 2023 18:04:41 +0800 -Subject: [PATCH 2/2] modify openeuler_defconfig for rt62 +From b17c5ed418d6cdd498aaa2b4bb29df1d640ba8ad Mon Sep 17 00:00:00 2001 +From: zhangyu +Date: Tue, 18 Jun 2024 12:59:41 +0800 +Subject: [PATCH 2/2] zhy2 --- arch/arm64/configs/openeuler_defconfig | 5 +++-- arch/arm64/kernel/fpsimd.c | 4 ++-- - arch/x86/configs/openeuler_defconfig | 7 ++++--- + arch/x86/configs/openeuler_defconfig | 8 ++++---- arch/x86/include/asm/preempt.h | 16 +++++++++++++--- include/linux/printk.h | 2 +- kernel/printk/printk.c | 2 +- - 6 files changed, 24 insertions(+), 12 deletions(-) + 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig -index 30384eacad4f..b0ed4a9b3058 100644 +index 05b50ca38..5baa109a4 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig -@@ -74,6 +74,7 @@ CONFIG_HIGH_RES_TIMERS=y +@@ -87,6 +87,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set @@ -24,16 +24,16 @@ index 30384eacad4f..b0ed4a9b3058 100644 # # CPU/Task time and stats accounting -@@ -733,7 +734,7 @@ CONFIG_ACPI_MPAM=y - 
CONFIG_ACPI_PPTT=y +@@ -772,7 +773,7 @@ CONFIG_ACPI_PPTT=y + CONFIG_ACPI_PCC=y # CONFIG_PMIC_OPREGION is not set CONFIG_IRQ_BYPASS_MANAGER=y -CONFIG_VIRTUALIZATION=y +# CONFIG_VIRTUALIZATION is not set CONFIG_KVM=y - CONFIG_HAVE_KVM_IRQCHIP=y - CONFIG_HAVE_KVM_IRQFD=y -@@ -1125,7 +1126,7 @@ CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y + CONFIG_CVM_HOST=y + CONFIG_CVM_GUEST=y +@@ -1174,7 +1175,7 @@ CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y # CONFIG_GUP_BENCHMARK is not set # CONFIG_READ_ONLY_THP_FOR_FS is not set CONFIG_ARCH_HAS_PTE_SPECIAL=y @@ -43,10 +43,10 @@ index 30384eacad4f..b0ed4a9b3058 100644 CONFIG_MEMORY_RELIABLE=y # CONFIG_CLEAR_FREELIST_PAGE is not set diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index 84520f11667d..aa631771e0dc 100644 +index 53753ae47..bcdd1bf20 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c -@@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void) +@@ -234,7 +234,7 @@ static void __get_cpu_fpsimd_context(void) */ static void get_cpu_fpsimd_context(void) { @@ -55,7 +55,7 @@ index 84520f11667d..aa631771e0dc 100644 __get_cpu_fpsimd_context(); } -@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void) +@@ -255,7 +255,7 @@ static void __put_cpu_fpsimd_context(void) static void put_cpu_fpsimd_context(void) { __put_cpu_fpsimd_context(); @@ -65,22 +65,23 @@ index 84520f11667d..aa631771e0dc 100644 static bool have_cpu_fpsimd_context(void) diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig -index 5ada612f1d75..685a49ca1fef 100644 +index f3b810d0c..4494a5785 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig -@@ -89,9 +89,10 @@ CONFIG_NO_HZ=y - CONFIG_HIGH_RES_TIMERS=y +@@ -89,10 +89,10 @@ CONFIG_HIGH_RES_TIMERS=y + CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y # end of Timers subsystem -CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_NONE is not set # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set +- +CONFIG_PREEMPT_RT=y - # # CPU/Task time and stats accounting -@@ -206,7 +207,7 @@ CONFIG_HAVE_UID16=y + # +@@ -217,7 +217,7 @@ CONFIG_HAVE_UID16=y CONFIG_SYSCTL_EXCEPTION_TRACE=y CONFIG_HAVE_PCSPKR_PLATFORM=y CONFIG_BPF=y @@ -89,7 +90,7 @@ index 5ada612f1d75..685a49ca1fef 100644 CONFIG_UID16=y CONFIG_MULTIUSER=y CONFIG_SGETMASK_SYSCALL=y -@@ -735,7 +736,7 @@ CONFIG_KVM_COMPAT=y +@@ -758,7 +758,7 @@ CONFIG_KVM_COMPAT=y CONFIG_HAVE_KVM_IRQ_BYPASS=y CONFIG_HAVE_KVM_NO_POLL=y CONFIG_KVM_XFER_TO_GUEST_WORK=y @@ -99,7 +100,7 @@ index 5ada612f1d75..685a49ca1fef 100644 CONFIG_KVM_INTEL=m CONFIG_X86_SGX_KVM=y diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h -index a3b73de276c3..dda9ec24fdc1 100644 +index a3b73de27..dda9ec24f 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -134,10 +134,8 @@ static __always_inline bool should_resched(int preempt_offset) @@ -132,7 +133,7 @@ index a3b73de276c3..dda9ec24fdc1 100644 + #endif /* __ASM_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h -index 9331b131ba25..23946f4828b2 100644 +index 26c70fd11..c82c971e8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -241,7 +241,7 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); @@ -145,18 +146,18 @@ index 9331b131ba25..23946f4828b2 100644 #else static inline void zap_locks(void) { } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index e95b00f24c75..59bd804e662a 100644 +index f068738c7..82d58ad08 100644 --- a/kernel/printk/printk.c +++ 
b/kernel/printk/printk.c -@@ -1813,7 +1813,7 @@ static void print_sync_until(struct console *con, u64 seq) +@@ -1821,7 +1821,7 @@ static void print_sync_until(struct console *con, u64 seq) console_atomic_unlock(flags); } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) -+#if (defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI)) && !defined(CONFIG_PREEMPT_RT) ++#if 0//(defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI)) && !defined(CONFIG_PREEMPT_RT) void zap_locks(void) { // if (raw_spin_is_locked(&logbuf_lock)) { -- -2.36.1 +2.41.0 diff --git a/kernel-rt.spec b/kernel-rt.spec index 4fc925eedc30651f9fd89c1b22860c9e8fde75c0..4fd4313f32391a71b319ff48e67428b593ffb48c 100644 --- a/kernel-rt.spec +++ b/kernel-rt.spec @@ -10,9 +10,9 @@ %global upstream_version 5.10 %global upstream_sublevel 0 -%global devel_release 161 +%global devel_release 208 %global maintenance_release .0.0 -%global pkg_release .60 +%global pkg_release .61 %global rt_release .rt62 %define with_debuginfo 1 @@ -891,6 +891,9 @@ fi %endif %changelog +* Tue Jun 18 2024 zhangyu - 5.10.0-208.0.0.61 +- update kernel-rt version to 5.10.0-208.0.0 + * Mon Nov 1 2023 zhangyu - 5.10.0-161.0.0.60 - update kernel-rt version to 5.10.0-161.0.0 diff --git a/raspberrypi-kernel-rt.spec b/raspberrypi-kernel-rt.spec index a01ddba499ae792434045c202daa9bf62f6e568a..e2fc63786537e0605b030331059af4112da1e659 100644 --- a/raspberrypi-kernel-rt.spec +++ b/raspberrypi-kernel-rt.spec @@ -2,13 +2,13 @@ %global KernelVer %{version}-%{release}.raspi.%{_target_cpu} -%global hulkrelease 161.0.0 +%global hulkrelease 208.0.0 %global debug_package %{nil} Name: raspberrypi-kernel-rt Version: 5.10.0 -Release: %{hulkrelease}.rt62.10 +Release: %{hulkrelease}.rt62.11 Summary: Linux Kernel License: GPLv2 URL: http://www.kernel.org/ @@ -172,6 +172,9 @@ install -m 644 /boot/dtb-%{KernelVer}/overlays/README /boot/overlays/ /lib/modules/%{KernelVer} %changelog +* Tue Jun 18 2024 zhangyu - 5.10.0-208.0.0.11 +- - update preempt-RT to openEuler 5.10.0-208.0.0 + * Mon Dec 30 2023 zhangyu - 5.10.0-161.0.0.10 - - update preempt-RT to openEuler 5.10.0-161.0.0