diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2aca373a703843367541bf90d877a7352553a3cb..f5559e38b24363474b20fa70e22ef8914cc5e9c7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1493,6 +1493,15 @@ config HW_PERF_EVENTS
 	def_bool y
 	depends on ARM_PMU
 
+config ARCH_LLC_128_LINE_SIZE
+	bool "Force 128-byte alignment to fit the LLC cache line"
+	depends on ARM64
+	default y
+	help
+	  On some machines the last-level cache (LLC) line size is
+	  128 bytes. Aligning hot data structures to 128 bytes keeps
+	  them within a single LLC line, which can improve performance.
+
 # Supported by clang >= 7.0 or GCC >= 12.0.0
 config CC_HAVE_SHADOW_CALL_STACK
 	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 1613779be63acfdcb31ecf782088d3d8b1ecddc7..8455df351ef89a30acb8e6f0e9d8942a14598818 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -8,6 +8,12 @@
 #define L1_CACHE_SHIFT		(6)
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+#ifndef ____cacheline_aligned_128
+#define ____cacheline_aligned_128 __attribute__((__aligned__(128)))
+#endif
+#endif
+
 #define CLIDR_LOUU_SHIFT	27
 #define CLIDR_LOC_SHIFT		24
 #define CLIDR_LOUIS_SHIFT	21
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 266d02809dbb1dceabfb2a05276385c6a3be9b0c..35b366addf0766edf9278ffa069250ba1fa650e3 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -48,9 +48,16 @@ DEFINE_RAW_SPINLOCK(timekeeper_lock);
  * cache line.
  */
 static struct {
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+	u64			padding[8];
+#endif
 	seqcount_raw_spinlock_t	seq;
 	struct timekeeper	timekeeper;
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+} tk_core ____cacheline_aligned_128 = {
+#else
 } tk_core ____cacheline_aligned = {
+#endif
 	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
 };
 
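
For reference, below is a minimal, self-contained userspace sketch of the layout this patch produces. It is not kernel code: struct timekeeper_stub and its field sizes are hypothetical stand-ins, and only the 128-byte variable alignment and the 64 bytes of leading padding mirror the patch. It compiles with GCC or Clang and prints the resulting placement:

/* Hedged sketch, not kernel code: stand-in types; only the alignment
 * and padding mirror the patch. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Same expansion as the patch's ____cacheline_aligned_128. */
#define ____cacheline_aligned_128 __attribute__((__aligned__(128)))

struct timekeeper_stub {		/* hypothetical stand-in for struct timekeeper */
	uint64_t hot_fields[7];		/* 56 bytes standing in for the hot readout data */
};

static struct tk_core_stub {
	uint64_t padding[8];		/* 64 bytes of padding, as in the patch */
	unsigned int seq;		/* stand-in for seqcount_raw_spinlock_t */
	struct timekeeper_stub timekeeper;
} tk_core ____cacheline_aligned_128;

int main(void)
{
	/* The variable attribute places tk_core on a 128-byte boundary ... */
	printf("&tk_core %% 128 = %zu\n", (size_t)((uintptr_t)&tk_core % 128));
	/* ... and the padding puts seq at offset 64, so seq and the start of
	 * the timekeeper sit in the second half of one 128-byte LLC line. */
	printf("offsetof(seq)  = %zu\n", offsetof(struct tk_core_stub, seq));
	printf("offsetof(tk)   = %zu\n", offsetof(struct tk_core_stub, timekeeper));
	return 0;
}

The visible trade-off is that tk_core grows by 64 bytes of padding when CONFIG_ARCH_LLC_128_LINE_SIZE is enabled, in exchange for keeping the hot readout data inside a single 128-byte LLC line. On a running system, the cache line size reported by the hardware can be checked via the cacheinfo sysfs files, e.g. /sys/devices/system/cpu/cpu0/cache/index*/coherency_line_size.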