diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 9adad040806813b6155f627faa03ae4a24176334..1c569498ce8f429b13ab8e8549082dbd12566d4d 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -189,10 +189,21 @@ static void amd_uncore_del(struct perf_event *event, int flags)
  */
 static u64 l3_thread_slice_mask(u64 config)
 {
-	if (boot_cpu_data.x86 <= 0x18)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+	    boot_cpu_data.x86 <= 0x18)
 		return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) |
 		       ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK);
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+	    boot_cpu_data.x86 == 0x18) {
+		if (boot_cpu_data.x86_model == 0x6)
+			return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) |
+			       ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK);
+		else
+			return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) |
+			       ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK);
+	}
+
 	/*
 	 * If the user doesn't specify a threadmask, they're not trying to
 	 * count core 0, so we enable all cores & threads.
@@ -307,6 +318,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L
 DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46");	/* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47");	/* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50");	/* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31");	/* F18h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63");	/* F18h L3 */
 
 static struct attribute *amd_uncore_df_format_attr[] = {
 	&format_attr_event12.attr,	/* event14 if F17h+ */
@@ -619,11 +632,23 @@ static int __init amd_uncore_init(void)
 			*l3_attr++ = &format_attr_enallcores.attr;
 			*l3_attr++ = &format_attr_sliceid.attr;
 			*l3_attr++ = &format_attr_threadmask2.attr;
-		} else if (boot_cpu_data.x86 >= 0x17) {
+		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+			   boot_cpu_data.x86 >= 0x17) {
 			*l3_attr++ = &format_attr_event8.attr;
 			*l3_attr++ = &format_attr_umask.attr;
 			*l3_attr++ = &format_attr_slicemask.attr;
 			*l3_attr++ = &format_attr_threadmask8.attr;
+		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+			   boot_cpu_data.x86 == 0x18) {
+			*l3_attr++ = &format_attr_event8.attr;
+			*l3_attr++ = &format_attr_umask.attr;
+			if (boot_cpu_data.x86_model == 6) {
+				*l3_attr++ = &format_attr_slicemask4.attr;
+				*l3_attr++ = &format_attr_threadmask32.attr;
+			} else {
+				*l3_attr++ = &format_attr_slicemask.attr;
+				*l3_attr++ = &format_attr_threadmask8.attr;
+			}
 		}
 
 		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index c77eaf919879feebedd6780a5eb40acae945a1fe..71c9ca150571b2307f1747b135bb21711e8b270e 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -48,6 +48,14 @@
 #define INTEL_ARCH_EVENT_MASK	\
 	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define HYGON_L3_SLICE_SHIFT				28
+#define HYGON_L3_SLICE_MASK			\
+	(0xFULL << HYGON_L3_SLICE_SHIFT)
+
+#define HYGON_L3_THREAD_SHIFT				32
+#define HYGON_L3_THREAD_MASK			\
+	(0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT)
+
 #define AMD64_L3_SLICE_SHIFT				48
 #define AMD64_L3_SLICE_MASK			\
 	(0xFULL << AMD64_L3_SLICE_SHIFT)
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index bdfd4d3072733f69e72e5894ec18ad0512495af4..55c4db713cd5416310b3406aafa0305d08ac5e2a 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -16,6 +16,7 @@
 #ifdef CONFIG_X86_64
 # include <asm/set_memory.h>
 #endif
+#include <asm/resctrl.h>
 
 #include "cpu.h"
 
@@ -267,6 +268,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
 		}
 	}
+	resctrl_cpu_detect(c);
 }
 
 static void early_init_hygon(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index b246bd72333b068c422d3899c7fc479e5cf80e7d..6a32e3247d751d2fc43fab8865c46cc26817e487 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -838,7 +838,8 @@ static __init bool get_mem_config(void)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
 
 	return false;
@@ -965,7 +966,8 @@ static __init void rdt_init_res_defs(void)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		rdt_init_res_defs_intel();
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		rdt_init_res_defs_amd();
 }
 
@@ -995,7 +997,8 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 		c->x86_cache_occ_scale = ebx;
 		c->x86_cache_mbm_width_offset = eax & 0xff;
 
-		if (c->x86_vendor == X86_VENDOR_AMD &&
+		if ((c->x86_vendor == X86_VENDOR_AMD ||
+		     c->x86_vendor == X86_VENDOR_HYGON) &&
 		    !c->x86_cache_mbm_width_offset)
 			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
 	}
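
For illustration, here is a minimal user-space sketch of the defaulting policy that l3_thread_slice_mask() applies to the new Hygon family 18h model 6 bit layout. Only the HYGON_L3_* bit positions come from the patch above; the function name, main(), and test values are hypothetical:

/*
 * Sketch (not kernel code): mirrors the l3_thread_slice_mask() policy for
 * Hygon f18h m6 -- a zero slicemask or threadmask in the raw config is
 * treated as "count on all slices / all threads" rather than "none".
 */
#include <stdint.h>
#include <stdio.h>

#define HYGON_L3_SLICE_SHIFT	28
#define HYGON_L3_SLICE_MASK	(0xFULL << HYGON_L3_SLICE_SHIFT)
#define HYGON_L3_THREAD_SHIFT	32
#define HYGON_L3_THREAD_MASK	(0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT)

static uint64_t hygon_l3_slice_thread_mask(uint64_t config)
{
	uint64_t slice  = config & HYGON_L3_SLICE_MASK;
	uint64_t thread = config & HYGON_L3_THREAD_MASK;

	/* A zero field defaults to the full mask, i.e. "all". */
	return (slice  ? slice  : HYGON_L3_SLICE_MASK) |
	       (thread ? thread : HYGON_L3_THREAD_MASK);
}

int main(void)
{
	/* Nothing specified: defaults to all 4 slices and all 32 threads. */
	printf("%#llx\n", (unsigned long long)hygon_l3_slice_thread_mask(0));

	/* slicemask=0x1 (slice 0 only); threadmask still defaults to all. */
	printf("%#llx\n", (unsigned long long)
	       hygon_l3_slice_thread_mask(0x1ULL << HYGON_L3_SLICE_SHIFT));
	return 0;
}

This mirrors the existing AMD64_L3_* handling in l3_thread_slice_mask(); the only difference on f18h m6 is the field placement (slicemask in config:28-31, threadmask in config:32-63), matching the slicemask4/threadmask32 format attributes the patch exports through sysfs.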