diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index ff4238eff08701adab26f715c79b9cb26183534a..88db23c15a9c2d12916ee61d9e8045ccf4ffbd7d 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -186,10 +186,21 @@ static void amd_uncore_del(struct perf_event *event, int flags)
  */
 static u64 l3_thread_slice_mask(u64 config)
 {
-	if (boot_cpu_data.x86 <= 0x18)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+	    boot_cpu_data.x86 <= 0x18)
 		return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) |
 		       ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK);
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+	    boot_cpu_data.x86 == 0x18) {
+		if (boot_cpu_data.x86_model == 0x6)
+			return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) |
+			       ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK);
+		else
+			return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) |
+			       ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK);
+	}
+
 	/*
 	 * If the user doesn't specify a threadmask, they're not trying to
 	 * count core 0, so we enable all cores & threads.
@@ -258,6 +269,13 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 	       attr->mode : 0;
 }
 
+static umode_t
+hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+	return boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model == 0x6 ?
+	       attr->mode : 0;
+}
+
 static umode_t
 amd_f19h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 {
@@ -315,6 +333,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L
 DEFINE_UNCORE_FORMAT_ATTR(enallslices,	enallslices,	"config:46");		/* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(enallcores,	enallcores,	"config:47");		/* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(sliceid,	sliceid,	"config:48-50");	/* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(slicemask4,	slicemask,	"config:28-31");	/* F18h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask,	"config:32-63");	/* F18h L3 */
 
 /* Common DF and NB attributes */
 static struct attribute *amd_uncore_df_format_attr[] = {
@@ -337,6 +357,12 @@ static struct attribute *amd_f17h_uncore_l3_format_attr[] = {
 	NULL,
 };
 
+/* F18h M06h unique L3 attributes */
+static struct attribute *hygon_f18h_m6h_uncore_l3_format_attr[] = {
+	&format_attr_slicemask4.attr,	/* slicemask */
+	NULL,
+};
+
 /* F19h unique L3 attributes */
 static struct attribute *amd_f19h_uncore_l3_format_attr[] = {
 	&format_attr_coreid.attr,	/* coreid */
@@ -362,6 +388,12 @@ static struct attribute_group amd_f17h_uncore_l3_format_group = {
 	.is_visible = amd_f17h_uncore_is_visible,
 };
 
+static struct attribute_group hygon_f18h_m6h_uncore_l3_format_group = {
+	.name = "format",
+	.attrs = hygon_f18h_m6h_uncore_l3_format_attr,
+	.is_visible = hygon_f18h_m6h_uncore_is_visible,
+};
+
 static struct attribute_group amd_f19h_uncore_l3_format_group = {
 	.name = "format",
 	.attrs = amd_f19h_uncore_l3_format_attr,
@@ -386,6 +418,11 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = {
 	NULL,
 };
 
+static const struct attribute_group *hygon_uncore_l3_attr_update[] = {
+	&hygon_f18h_m6h_uncore_l3_format_group,
+	NULL,
+};
+
 static struct pmu amd_nb_pmu = {
 	.task_ctx_nr	= perf_invalid_context,
 	.attr_groups	= amd_uncore_df_attr_groups,
@@ -698,10 +735,21 @@ static int __init amd_uncore_init(void)
 			*l3_attr++ = &format_attr_event8.attr;
 			*l3_attr++ = &format_attr_umask8.attr;
 			*l3_attr++ = &format_attr_threadmask2.attr;
-		} else if (boot_cpu_data.x86 >= 0x17) {
+		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+			   boot_cpu_data.x86 >= 0x17) {
 			*l3_attr++ = &format_attr_event8.attr;
 			*l3_attr++ = &format_attr_umask8.attr;
 			*l3_attr++ = &format_attr_threadmask8.attr;
+		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+			   boot_cpu_data.x86 == 0x18) {
+			*l3_attr++ = &format_attr_event8.attr;
+			*l3_attr++ = &format_attr_umask8.attr;
+			if (boot_cpu_data.x86_model == 0x6) {
+				*l3_attr++ = &format_attr_threadmask32.attr;
+				amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update;
+			} else {
+				*l3_attr++ = &format_attr_threadmask8.attr;
+			}
 		}
 
 		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 73d678e7ce361bf15cff6ea5a1042c64ece3c9e3..28bef43aa72dc3924152e0b8ae3bf88ff21d0640 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -50,6 +50,14 @@
 #define INTEL_ARCH_EVENT_MASK	\
 	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define HYGON_L3_SLICE_SHIFT				28
+#define HYGON_L3_SLICE_MASK				\
+	(0xFULL << HYGON_L3_SLICE_SHIFT)
+
+#define HYGON_L3_THREAD_SHIFT				32
+#define HYGON_L3_THREAD_MASK				\
+	(0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT)
+
 #define AMD64_L3_SLICE_SHIFT				48
 #define AMD64_L3_SLICE_MASK				\
 	(0xFULL << AMD64_L3_SLICE_SHIFT)
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 97f9cbad839421a561f645854870012fd0ba5165..ebe35ba90357b617360a741a08534ddbd8b54146 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -17,6 +17,7 @@
 #ifdef CONFIG_X86_64
 # include <asm/set_memory.h>
 #endif
+#include <asm/resctrl.h>
 
 #include "cpu.h"
 
@@ -268,6 +269,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
 		}
 	}
+	resctrl_cpu_detect(c);
 }
 
 static void init_hygon_cap(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 0a2d70e13fcaa2c508c5a83f03600f48b0547008..e7dffee9d61816af714d8926831ae6e2d8e18d31 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -687,7 +687,8 @@ static __init bool get_mem_config(void)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		return __get_mem_config_intel(&hw_res->r_resctrl);
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
 
 	return false;
@@ -820,7 +821,8 @@ static __init void rdt_init_res_defs(void)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		rdt_init_res_defs_intel();
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		rdt_init_res_defs_amd();
 }
 
@@ -851,7 +853,9 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 		c->x86_cache_occ_scale = ebx;
 		c->x86_cache_mbm_width_offset = eax & 0xff;
 
-		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
+		if ((c->x86_vendor == X86_VENDOR_AMD ||
+		     c->x86_vendor == X86_VENDOR_HYGON) &&
+		    !c->x86_cache_mbm_width_offset)
 			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
 	}
 }
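
For reference, the field-defaulting behaviour that l3_thread_slice_mask() gains for Hygon family 18h model 6h can be exercised outside the kernel. The following is a minimal user-space sketch, not kernel code: the hygon_l3_thread_slice_mask() helper name is made up for illustration, the HYGON_L3_* values are copied from the perf_event.h hunk above, and the elided-middle-operand "?:" form is a GNU C extension (GCC/Clang).

/*
 * Minimal user-space sketch (not kernel code) of the default-filling
 * logic l3_thread_slice_mask() applies on Hygon family 18h model 6h.
 * "a ? : b" is GNU C for "a ? a : b": a user-supplied field is kept
 * as-is, and a zero field is replaced by the full mask so that all
 * slices/threads are counted.
 */
#include <stdint.h>
#include <stdio.h>

#define HYGON_L3_SLICE_SHIFT	28
#define HYGON_L3_SLICE_MASK	(0xFULL << HYGON_L3_SLICE_SHIFT)

#define HYGON_L3_THREAD_SHIFT	32
#define HYGON_L3_THREAD_MASK	(0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT)

static uint64_t hygon_l3_thread_slice_mask(uint64_t config)
{
	return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) |
	       ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK);
}

int main(void)
{
	/* Neither field set: defaults to all slices and all threads,
	 * i.e. bits 28-63 -> 0xfffffffff0000000. */
	printf("%#llx\n", (unsigned long long)hygon_l3_thread_slice_mask(0));

	/* threadmask=0x1 (thread 0 only); slicemask still defaults to 0xF,
	 * giving 0x1f0000000. */
	printf("%#llx\n", (unsigned long long)
	       hygon_l3_thread_slice_mask(1ULL << HYGON_L3_THREAD_SHIFT));
	return 0;
}

With the patch applied, the same defaults take effect for events opened on the amd_l3 PMU: with a hypothetical encoding such as perf stat -e amd_l3/event=0x1,umask=0x1/, counting spans all four slices and all 32 threads, while appending threadmask=0x1 restricts it to thread 0.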