diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c49a76ad747f61fd8bd14d1523df2518a5215f90..38eaa358c76b725b5e7aeb8a2d90b2f35e97c760 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2357,6 +2357,11 @@ static void __maybe_unused
 cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
 {
 	u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
+	int cpu = smp_processor_id();
+	u64 regval = 0;
+
+	if (IS_ENABLED(CONFIG_ARM64_MPAM))
+		regval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
 
 	/*
 	 * Initialise MPAM EL2 registers and disable EL2 traps.
@@ -2373,6 +2378,7 @@ cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
 	 * been throttled to release the lock.
 	 */
 	write_sysreg_s(0, SYS_MPAM1_EL1);
+	write_sysreg_s(regval, SYS_MPAM0_EL1);
 }
 
 static void mpam_extra_caps(void)
diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c
index d8891d5c59c057c05604ae65067231e9b3d9bc0c..3f070cbab420651273d407dbd86b95ec483501bd 100644
--- a/arch/arm64/kernel/mpam.c
+++ b/arch/arm64/kernel/mpam.c
@@ -4,15 +4,55 @@
 
 #include 
 #include 
+#include <linux/cpu_pm.h>
 #include 
 #include 
 #include 
+#include <linux/resctrl.h>
 
 DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr);
 DEFINE_STATIC_KEY_FALSE(mpam_enabled);
 DEFINE_PER_CPU(u64, arm64_mpam_default);
 DEFINE_PER_CPU(u64, arm64_mpam_current);
 
+static int mpam_pm_notifier(struct notifier_block *self,
+			    unsigned long cmd, void *v)
+{
+	u64 regval;
+	struct rdt_resource *r;
+	int i, cpu = smp_processor_id();
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		if (!resctrl_mounted)
+			return NOTIFY_OK;
+
+		for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+			r = resctrl_arch_get_resource(i);
+			if (!r->invisible && r->is_volatile)
+				return NOTIFY_BAD;
+		}
+
+		return NOTIFY_OK;
+	case CPU_PM_EXIT:
+		/*
+		 * Don't use mpam_thread_switch() as the system register
+		 * value has changed under our feet.
+		 */
+		regval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
+		write_sysreg_s(0, SYS_MPAM1_EL1);
+		write_sysreg_s(regval, SYS_MPAM0_EL1);
+
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block mpam_pm_nb = {
+	.notifier_call = mpam_pm_notifier,
+};
+
 static int __init arm64_mpam_register_cpus(void)
 {
 	u16 partid_max;
@@ -29,6 +69,7 @@ static int __init arm64_mpam_register_cpus(void)
 	partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr);
 	pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr);
 
+	cpu_pm_register_notifier(&mpam_pm_nb);
 	return mpam_register_requestor(partid_max, pmg_max);
 }
 arch_initcall(arm64_mpam_register_cpus);
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index b4a0a99bde6aee67e7c3fe1f933ce5d9a2c3c2b4..406b4cf301bb6e64fa4db926c997fd8845a98d9b 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -101,6 +101,8 @@ static inline bool resctrl_arch_is_mbm_local_enabled(void)
 	return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
 }
 
+static inline bool resctrl_arch_is_mbm_core_enabled(void) { return false; }
+
 /*
  * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index ad3f4c790a6c087ada0959350904cbbe6108a488..092ef09c3e37891f0f6026ba151ea8478c8033f8 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -134,6 +134,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
 		return &hw_dom->arch_mbm_total[rmid];
 	case QOS_L3_MBM_LOCAL_EVENT_ID:
 		return &hw_dom->arch_mbm_local[rmid];
+	default:
+		break;
 	}
 
 	/* Never expect to get here */
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index e0a16198a810d3567272d557686b951248e11d89..566bd92dc01087586c4b04f05654691195894404 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -926,6 +926,7 @@ static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris)
 static const struct midr_range mbwu_flowrate_list[] = {
 	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
 	MIDR_ALL_VERSIONS(MIDR_HISI_LINXICORE9100),
+	MIDR_ALL_VERSIONS(MIDR_HISI_HIP12),
 	{ /* sentinel */ }
 };
 
@@ -1553,9 +1554,13 @@ static int mpam_discovery_cpu_online(unsigned int cpu)
 
 static int mpam_cpu_offline(unsigned int cpu)
 {
-	int idx;
+	int ret, idx;
 	struct mpam_msc *msc;
 
+	ret = mpam_resctrl_prepare_offline();
+	if (ret)
+		return ret;
+
 	idx = srcu_read_lock(&mpam_srcu);
 	list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) {
 		if (!cpumask_test_cpu(cpu, &msc->accessibility))
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index e9c52078edea4c04cbbe7fa7329d8c3d1a8ad126..ff1890b3a78e949ae8ba6b957c798a29dd8ab865 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -590,4 +590,6 @@ void mpam_resctrl_exit(void);
  */
 #define MSMON_CAPT_EVNT_NOW	BIT(0)
 
+int mpam_resctrl_prepare_offline(void);
+
 #endif /* MPAM_INTERNAL_H */
diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c
index 0e0011a0eb0383f1c7e9522df2857d44006538a4..cdcadc4111e1cbeddb31ed81b4ebe8c3808630cf 100644
--- a/drivers/platform/mpam/mpam_resctrl.c
+++ b/drivers/platform/mpam/mpam_resctrl.c
@@ -36,6 +36,7 @@ static bool exposed_alloc_capable;
 static bool exposed_mon_capable;
 static struct mpam_class *mbm_local_class;
 static struct mpam_class *mbm_total_class;
+static struct mpam_class *mbm_core_class;
 
 /*
  * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1.
@@ -79,6 +80,11 @@ bool resctrl_arch_is_mbm_total_enabled(void)
 	return mbm_total_class;
 }
 
+bool resctrl_arch_is_mbm_core_enabled(void)
+{
+	return mbm_core_class;
+}
+
 bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid)
 {
 	switch (rid) {
@@ -307,6 +313,8 @@ static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r,
 	case QOS_L3_OCCUP_EVENT_ID:
 	case QOS_L3_MBM_LOCAL_EVENT_ID:
 	case QOS_L3_MBM_TOTAL_EVENT_ID:
+	case QOS_L2_OCCUP_EVENT_ID:
+	case QOS_L2_MBM_CORE_EVENT_ID:
 		*ret = __mon_is_rmid_idx;
 		return ret;
 
@@ -372,10 +380,12 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 
 	switch (eventid) {
 	case QOS_L3_OCCUP_EVENT_ID:
+	case QOS_L2_OCCUP_EVENT_ID:
 		type = mpam_feat_msmon_csu;
 		break;
 	case QOS_L3_MBM_LOCAL_EVENT_ID:
 	case QOS_L3_MBM_TOTAL_EVENT_ID:
+	case QOS_L2_MBM_CORE_EVENT_ID:
 		type = mpam_feat_msmon_mbwu;
 		break;
 	default:
@@ -524,6 +534,11 @@ bool resctrl_arch_is_llc_occupancy_enabled(void)
 	return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class);
 }
 
+bool resctrl_arch_is_l2c_occupancy_enabled(void)
+{
+	return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L2].class);
+}
+
 static bool class_has_usable_mbwu(struct mpam_class *class)
 {
 	struct mpam_props *cprops = &class->props;
@@ -918,19 +933,31 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res)
 		 * be local. If it's on the memory controller, its assumed to
 		 * be global.
 		 */
-		if (has_mbwu && class->level >= 3) {
-			mbm_local_class = class;
-			r->mon_capable = true;
+		if (has_mbwu) {
+			if (class->level == 3) {
+				mbm_local_class = class;
+				r->mon_capable = true;
+
+			} else if (class->level == 2) {
+				mbm_core_class = class;
+				r->mon_capable = true;
+			}
 		}
 
 		/*
 		 * CSU counters only make sense on a cache. The file is called
 		 * llc_occupancy, but its expected to the on the L3.
 		 */
-		if (has_csu && class->type == MPAM_CLASS_CACHE &&
-		    class->level == 3) {
+		if (has_csu && class->type == MPAM_CLASS_CACHE)
 			r->mon_capable = true;
-		}
+
+		/*
+		 * The power domain of L2 cache msc is shared with the
+		 * core's, which will cause information of the L2 msc to
+		 * be lost when the core enters power-down state.
+		 */
+		if (class->level <= 2)
+			r->is_volatile = true;
 		break;
 
 	case RDT_RESOURCE_MBA:
@@ -1462,6 +1489,27 @@ int mpam_resctrl_online_cpu(unsigned int cpu)
 	return 0;
 }
 
+int mpam_resctrl_prepare_offline(void)
+{
+	struct mpam_resctrl_res *res;
+	int i;
+
+	if (resctrl_mounted) {
+		for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+			res = &mpam_resctrl_exports[i];
+
+			if (res->resctrl_res.is_volatile &&
+			    !res->resctrl_res.invisible) {
+				pr_info("%s is working, umount /sys/fs/resctrl first.\n",
+					res->resctrl_res.name);
+				return -EBUSY;
+			}
+		}
+	}
+
+	return 0;
+}
+
 int mpam_resctrl_offline_cpu(unsigned int cpu)
 {
 	int i;
@@ -1502,6 +1550,11 @@ static struct mon_evt llc_occupancy_event = {
 	.name		= "llc_occupancy",
 	.evtid		= QOS_L3_OCCUP_EVENT_ID,
 };
+static struct mon_evt l2c_occupancy_event = {
+	.name		= "l2c_occupancy",
+	.evtid		= QOS_L2_OCCUP_EVENT_ID,
+};
+
 static struct mon_evt mbm_total_event = {
 	.name		= "mbm_total_bytes",
 	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
@@ -1512,6 +1565,11 @@ static struct mon_evt mbm_local_event = {
 	.name		= "mbm_local_bytes",
 	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
 };
+
+static struct mon_evt mbm_core_event = {
+	.name		= "mbm_core_bytes",
+	.evtid		= QOS_L2_MBM_CORE_EVENT_ID,
+};
 
 /*
  * Initialize the event list for the resource.
 *
@@ -1534,6 +1592,14 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 		list_add_tail(&mbm_local_event.list, &r->evt_list);
 	}
 
+	if (r->rid == RDT_RESOURCE_L2) {
+		if (resctrl_arch_is_l2c_occupancy_enabled())
+			list_add_tail(&l2c_occupancy_event.list, &r->evt_list);
+
+		if (resctrl_arch_is_mbm_core_enabled())
+			list_add_tail(&mbm_core_event.list, &r->evt_list);
+	}
+
 	if ((r->rid == RDT_RESOURCE_MBA) &&
 	    resctrl_arch_is_mbm_total_enabled())
 		list_add_tail(&mbm_total_event.list, &r->evt_list);
@@ -1542,6 +1608,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 int resctrl_arch_mon_resource_init(void)
 {
 	l3_mon_evt_init(resctrl_arch_get_resource(RDT_RESOURCE_L3));
+	l3_mon_evt_init(resctrl_arch_get_resource(RDT_RESOURCE_L2));
 	l3_mon_evt_init(resctrl_arch_get_resource(RDT_RESOURCE_MBA));
 
 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index 60b422eed2c81d97331e32c43adac06fd00de9b8..00132a77879dabf48ee4ebe4aec90aee52382ee9 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -307,6 +307,9 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 	struct resctrl_schema *s;
 
 	list_for_each_entry(s, &resctrl_schema_all, list) {
+		if (s->res->invisible)
+			continue;
+
 		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
 			return parse_line(tok, s, rdtgrp);
 	}
@@ -451,6 +454,9 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 	} else {
 		closid = rdtgrp->closid;
 		list_for_each_entry(schema, &resctrl_schema_all, list) {
+			if (schema->res->invisible)
+				continue;
+
 			if (closid < schema->num_closid)
 				show_doms(s, schema, closid);
 		}
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index fb620beba9584d6068ef2789340f5327391bc377..839fbcf51ed2b447c882b3e7dc1b7c59f9a1433f 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -57,6 +57,7 @@ struct rdt_fs_context {
 	bool				enable_cdpl3;
 	bool				enable_mba_mbps;
 	bool				enable_debug;
+	bool				enable_l2;
 };
 
 static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
@@ -96,7 +97,6 @@ struct rmid_read {
 };
 
 extern struct list_head resctrl_schema_all;
-extern bool resctrl_mounted;
 
 enum rdt_group_type {
 	RDTCTRL_GROUP = 0,
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 7d824dc8fa907078b7797bd4408527624fc35e1a..fb60316199c4e82d787de8bf53d0f9bd1e634f42 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -595,6 +595,20 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
 		if (is_mba_sc(NULL))
 			mbm_bw_count(closid, rmid, &rr);
 
+		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
+	}
+	if (resctrl_arch_is_mbm_core_enabled()) {
+		rr.evtid = QOS_L2_MBM_CORE_EVENT_ID;
+		rr.val = 0;
+		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+		if (IS_ERR(rr.arch_mon_ctx)) {
+			pr_warn_ratelimited("Failed to allocate monitor context: %ld\n",
+					    PTR_ERR(rr.arch_mon_ctx));
+			return;
+		}
+
+		__mon_event_count(closid, rmid, &rr);
+
 		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
 	}
 }
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index ffb6b8930435f44e1a46105e606fe8269180cef5..70fdbaa12ba8fafd9880382ea565bb688b327460 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -114,13 +114,15 @@ void rdt_staged_configs_clear(void)
 
 static bool resctrl_is_mbm_enabled(void)
 {
 	return (resctrl_arch_is_mbm_total_enabled() ||
-		resctrl_arch_is_mbm_local_enabled());
+		resctrl_arch_is_mbm_local_enabled() ||
+		resctrl_arch_is_mbm_core_enabled());
 }
 
 static bool resctrl_is_mbm_event(int e)
 {
-	return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
-	       e <= QOS_L3_MBM_LOCAL_EVENT_ID);
+	return (e == QOS_L3_MBM_TOTAL_EVENT_ID ||
+		e == QOS_L3_MBM_LOCAL_EVENT_ID ||
+		e == QOS_L2_MBM_CORE_EVENT_ID);
 }
 
 /*
@@ -2515,6 +2517,7 @@ static void rdt_disable_ctx(void)
 
 static int rdt_enable_ctx(struct rdt_fs_context *ctx)
 {
+	struct rdt_resource *r;
 	int ret = 0;
 
 	if (ctx->enable_cdpl2) {
@@ -2538,6 +2541,13 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
 	if (ctx->enable_debug)
 		resctrl_debug = true;
 
+	r = resctrl_arch_get_resource(RDT_RESOURCE_L2);
+	/* Only arm64 arch hides L2 resource by default */
+	if (IS_ENABLED(CONFIG_ARM64_MPAM) && !ctx->enable_l2)
+		r->invisible = true;
+	else
+		r->invisible = false;
+
 	return 0;
 
 out_cdpl3:
@@ -2760,6 +2770,7 @@ enum rdt_param {
 	Opt_cdpl2,
 	Opt_mba_mbps,
 	Opt_debug,
+	Opt_l2,
 	nr__rdt_params
 };
 
@@ -2768,6 +2779,7 @@ static const struct fs_parameter_spec rdt_fs_parameters[] = {
 	fsparam_flag("cdpl2",		Opt_cdpl2),
 	fsparam_flag("mba_MBps",	Opt_mba_mbps),
 	fsparam_flag("debug",		Opt_debug),
+	fsparam_flag("l2",		Opt_l2),
 	{}
 };
 
@@ -2796,6 +2808,9 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	case Opt_debug:
 		ctx->enable_debug = true;
 		return 0;
+	case Opt_l2:
+		ctx->enable_l2 = true;
+		return 0;
 	}
 
 	return -EINVAL;
@@ -3148,6 +3163,9 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
 		if (!r->mon_capable)
 			continue;
 
+		if (r->invisible)
+			continue;
+
 		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
 		if (ret)
 			goto out_destroy;
@@ -4001,6 +4019,16 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 			return -ENOMEM;
 		}
 	}
+	if (resctrl_arch_is_mbm_core_enabled()) {
+		tsize = sizeof(*d->mbm_core);
+		d->mbm_core = kcalloc(idx_limit, tsize, GFP_KERNEL);
+		if (!d->mbm_core) {
+			bitmap_free(d->rmid_busy_llc);
+			kfree(d->mbm_total);
+			kfree(d->mbm_local);
+			return -ENOMEM;
+		}
+	}
 
 	return 0;
 }
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
index aef951595f65774008baf04e17a9ff6a32c7a6d8..3a06a8afad7c0b5d554eb77c2ee80c5675380cf8 100644
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -64,6 +64,8 @@ static inline bool resctrl_arch_event_is_free_running(enum resctrl_event_id evt)
 bool resctrl_arch_alloc_capable(void);
 bool resctrl_arch_mon_capable(void);
 bool resctrl_arch_is_llc_occupancy_enabled(void);
+bool resctrl_arch_is_l2c_occupancy_enabled(void);
+bool resctrl_arch_is_mbm_core_enabled(void);
 bool resctrl_arch_is_mbm_local_enabled(void);
 bool resctrl_arch_is_mbm_total_enabled(void);
 bool resctrl_arch_would_mbm_overflow(void);
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 566a891f1e04baf01f1d4fe09fa8afc7c776b1d0..4f54c371d185f5b77dd39fc5d3318b80b0cbfd3f 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -113,6 +113,7 @@ struct rdt_domain {
 	unsigned long			*rmid_busy_llc;
 	struct mbm_state		*mbm_total;
 	struct mbm_state		*mbm_local;
+	struct mbm_state		*mbm_core;
 	struct delayed_work		mbm_over;
 	struct delayed_work		cqm_limbo;
 	int				mbm_work_cpu;
@@ -215,6 +216,8 @@ struct rdt_resource {
 	int			rid;
 	bool			alloc_capable;
 	bool			mon_capable;
+	bool			invisible;
+	bool			is_volatile;
 	int			num_rmid;
 	int			cache_level;
 	struct resctrl_cache	cache;
@@ -433,6 +436,8 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d);
 extern unsigned int resctrl_rmid_realloc_threshold;
 extern unsigned int resctrl_rmid_realloc_limit;
 
+extern bool resctrl_mounted;
+
 int resctrl_init(void);
 void resctrl_exit(void);
 
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index fd1704766c29db516d0b95a5cc721d332c7de758..3d56c35877ea232a404207a38402c34075213af8 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -99,11 +99,21 @@ enum resctrl_res_level {
 /*
  * Event IDs, the values match those used to program IA32_QM_EVTSEL before
  * reading IA32_QM_CTR on RDT systems.
+ *
+ * Monitor Event IDs, representing a variety of monitoring events:
+ * QOS_L3_OCCUP_EVENT_ID:     L3 Cache Occupancy statistics event
+ * QOS_L3_MBM_TOTAL_EVENT_ID: Global Memory Bandwidth statistics event
+ * QOS_L3_MBM_LOCAL_EVENT_ID: L3 Cache Bandwidth statistics event
+ * QOS_L2_OCCUP_EVENT_ID:     L2 Cache Occupancy statistics event
+ * QOS_L2_MBM_CORE_EVENT_ID:  L2 Cache Bandwidth statistics event
 */
 enum resctrl_event_id {
 	QOS_L3_OCCUP_EVENT_ID		= 0x01,
 	QOS_L3_MBM_TOTAL_EVENT_ID	= 0x02,
 	QOS_L3_MBM_LOCAL_EVENT_ID	= 0x03,
+
+	QOS_L2_OCCUP_EVENT_ID,
+	QOS_L2_MBM_CORE_EVENT_ID,
 };
 
 #endif /* __LINUX_RESCTRL_TYPES_H */