diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index aa02523f2426fef2506460da1565e10b620a5042..872a637f9d6602ad6a9d24e8f518fc09c8393058 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -363,12 +363,13 @@ struct cgroup_rstat_cpu {
 	 */
 	struct u64_stats_sync bsync;
 	struct cgroup_base_stat bstat;
-
+	struct cgroup_base_stat_task bstat_task;
 	/*
 	 * Snapshots at the last reading. These are used to calculate the
 	 * deltas to propagate to the global counters.
 	 */
 	struct cgroup_base_stat last_bstat;
+	struct cgroup_base_stat_task last_bstat_task;
 
 	/*
 	 * Child cgroups with stat updates on this cpu since the last read
@@ -383,8 +384,11 @@ struct cgroup_rstat_cpu {
 	 */
 	struct cgroup *updated_children;	/* terminated by self cgroup */
 	struct cgroup *updated_next;		/* NULL iff not on the list */
-	CK_KABI_EXTEND(struct cgroup_base_stat_task bstat_task)
-	CK_KABI_EXTEND(struct cgroup_base_stat_task last_bstat_task)
+
+	CK_KABI_RESERVE(1)
+	CK_KABI_RESERVE(2)
+	CK_KABI_RESERVE(3)
+	CK_KABI_RESERVE(4)
 };
 
 struct cgroup_freezer_state {
@@ -507,7 +511,9 @@ struct cgroup {
 
 	/* cgroup basic resource statistics */
 	struct cgroup_base_stat last_bstat;
+	struct cgroup_base_stat_task last_bstat_task;
 	struct cgroup_base_stat bstat;
+	struct cgroup_base_stat_task bstat_task;
 	struct prev_cputime prev_cputime;	/* for printing out cputime */
 
 	/*
@@ -544,8 +550,10 @@ struct cgroup {
 	struct kernfs_root *hidden_place;	/* tree to hide cgroup in pool. */
 	struct delayed_work supply_pool_work;
 
-	CK_KABI_USE(1, 2, struct cgroup_base_stat_task last_bstat_task)
-	CK_KABI_USE(3, 4, struct cgroup_base_stat_task bstat_task)
+	CK_KABI_RESERVE(1)
+	CK_KABI_RESERVE(2)
+	CK_KABI_RESERVE(3)
+	CK_KABI_RESERVE(4)
 
 	/* ids of the ancestors at each level including self */
 	u64 ancestor_ids[];
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d0e9620729b65373f7b4c807ee322b4ee83252aa..c2e494247da1d47abd761d4e4ef67ecb1bbc94ce 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -488,9 +488,9 @@ struct address_space {
 	struct list_head private_list;
 	void *private_data;
 
-	/* CK_KABI_RESERVE(1) */
-	CK_KABI_USE(1, struct fast_reflink_work *fast_reflink_work);
+	struct fast_reflink_work *fast_reflink_work;
 
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
@@ -1915,7 +1915,6 @@ struct file_operations {
 	int (*fadvise)(struct file *, loff_t, loff_t, int);
 	int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
 	int (*uring_cmd_iopoll)(struct io_uring_cmd *ioucmd);
-	CK_KABI_DEPRECATE(bool, may_pollfree)
 
 	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index b7e046a72586a629a5faa5c9f2da88272a0b0b93..b565d2f4840bbf3b02d6d1f2a811cf413d8c6154 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -214,8 +214,9 @@ struct iommu_iotlb_gather {
 	unsigned long end;
 	size_t pgsize;
 	struct page *freelist;
+	bool queued;
 
-	CK_KABI_USE(1, bool queued)
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 };
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index b53b43973568704066e405f9984e753cc7c5bbec..5e881757cbe6c57dd526b9d7803a9ff13af67a37 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -68,7 +68,7 @@ struct irqstat {
 struct irq_desc {
 	struct irq_common_data irq_common_data;
 	struct irq_data irq_data;
-	CK_KABI_REPLACE(unsigned int __percpu *kstat_irqs, struct irqstat __percpu *kstat_irqs);
+	struct irqstat __percpu *kstat_irqs;
 	irq_flow_handler_t handle_irq;
 	struct irqaction *action;	/* IRQ action list */
 	unsigned int status_use_accessors;
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index d5ac73e6236ad7605e3841b92442f0264ef743f6..c577831778951188a2f15262351c04e0ea112cea 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -104,7 +104,9 @@ struct kernfs_elem_dir {
 	 * Monotonic revision counter, used to identify if a directory
 	 * node has changed during negative dentry revalidation.
 	 */
-	CK_KABI_USE(1, unsigned long rev)
+	unsigned long rev;
+
+	CK_KABI_RESERVE(1)
 };
 
 struct kernfs_elem_symlink {
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index cdb97ec6ac053fe6e03483b59c9856497bdafc25..704c3cfb832310a8898c5115c8c79c1c18240201 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -530,12 +530,12 @@ struct mem_cgroup {
 #endif
 
 #ifdef CONFIG_LRU_GEN
-	CK_KABI_USE(1, unsigned long mglru_batch_size)
-	CK_KABI_USE(2, unsigned long mglru_reclaim_pages)
-#else
+	unsigned long mglru_batch_size;
+	unsigned long mglru_reclaim_pages;
+#endif
+
 	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
-#endif
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
 	CK_KABI_RESERVE(5)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index e7ec2a7b8a6993cff8497145580192833ffec46d..cbec5d177e881cd3fec4e9a142b2ba50ab9a9d9d 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -101,9 +101,10 @@ struct dev_pagemap_ops {
 	 * When this is not implemented, or it returns -EOPNOTSUPP, the caller
 	 * will fall back to a common handler called mf_generic_kill_procs().
 	 */
-	CK_KABI_USE(1,
-		    int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
-					  unsigned long nr_pages, int mf_flags));
+	int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+			      unsigned long nr_pages, int mf_flags);
+
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 };
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad2cbc0f2afc76c5af2c444c1f565ceae48ed066..1c48efef07b8c4e5d6b5093d032f72b96e751bb5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -563,10 +563,10 @@ struct vm_fault {
 	 * atomic context.
 	 */
 #ifdef CONFIG_PAGETABLE_SHARE
-	CK_KABI_USE(1, struct vm_area_struct *orig_vma)	/* Original VMA */
-#else
-	CK_KABI_RESERVE(1)
+	struct vm_area_struct *orig_vma;	/* Original VMA */
 #endif
+
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 80fbded07e43dea5b1fdf8839f79979fe43528d6..67bf788f2fba6ca35743946fc7a644ce17e3d87e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -394,13 +394,12 @@ struct vm_area_struct {
 #endif
 
 #ifdef CONFIG_PAGETABLE_SHARE
-	CK_KABI_USE(1, struct pgtable_share_struct *pgtable_share_data)
-#else
-	CK_KABI_RESERVE(1)
+	struct pgtable_share_struct *pgtable_share_data;
 #endif
+	bool fast_reflink;
 
-	CK_KABI_USE_SPLIT(2, bool fast_reflink);
-
+	CK_KABI_RESERVE(1)
+	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
 } __randomize_layout;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4079eab8a7990459fda574394979aeb528db8686..5938a17e119de0394520dc443e1e0b862692cb56 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -858,7 +858,9 @@ struct zone {
 	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 	atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
 
-	CK_KABI_USE(1, unsigned long reported_pages)
+	unsigned long reported_pages;
+
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7e972b4f7ca1d120a9e15d56d84323ec3ae58cd1..0316acd94d75c4f74a91205c50eca22b015c0779 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2177,6 +2177,8 @@ struct net_device {
 	void *ml_priv;
 	enum netdev_ml_priv_type ml_priv_type;
 
+	enum netdev_stat_type pcpu_stat_type:8;
+
 	union {
 		struct pcpu_lstats __percpu *lstats;
 		struct pcpu_sw_netstats __percpu *tstats;
@@ -2234,9 +2236,10 @@ struct net_device {
 	/* protected by rtnl_lock */
 	struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
 
-	CK_KABI_USE_SPLIT(1, enum netdev_stat_type pcpu_stat_type:8)
 	/** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
-	CK_KABI_USE(2, struct dim_irq_moder *irq_moder)
+	struct dim_irq_moder *irq_moder;
+
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f662a84120a17a959dbade00bf3e9b6894f6157a..1381f54bbee79ab9aa17f2495342ab56a26da89e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -666,6 +666,7 @@ struct perf_event {
 	/* The cumulative AND of all event_caps for events in this group. */
 	int group_caps;
 
+	unsigned int group_generation;
 	struct perf_event *group_leader;
 	struct pmu *pmu;
 	void *pmu_private;
@@ -776,7 +777,7 @@ struct perf_event {
 	struct list_head sb_list;
 #endif /* CONFIG_PERF_EVENTS */
 
-	CK_KABI_USE_SPLIT(1, unsigned int group_generation)
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
@@ -1025,6 +1026,7 @@ struct perf_sample_data {
 	u64 addr;
 	struct perf_raw_record *raw;
 	struct perf_branch_stack *br_stack;
+	u64 *br_stack_cntr;
 	u64 period;
 	union perf_sample_weight weight;
 	u64 txn;
@@ -1058,7 +1060,6 @@ struct perf_sample_data {
 	u64 cgroup;
 	u64 data_page_size;
 	u64 code_page_size;
-	CK_KABI_EXTEND(u64 *br_stack_cntr);
 } ____cacheline_aligned;
 
 /* default value for data source */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e9e93c54477e2d90edc01cd7e7acd12cb9dd7b8..16f819e19f455978b67e3a65e503436c6679d887 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -498,29 +498,25 @@ struct sched_statistics {
 #ifdef CONFIG_SCHED_CORE
 	u64 core_forceidle_sum;
+	u64 core_forceidle_task_sum;
+	u64 forceidled_sum;
+	u64 forceidled_sum_base;
 #endif
 #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
 	u64 core_sibidle_sum;
+	u64 core_sibidle_task_sum;
 #endif
 
-	CK_KABI_USE(1, unsigned long forceidled_sum)
-	CK_KABI_USE(2, unsigned long forceidled_sum_base)
-#ifdef CONFIG_SCHED_CORE
-	CK_KABI_USE(3, unsigned long core_forceidle_task_sum)
-#else
+	CK_KABI_RESERVE(1)
+	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
-#endif
-#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
-	CK_KABI_USE(4, unsigned long core_sibidle_task_sum)
-#else
 	CK_KABI_RESERVE(4)
-#endif
 	CK_KABI_RESERVE(5)
 	CK_KABI_RESERVE(6)
 	CK_KABI_RESERVE(7)
 	CK_KABI_RESERVE(8)
-#endif
+#endif /* CONFIG_SCHEDSTATS */
 };
 
 struct sched_entity {
@@ -546,7 +542,7 @@ struct sched_entity {
 	u64 cg_iowait_start;
 	u64 cg_ineffective_sum;
 	u64 cg_ineffective_start;
-	CK_KABI_REPLACE_SPLIT(seqlock_t idle_seqlock, seqcount_t idle_seqcount);
+	seqcount_t idle_seqcount;
 	spinlock_t iowait_lock;
 
 	u64 nr_migrations;
@@ -592,7 +588,9 @@ struct sched_entity {
 #endif
 #endif
 
-	CK_KABI_USE(1, long priority)
+	long priority;
+
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
@@ -1032,6 +1030,9 @@ struct task_struct {
 	/* CLONE_CHILD_CLEARTID: */
 	int __user *clear_child_tid;
 
+	/* PF_IO_WORKER */
+	void *pf_io_worker;
+
 	u64 utime;
 	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -1513,8 +1514,8 @@ struct task_struct {
 	struct cpumask cpus_allowed_alt;
 	int soft_cpus_version;
 #endif
-	/* PF_IO_WORKER */
-	CK_KABI_USE(1, void *pf_io_worker)
+
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 6f106a14662688480986c65f890313a22b5e86e5..7636440d448ce5e253454319a355f60376065226 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -129,8 +129,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		PREZERO_HW_CLEAR, PREZERO_HW_CLEAR_PAGES,
 #endif
-		CK_KABI_EXTEND_ENUM(ALLOC_REPORTED_PAGE)
-		CK_KABI_EXTEND_ENUM(REPORT_PAGE)
+		ALLOC_REPORTED_PAGE,
+		REPORT_PAGE,
 		NR_VM_EVENT_ITEMS,
 };
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 098cbc479acc9b160a4d36cecc828cbc363ca5d4..d3b94dff556b01555e0535f4bfdca6fa66738597 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -96,7 +96,6 @@ struct net {
 	struct list_head dev_base_head;
 	struct proc_dir_entry *proc_net;
 	struct proc_dir_entry *proc_net_stat;
-	CK_KABI_DEPRECATE(struct proc_dir_entry *, proc_net_smc)
 
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_set sysctls;
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
index 50c22f16a9386dbe547e290b8bca90833fea7748..e3eadec6881a39e4d536091fb8eac98f102fd75b 100644
--- a/include/net/netns/smc.h
+++ b/include/net/netns/smc.h
@@ -28,8 +28,10 @@ struct netns_smc {
 	int sysctl_wmem;
 	int sysctl_rmem;
 	int sysctl_tcp2smc;
-	CK_KABI_USE_SPLIT(1, int sysctl_max_links_per_lgr, int sysctl_max_conns_per_lgr)
+	int sysctl_max_links_per_lgr;
+	int sysctl_max_conns_per_lgr;
 
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3542f43cdfd0ea612e07e2d1b7ea171c920f9b8f..f8568548c7b5d653e0700a4e766112f69ff4ed10 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -44,9 +44,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
-DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, cfsb_csd);
-#endif
 
 #ifdef CONFIG_SCHED_DEBUG
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 85f9c1050d6ebd1cdca541e908438d0f1638851f..e71ba2e2c775c25fe1c9a55a873b768bc98d2fc5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7130,7 +7130,7 @@ static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
 	first = list_empty(&rq->cfsb_csd_list);
 	list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
 	if (first)
-		smp_call_function_single_async(cpu_of(rq), cpu_cfsb_csd(cpu_of(rq)));
+		smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
 }
 #else
 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
@@ -14334,7 +14334,7 @@ __init void init_sched_fair_class(void)
 
 	for_each_possible_cpu(i) {
 #ifdef CONFIG_CFS_BANDWIDTH
-		INIT_CSD(cpu_cfsb_csd(i), __cfsb_csd_unthrottle, cpu_rq(i));
+		INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
 		INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
 #endif
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1045ed300f426d33ffc0fe888b1ac03994774257..84345ae98a940fb994b7b6568d868903b87d7fb2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -554,8 +554,9 @@ struct task_group {
 	int specs_ratio;
 	bool group_balancer;
 #endif
+	long priority;
 
-	CK_KABI_USE(1, long priority)
+	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
@@ -695,11 +696,11 @@ struct cfs_rq {
 #ifdef CONFIG_GROUP_IDENTITY
 	unsigned int nr_tasks;
+	unsigned int h_nr_expel_immune;
 	u64 min_under_vruntime;
-#ifdef CONFIG_SCHED_SMT
 	u64 expel_spread;
 	u64 expel_start;
-	unsigned int h_nr_expel_immune;
+#ifdef CONFIG_SCHED_SMT
 	struct list_head expel_list;
 #endif
 	struct rb_root_cached under_timeline;
@@ -785,20 +786,14 @@ struct cfs_rq {
 	unsigned long nr_uninterruptible;
 
 #ifdef CONFIG_SMP
-	CK_KABI_USE(1, 2, struct list_head throttled_csd_list)
-#else
+	struct list_head throttled_csd_list;
+#endif
+
 	CK_KABI_RESERVE(1)
 	CK_KABI_RESERVE(2)
-#endif
-#if defined(CONFIG_GROUP_IDENTITY) && !defined(CONFIG_SCHED_SMT)
-	CK_KABI_USE(3, unsigned int h_nr_expel_immune)
-	CK_KABI_USE(4, u64 expel_spread)
-	CK_KABI_USE(5, u64 expel_start)
-#else
 	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
 	CK_KABI_RESERVE(5)
-#endif
 	CK_KABI_RESERVE(6)
 	CK_KABI_RESERVE(7)
 	CK_KABI_RESERVE(8)
@@ -1332,6 +1327,7 @@ struct rq {
 	unsigned int core_forceidle_seq;
 	unsigned int core_sibidle_occupation;
 	u64 core_sibidle_start;
+	u64 core_sibidle_start_task;
 	unsigned int core_id;
 	unsigned int core_sibidle_count;
 	bool in_forceidle;
@@ -1339,35 +1335,28 @@ struct rq {
 #endif
 
 #ifdef CONFIG_SCHED_ACPU
-	u64 acpu_idle_sum;
-	u64 sibidle_sum;
-	u64 last_acpu_update_time;
+	u64		acpu_idle_sum;
+	u64		sibidle_sum;
+	u64		last_acpu_update_time;
+	u64		sibidle_task_sum;
+	u64		last_acpu_update_time_task;
 #endif
 
 #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
-	CK_KABI_USE(1, 2, struct list_head cfsb_csd_list)
-#else
-	CK_KABI_RESERVE(1)
-	CK_KABI_RESERVE(2)
+	call_single_data_t cfsb_csd;
+	struct list_head cfsb_csd_list;
 #endif
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) && defined(CONFIG_ARM64)
-	CK_KABI_USE(3, u64 prev_irq_time);
-#else
-	CK_KABI_RESERVE(3)
+	u64 prev_irq_time;
 #endif
-#ifdef CONFIG_SCHED_CORE
-	CK_KABI_USE(4, u64 core_sibidle_start_task)
-#else
+
+	CK_KABI_RESERVE(1)
+	CK_KABI_RESERVE(2)
+	CK_KABI_RESERVE(3)
 	CK_KABI_RESERVE(4)
-#endif
-#ifdef CONFIG_SCHED_ACPU
-	CK_KABI_USE(5, u64 sibidle_task_sum)
-	CK_KABI_USE(6, u64 last_acpu_update_time_task)
-#else
 	CK_KABI_RESERVE(5)
 	CK_KABI_RESERVE(6)
-#endif
 	CK_KABI_RESERVE(7)
 	CK_KABI_RESERVE(8)
 };
@@ -1422,11 +1411,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
 #define raw_rq()	raw_cpu_ptr(&runqueues)
 
-#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
-DECLARE_PER_CPU_SHARED_ALIGNED(call_single_data_t, cfsb_csd);
-#define cpu_cfsb_csd(cpu)	(&per_cpu(cfsb_csd, (cpu)))
-#endif
-
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);
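
A note on the convention this patch unwinds, for readers who do not live in the KABI headers: CK_KABI_RESERVE(n) plants a dummy padding slot in a KABI-frozen structure, and CK_KABI_USE(n, member) later overlays a real member onto that slot without moving any offset; CK_KABI_EXTEND, CK_KABI_DEPRECATE, and CK_KABI_REPLACE are variations on the same theme. The sketch below is illustrative only, not the actual <linux/ck_kabi.h>: it assumes the Cloud Kernel macros follow the familiar RH_KABI pattern (a slot is one u64, USE overlays it via an anonymous union), reduces the real size/alignment checks to a single _Static_assert, and "struct example" with its members is hypothetical.

/* Illustrative sketch only -- not the real <linux/ck_kabi.h>. */
#include <stdint.h>

/* A free padding slot: one dummy u64 reserved for future growth. */
#define CK_KABI_RESERVE(n)	uint64_t ck_reserved##n;

/*
 * Claim slot n for a new member: the anonymous union overlays the new
 * member onto the dummy u64, so every offset and the total size seen by
 * modules compiled against the old header stay unchanged.
 */
#define CK_KABI_USE(n, _new)			\
	union {					\
		_new;				\
		uint64_t ck_reserved##n;	\
	};

struct example {			/* hypothetical structure */
	int in_use;			/* pre-existing, ABI-visible field */
	CK_KABI_USE(1, void *new_ptr)	/* slot 1 now carries a pointer */
	CK_KABI_RESERVE(2)		/* slot 2 still free */
};

/* The overlaid member must fit in the slot it replaces. */
_Static_assert(sizeof(void *) <= sizeof(uint64_t), "member too big for slot");

Read in these terms, each hunk above performs the reverse operation: members that had been tucked into reserved slots (or appended with CK_KABI_EXTEND) become first-class fields at their natural position, and the slots revert to plain CK_KABI_RESERVE padding. That reshuffles structure layout, which is only safe while the branch's KABI baseline is not yet frozen.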