From 2ec0226832b0c4adfbef31c9b2e3f8338a6bab17 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Thu, 13 Jun 2024 10:41:01 +0800 Subject: [PATCH] anolis: kabi: revert all kabi_use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9320 Because we don't take care of kABI consistency between major versions now, revert kabi use; these reserved spaces are intended to ensure kABI compatibility between minor versions and their corresponding major version. Signed-off-by: Guixin Liu Signed-off-by: Cruz Zhao --- include/linux/cgroup-defs.h | 18 +++++++---- include/linux/fs.h | 5 ++-- include/linux/iommu.h | 3 +- include/linux/irqdesc.h | 2 +- include/linux/kernfs.h | 4 ++- include/linux/memcontrol.h | 8 ++--- include/linux/memremap.h | 7 +++-- include/linux/mm.h | 6 ++-- include/linux/mm_types.h | 9 +++--- include/linux/mmzone.h | 4 ++- include/linux/netdevice.h | 7 +++-- include/linux/perf_event.h | 5 ++-- include/linux/sched.h | 31 +++++++++---------- include/linux/vm_event_item.h | 4 +-- include/net/net_namespace.h | 1 - include/net/netns/smc.h | 4 ++- kernel/sched/core.c | 3 -- kernel/sched/fair.c | 4 +-- kernel/sched/sched.h | 56 +++++++++++++---------------------- 19 files changed, 90 insertions(+), 91 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index aa02523f2426..872a637f9d66 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -363,12 +363,13 @@ struct cgroup_rstat_cpu { */ struct u64_stats_sync bsync; struct cgroup_base_stat bstat; - + struct cgroup_base_stat_task bstat_task; /* * Snapshots at the last reading. These are used to calculate the * deltas to propagate to the global counters. 
*/ struct cgroup_base_stat last_bstat; + struct cgroup_base_stat_task last_bstat_task; /* * Child cgroups with stat updates on this cpu since the last read @@ -383,8 +384,11 @@ struct cgroup_rstat_cpu { */ struct cgroup *updated_children; /* terminated by self cgroup */ struct cgroup *updated_next; /* NULL iff not on the list */ - CK_KABI_EXTEND(struct cgroup_base_stat_task bstat_task) - CK_KABI_EXTEND(struct cgroup_base_stat_task last_bstat_task) + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cgroup_freezer_state { @@ -507,7 +511,9 @@ struct cgroup { /* cgroup basic resource statistics */ struct cgroup_base_stat last_bstat; + struct cgroup_base_stat_task last_bstat_task; struct cgroup_base_stat bstat; + struct cgroup_base_stat_task bstat_task; struct prev_cputime prev_cputime; /* for printing out cputime */ /* @@ -544,8 +550,10 @@ struct cgroup { struct kernfs_root *hidden_place; /* tree to hide cgroup in pool. */ struct delayed_work supply_pool_work; - CK_KABI_USE(1, 2, struct cgroup_base_stat_task last_bstat_task) - CK_KABI_USE(3, 4, struct cgroup_base_stat_task bstat_task) + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) /* ids of the ancestors at each level including self */ u64 ancestor_ids[]; diff --git a/include/linux/fs.h b/include/linux/fs.h index d0e9620729b6..c2e494247da1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -488,9 +488,9 @@ struct address_space { struct list_head private_list; void *private_data; - /* CK_KABI_RESERVE(1) */ - CK_KABI_USE(1, struct fast_reflink_work *fast_reflink_work); + struct fast_reflink_work *fast_reflink_work; + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) @@ -1915,7 +1915,6 @@ struct file_operations { int (*fadvise)(struct file *, loff_t, loff_t, int); int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int (*uring_cmd_iopoll)(struct io_uring_cmd *ioucmd); - 
CK_KABI_DEPRECATE(bool, may_pollfree) CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b7e046a72586..b565d2f4840b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -214,8 +214,9 @@ struct iommu_iotlb_gather { unsigned long end; size_t pgsize; struct page *freelist; + bool queued; - CK_KABI_USE(1, bool queued) + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) }; diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index b53b43973568..5e881757cbe6 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -68,7 +68,7 @@ struct irqstat { struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; - CK_KABI_REPLACE(unsigned int __percpu *kstat_irqs, struct irqstat __percpu *kstat_irqs); + struct irqstat __percpu *kstat_irqs; irq_flow_handler_t handle_irq; struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index d5ac73e6236a..c57783177895 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -104,7 +104,9 @@ struct kernfs_elem_dir { * Monotonic revision counter, used to identify if a directory * node has changed during negative dentry revalidation. 
*/ - CK_KABI_USE(1, unsigned long rev) + unsigned long rev; + + CK_KABI_RESERVE(1) }; struct kernfs_elem_symlink { diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index cdb97ec6ac05..704c3cfb8323 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -530,12 +530,12 @@ struct mem_cgroup { #endif #ifdef CONFIG_LRU_GEN - CK_KABI_USE(1, unsigned long mglru_batch_size) - CK_KABI_USE(2, unsigned long mglru_reclaim_pages) -#else + unsigned long mglru_batch_size; + unsigned long mglru_reclaim_pages; +#endif + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) -#endif CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) CK_KABI_RESERVE(5) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index e7ec2a7b8a69..cbec5d177e88 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -101,9 +101,10 @@ struct dev_pagemap_ops { * When this is not implemented, or it returns -EOPNOTSUPP, the caller * will fall back to a common handler called mf_generic_kill_procs(). */ - CK_KABI_USE(1, - int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, - unsigned long nr_pages, int mf_flags)); + int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, + unsigned long nr_pages, int mf_flags); + + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) }; diff --git a/include/linux/mm.h b/include/linux/mm.h index ad2cbc0f2afc..1c48efef07b8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -563,10 +563,10 @@ struct vm_fault { * atomic context. 
*/ #ifdef CONFIG_PAGETABLE_SHARE - CK_KABI_USE(1, struct vm_area_struct *orig_vma) /* Original VMA */ -#else - CK_KABI_RESERVE(1) + struct vm_area_struct *orig_vma; /* Original VMA */ #endif + + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 80fbded07e43..67bf788f2fba 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -394,13 +394,12 @@ struct vm_area_struct { #endif #ifdef CONFIG_PAGETABLE_SHARE - CK_KABI_USE(1, struct pgtable_share_struct *pgtable_share_data) -#else - CK_KABI_RESERVE(1) + struct pgtable_share_struct *pgtable_share_data; #endif + bool fast_reflink; - CK_KABI_USE_SPLIT(2, bool fast_reflink); - + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) } __randomize_layout; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 4079eab8a799..5938a17e119d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -858,7 +858,9 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; - CK_KABI_USE(1, unsigned long reported_pages) + unsigned long reported_pages; + + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7e972b4f7ca1..0316acd94d75 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2177,6 +2177,8 @@ struct net_device { void *ml_priv; enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type:8; + union { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; @@ -2234,9 +2236,10 @@ struct net_device { /* protected by rtnl_lock */ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; - CK_KABI_USE_SPLIT(1, enum netdev_stat_type pcpu_stat_type:8) /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). 
*/ - CK_KABI_USE(2, struct dim_irq_moder *irq_moder) + struct dim_irq_moder *irq_moder; + + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f662a84120a1..1381f54bbee7 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -666,6 +666,7 @@ struct perf_event { /* The cumulative AND of all event_caps for events in this group. */ int group_caps; + unsigned int group_generation; struct perf_event *group_leader; struct pmu *pmu; void *pmu_private; @@ -776,7 +777,7 @@ struct perf_event { struct list_head sb_list; #endif /* CONFIG_PERF_EVENTS */ - CK_KABI_USE_SPLIT(1, unsigned int group_generation) + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) @@ -1025,6 +1026,7 @@ struct perf_sample_data { u64 addr; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; u64 period; union perf_sample_weight weight; u64 txn; @@ -1058,7 +1060,6 @@ struct perf_sample_data { u64 cgroup; u64 data_page_size; u64 code_page_size; - CK_KABI_EXTEND(u64 *br_stack_cntr); } ____cacheline_aligned; /* default value for data source */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 6e9e93c54477..16f819e19f45 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -498,29 +498,25 @@ struct sched_statistics { #ifdef CONFIG_SCHED_CORE u64 core_forceidle_sum; + u64 core_forceidle_task_sum; + u64 forceidled_sum; + u64 forceidled_sum_base; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) u64 core_sibidle_sum; + u64 core_sibidle_task_sum; #endif - CK_KABI_USE(1, unsigned long forceidled_sum) - CK_KABI_USE(2, unsigned long forceidled_sum_base) -#ifdef CONFIG_SCHED_CORE - CK_KABI_USE(3, unsigned long core_forceidle_task_sum) -#else + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) -#endif -#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) - CK_KABI_USE(4, 
unsigned long core_sibidle_task_sum) -#else CK_KABI_RESERVE(4) -#endif CK_KABI_RESERVE(5) CK_KABI_RESERVE(6) CK_KABI_RESERVE(7) CK_KABI_RESERVE(8) -#endif +#endif /* CONFIG_SCHEDSTATS */ }; struct sched_entity { @@ -546,7 +542,7 @@ struct sched_entity { u64 cg_iowait_start; u64 cg_ineffective_sum; u64 cg_ineffective_start; - CK_KABI_REPLACE_SPLIT(seqlock_t idle_seqlock, seqcount_t idle_seqcount); + seqcount_t idle_seqcount; spinlock_t iowait_lock; u64 nr_migrations; @@ -592,7 +588,9 @@ struct sched_entity { #endif #endif - CK_KABI_USE(1, long priority) + long priority; + + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) @@ -1032,6 +1030,9 @@ struct task_struct { /* CLONE_CHILD_CLEARTID: */ int __user *clear_child_tid; + /* PF_IO_WORKER */ + void *pf_io_worker; + u64 utime; u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME @@ -1513,8 +1514,8 @@ struct task_struct { struct cpumask cpus_allowed_alt; int soft_cpus_version; #endif - /* PF_IO_WORKER */ - CK_KABI_USE(1, void *pf_io_worker) + + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 6f106a146626..7636440d448c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -129,8 +129,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PREZERO_HW_CLEAR, PREZERO_HW_CLEAR_PAGES, #endif - CK_KABI_EXTEND_ENUM(ALLOC_REPORTED_PAGE) - CK_KABI_EXTEND_ENUM(REPORT_PAGE) + ALLOC_REPORTED_PAGE, + REPORT_PAGE, NR_VM_EVENT_ITEMS, }; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 098cbc479acc..d3b94dff556b 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -96,7 +96,6 @@ struct net { struct list_head dev_base_head; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; - CK_KABI_DEPRECATE(struct proc_dir_entry *, proc_net_smc) #ifdef CONFIG_SYSCTL struct ctl_table_set sysctls; diff --git 
a/include/net/netns/smc.h b/include/net/netns/smc.h index 50c22f16a938..e3eadec6881a 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -28,8 +28,10 @@ struct netns_smc { int sysctl_wmem; int sysctl_rmem; int sysctl_tcp2smc; - CK_KABI_USE_SPLIT(1, int sysctl_max_links_per_lgr, int sysctl_max_conns_per_lgr) + int sysctl_max_links_per_lgr; + int sysctl_max_conns_per_lgr; + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3542f43cdfd0..f8568548c7b5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -44,9 +44,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) -DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, cfsb_csd); -#endif #ifdef CONFIG_SCHED_DEBUG /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 85f9c1050d6e..e71ba2e2c775 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7130,7 +7130,7 @@ static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) first = list_empty(&rq->cfsb_csd_list); list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list); if (first) - smp_call_function_single_async(cpu_of(rq), cpu_cfsb_csd(cpu_of(rq))); + smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd); } #else static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) @@ -14334,7 +14334,7 @@ __init void init_sched_fair_class(void) for_each_possible_cpu(i) { #ifdef CONFIG_CFS_BANDWIDTH - INIT_CSD(cpu_cfsb_csd(i), __cfsb_csd_unthrottle, cpu_rq(i)); + INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i)); INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list); #endif } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1045ed300f42..84345ae98a94 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -554,8 +554,9 @@ struct 
task_group { int specs_ratio; bool group_balancer; #endif + long priority; - CK_KABI_USE(1, long priority) + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) @@ -695,11 +696,11 @@ struct cfs_rq { #ifdef CONFIG_GROUP_IDENTITY unsigned int nr_tasks; + unsigned int h_nr_expel_immune; u64 min_under_vruntime; -#ifdef CONFIG_SCHED_SMT u64 expel_spread; u64 expel_start; - unsigned int h_nr_expel_immune; +#ifdef CONFIG_SCHED_SMT struct list_head expel_list; #endif struct rb_root_cached under_timeline; @@ -785,20 +786,14 @@ struct cfs_rq { unsigned long nr_uninterruptible; #ifdef CONFIG_SMP - CK_KABI_USE(1, 2, struct list_head throttled_csd_list) -#else + struct list_head throttled_csd_list; +#endif + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) -#endif -#if defined(CONFIG_GROUP_IDENTITY) && !defined(CONFIG_SCHED_SMT) - CK_KABI_USE(3, unsigned int h_nr_expel_immune) - CK_KABI_USE(4, u64 expel_spread) - CK_KABI_USE(5, u64 expel_start) -#else CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) CK_KABI_RESERVE(5) -#endif CK_KABI_RESERVE(6) CK_KABI_RESERVE(7) CK_KABI_RESERVE(8) @@ -1332,6 +1327,7 @@ struct rq { unsigned int core_forceidle_seq; unsigned int core_sibidle_occupation; u64 core_sibidle_start; + u64 core_sibidle_start_task; unsigned int core_id; unsigned int core_sibidle_count; bool in_forceidle; @@ -1339,35 +1335,28 @@ struct rq { #endif #ifdef CONFIG_SCHED_ACPU - u64 acpu_idle_sum; - u64 sibidle_sum; - u64 last_acpu_update_time; + u64 acpu_idle_sum; + u64 sibidle_sum; + u64 last_acpu_update_time; + u64 sibidle_task_sum; + u64 last_acpu_update_time_task; #endif #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) - CK_KABI_USE(1, 2, struct list_head cfsb_csd_list) -#else - CK_KABI_RESERVE(1) - CK_KABI_RESERVE(2) + call_single_data_t cfsb_csd; + struct list_head cfsb_csd_list; #endif #if defined(CONFIG_IRQ_TIME_ACCOUNTING) && defined(CONFIG_ARM64) - CK_KABI_USE(3, u64 prev_irq_time); -#else - CK_KABI_RESERVE(3) + u64 prev_irq_time; #endif -#ifdef 
CONFIG_SCHED_CORE - CK_KABI_USE(4, u64 core_sibidle_start_task) -#else + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) CK_KABI_RESERVE(4) -#endif -#ifdef CONFIG_SCHED_ACPU - CK_KABI_USE(5, u64 sibidle_task_sum) - CK_KABI_USE(6, u64 last_acpu_update_time_task) -#else CK_KABI_RESERVE(5) CK_KABI_RESERVE(6) -#endif CK_KABI_RESERVE(7) CK_KABI_RESERVE(8) }; @@ -1422,11 +1411,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define raw_rq() raw_cpu_ptr(&runqueues) -#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) -DECLARE_PER_CPU_SHARED_ALIGNED(call_single_data_t, cfsb_csd); -#define cpu_cfsb_csd(cpu) (&per_cpu(cfsb_csd, (cpu))) -#endif - struct sched_group; #ifdef CONFIG_SCHED_CORE static inline struct cpumask *sched_group_span(struct sched_group *sg); -- Gitee