From 30085b7a4e33b170b0d124caf9979d22c7d3b4a6 Mon Sep 17 00:00:00 2001
From: "Yury Norov [NVIDIA]"
Date: Mon, 29 Sep 2025 20:47:22 +0800
Subject: [PATCH] RDMA/hfi1: fix possible divide-by-zero in find_hw_thread_mask()

stable inclusion
from stable-v6.6.103
commit 89fdac333a17ed990b41565630ef4791782e02f5
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICXMAI
CVE: CVE-2025-39742

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=89fdac333a17ed990b41565630ef4791782e02f5

--------------------------------

[ Upstream commit 59f7d2138591ef8f0e4e4ab5f1ab674e8181ad3a ]

The function divides the number of online CPUs by num_core_siblings, and
only afterwards checks the divisor for zero. This implies a possible
divide-by-zero runtime error.

Fix it by moving the check prior to the division. This also helps to
save one indentation level.

Signed-off-by: Yury Norov [NVIDIA]
Link: https://patch.msgid.link/20250604193947.11834-3-yury.norov@gmail.com
Signed-off-by: Leon Romanovsky
Signed-off-by: Sasha Levin
Signed-off-by: Zhang Changzhong
---
 drivers/infiniband/hw/hfi1/affinity.c | 44 +++++++++++++++------------
 1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index bbc957c578e1..e5db39f4720d 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -964,31 +964,35 @@ static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
 				struct hfi1_affinity_node_list *affinity)
 {
 	int possible, curr_cpu, i;
-	uint num_cores_per_socket = node_affinity.num_online_cpus /
+	uint num_cores_per_socket;
+
+	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
+
+	if (affinity->num_core_siblings == 0)
+		return;
+
+	num_cores_per_socket = node_affinity.num_online_cpus /
 					affinity->num_core_siblings /
 					node_affinity.num_online_nodes;
 
-	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
-	if (affinity->num_core_siblings > 0) {
-		/* Removing other siblings not needed for now */
-		possible = cpumask_weight(hw_thread_mask);
-		curr_cpu = cpumask_first(hw_thread_mask);
-		for (i = 0;
-		     i < num_cores_per_socket * node_affinity.num_online_nodes;
-		     i++)
-			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
-
-		for (; i < possible; i++) {
-			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
-			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
-		}
+	/* Removing other siblings not needed for now */
+	possible = cpumask_weight(hw_thread_mask);
+	curr_cpu = cpumask_first(hw_thread_mask);
+	for (i = 0;
+	     i < num_cores_per_socket * node_affinity.num_online_nodes;
+	     i++)
+		curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
 
-		/* Identifying correct HW threads within physical cores */
-		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
-				   num_cores_per_socket *
-				   node_affinity.num_online_nodes *
-				   hw_thread_no);
+	for (; i < possible; i++) {
+		cpumask_clear_cpu(curr_cpu, hw_thread_mask);
+		curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
 	}
+
+	/* Identifying correct HW threads within physical cores */
+	cpumask_shift_left(hw_thread_mask, hw_thread_mask,
+			   num_cores_per_socket *
+			   node_affinity.num_online_nodes *
+			   hw_thread_no);
 }
 
 int hfi1_get_proc_affinity(int node)
-- 
Gitee
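
For illustration only (not part of the patch above): the fix amounts to checking the divisor before performing the division. Below is a minimal, standalone userspace sketch of the same check-before-divide pattern; the helper name and parameters are hypothetical and only mirror the shape of the kernel calculation, they are not hfi1 code.

/*
 * Standalone illustration of the check-before-divide pattern applied by the
 * fix. The function and parameter names are hypothetical; the kernel code
 * operates on node_affinity/affinity fields instead.
 */
#include <stdio.h>

static unsigned int cores_per_socket(unsigned int online_cpus,
				     unsigned int core_siblings,
				     unsigned int online_nodes)
{
	/* Bail out before dividing, mirroring the early return in the fix. */
	if (core_siblings == 0 || online_nodes == 0)
		return 0;

	return online_cpus / core_siblings / online_nodes;
}

int main(void)
{
	/* Zero siblings: returns 0 instead of a divide-by-zero fault. */
	printf("%u\n", cores_per_socket(64, 0, 2));
	/* Typical case: 64 CPUs, 2 threads per core, 2 nodes -> 16. */
	printf("%u\n", cores_per_socket(64, 2, 2));
	return 0;
}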