From 84e122368ec3a37e074c9657bc09422b62f6ccd0 Mon Sep 17 00:00:00 2001
From: Yizhen Fan
Date: Mon, 20 Nov 2023 20:03:32 +0800
Subject: [PATCH] ub: add new feature for urma driver

inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8HQP7
CVE: NA

------------------------------------------------

The UB feature has evolved. The UB protocol has been optimized and the
URMA feature has been added to support the UB hardware. The following
features are added:
- Device management
  Configuring the EID and UPI.
  Supports device source IP address management, UBoE MAC address, and VLAN.
  Function hot swap.
- Supports TP link setup management.
  Creating and deleting TPs of the RM, RC, and UM types.
  The following TP parameters can be configured: MTU, out-of-order
  reception, TP retransmission times and timeout, congestion algorithm
  and parameters, selective retransmission, DSCP, PSN, UDP port number,
  hop_limit, spray_en, DCA (Dynamic Connection Management).
  Supports destination IP address management and UBoE destination MAC
  address and VLAN query and configuration.
  TP fault handling.
- TPG management
  TPGs can be created and deleted. TP attributes in a TPG can be updated.
- Jetty management
  The suspend mode of Jetty and JFS is supported.
  The flush Jetty and JFS functions are supported.
  The Jetty, JFS, and JFR attributes or status can be queried.
  The Jetty and JFS status can be modified.
  The Jetty and JFR thresholds and watermarks can be modified.
  RC tables can be created and deleted.
- Supports Jetty groups.
  Jetty groups can be created and deleted.
  Importing and unimporting Jetty groups.
  Creating and deleting a Jetty in a Jetty group.
  The Jetty group can be used to perform operations on the data plane.
- Supports segment security management.
  Allocates and releases token IDs, and supports kernel-mode and
  user-mode permissions.
  Supports the segment security mechanism.
- VPC information can be configured on OpenStack.
  Supports VM EID configuration and management.
  Supports VM UB device management.
- Supports gaea connect.
  Supports gaea hook framework connect.
- Create and delete VTPs, manage VTPs, and configure the mapping between
  VTPs and TPs, TPGs, or UTPs (DIPs) to drivers.
  The UVS/UBCore supports VTP table management, including initializing
  the VTP table and adding and deleting VTP entries.
- Supports the UVS dynamic library and UVS servitization.
  Supports the UVS dynamic library (running in the Gaea process).
  Supports the UVS daemon process and UVS admin process.
- Kernel-mode and user-mode VTP resources and DFX, such as RC tables.
  Kernel-mode and user-mode VTP resource DFX.
  RC table DFX.

Signed-off-by: Guoxin Qian
Signed-off-by: Yizhen Fan
Signed-off-by: Chunzhi Hu
---
 drivers/ub/Kconfig | 2 +-
 drivers/ub/hw/hns3/Makefile | 2 +-
 drivers/ub/hw/hns3/hns3_udma_cmd.c | 4 +-
 drivers/ub/hw/hns3/hns3_udma_cmd.h | 1 +
 drivers/ub/hw/hns3/hns3_udma_device.h | 13 +-
 drivers/ub/hw/hns3/hns3_udma_dfx.c | 72 +-
 drivers/ub/hw/hns3/hns3_udma_dfx.h | 3 +-
 drivers/ub/hw/hns3/hns3_udma_eid.c | 183 ++
 drivers/ub/hw/hns3/hns3_udma_eid.h | 75 +
 drivers/ub/hw/hns3/hns3_udma_hw.c | 97 +-
 drivers/ub/hw/hns3/hns3_udma_hw.h | 55 -
 drivers/ub/hw/hns3/hns3_udma_jetty.c | 64 +-
 drivers/ub/hw/hns3/hns3_udma_jetty.h | 9 +-
 drivers/ub/hw/hns3/hns3_udma_jfc.c | 12 +-
 drivers/ub/hw/hns3/hns3_udma_jfc.h | 6 +-
 drivers/ub/hw/hns3/hns3_udma_jfr.c | 20 +-
 drivers/ub/hw/hns3/hns3_udma_jfr.h | 10 +-
 drivers/ub/hw/hns3/hns3_udma_jfs.c | 23 +-
 drivers/ub/hw/hns3/hns3_udma_jfs.h | 4 +-
 drivers/ub/hw/hns3/hns3_udma_main.c | 82 +-
 drivers/ub/hw/hns3/hns3_udma_qp.c | 273 +--
 drivers/ub/hw/hns3/hns3_udma_qp.h | 30 +-
 drivers/ub/hw/hns3/hns3_udma_segment.c | 24 +-
 drivers/ub/hw/hns3/hns3_udma_segment.h | 8 +-
 drivers/ub/hw/hns3/hns3_udma_sysfs.c | 10 +-
 drivers/ub/hw/hns3/hns3_udma_tp.c | 25 +-
 drivers/ub/hw/hns3/hns3_udma_tp.h | 12 +-
drivers/ub/urma/ubcore/Makefile | 9 +- drivers/ub/urma/ubcore/ubcore_cmd.h | 64 +- drivers/ub/urma/ubcore/ubcore_ctp.c | 86 + drivers/ub/urma/ubcore/ubcore_ctp.h | 28 + drivers/ub/urma/ubcore/ubcore_device.c | 776 ++++++- drivers/ub/urma/ubcore/ubcore_dp.c | 9 +- drivers/ub/urma/ubcore/ubcore_hash_table.c | 14 +- drivers/ub/urma/ubcore/ubcore_hash_table.h | 8 +- drivers/ub/urma/ubcore/ubcore_jetty.c | 626 +++++- drivers/ub/urma/ubcore/ubcore_main.c | 823 ++++++-- drivers/ub/urma/ubcore/ubcore_msg.c | 461 +++++ drivers/ub/urma/ubcore/ubcore_msg.h | 96 + drivers/ub/urma/ubcore/ubcore_netdev.c | 391 ++++ drivers/ub/urma/ubcore/ubcore_netdev.h | 46 + drivers/ub/urma/ubcore/ubcore_netlink.c | 227 ++- drivers/ub/urma/ubcore/ubcore_netlink.h | 109 +- drivers/ub/urma/ubcore/ubcore_priv.h | 125 +- drivers/ub/urma/ubcore/ubcore_segment.c | 103 +- drivers/ub/urma/ubcore/ubcore_tp.c | 1282 +++++------- drivers/ub/urma/ubcore/ubcore_tp.h | 16 +- drivers/ub/urma/ubcore/ubcore_tp_table.c | 32 +- drivers/ub/urma/ubcore/ubcore_tp_table.h | 4 +- drivers/ub/urma/ubcore/ubcore_tpg.c | 200 ++ drivers/ub/urma/ubcore/ubcore_tpg.h | 34 + drivers/ub/urma/ubcore/ubcore_umem.c | 20 +- drivers/ub/urma/ubcore/ubcore_utp.c | 120 ++ drivers/ub/urma/ubcore/ubcore_utp.h | 29 + drivers/ub/urma/ubcore/ubcore_uvs_cmd.c | 2148 ++++++++++++++++++++ drivers/ub/urma/ubcore/ubcore_uvs_cmd.h | 421 ++++ drivers/ub/urma/ubcore/ubcore_vtp.c | 549 +++++ drivers/ub/urma/ubcore/ubcore_vtp.h | 115 ++ drivers/ub/urma/uburma/uburma_cdev_file.c | 573 ++++-- drivers/ub/urma/uburma/uburma_cdev_file.h | 50 +- drivers/ub/urma/uburma/uburma_cmd.c | 605 ++++-- drivers/ub/urma/uburma/uburma_cmd.h | 218 +- drivers/ub/urma/uburma/uburma_dev_ops.c | 2 +- drivers/ub/urma/uburma/uburma_event.c | 40 +- drivers/ub/urma/uburma/uburma_main.c | 474 ++++- drivers/ub/urma/uburma/uburma_types.h | 37 +- drivers/ub/urma/uburma/uburma_uobj.c | 108 +- drivers/ub/urma/uburma/uburma_uobj.h | 56 +- include/urma/ubcore_api.h | 61 +- 
include/urma/ubcore_opcode.h | 105 +- include/urma/ubcore_types.h | 1699 +++++++++++----- include/urma/ubcore_uapi.h | 149 +- 72 files changed, 11451 insertions(+), 2816 deletions(-) create mode 100644 drivers/ub/hw/hns3/hns3_udma_eid.c create mode 100644 drivers/ub/hw/hns3/hns3_udma_eid.h create mode 100644 drivers/ub/urma/ubcore/ubcore_ctp.c create mode 100644 drivers/ub/urma/ubcore/ubcore_ctp.h create mode 100644 drivers/ub/urma/ubcore/ubcore_msg.c create mode 100644 drivers/ub/urma/ubcore/ubcore_msg.h create mode 100644 drivers/ub/urma/ubcore/ubcore_netdev.c create mode 100644 drivers/ub/urma/ubcore/ubcore_netdev.h create mode 100644 drivers/ub/urma/ubcore/ubcore_tpg.c create mode 100644 drivers/ub/urma/ubcore/ubcore_tpg.h create mode 100644 drivers/ub/urma/ubcore/ubcore_utp.c create mode 100644 drivers/ub/urma/ubcore/ubcore_utp.h create mode 100644 drivers/ub/urma/ubcore/ubcore_uvs_cmd.c create mode 100644 drivers/ub/urma/ubcore/ubcore_uvs_cmd.h create mode 100644 drivers/ub/urma/ubcore/ubcore_vtp.c create mode 100644 drivers/ub/urma/ubcore/ubcore_vtp.h diff --git a/drivers/ub/Kconfig b/drivers/ub/Kconfig index 74640ce3c933..c1b97ea70272 100644 --- a/drivers/ub/Kconfig +++ b/drivers/ub/Kconfig @@ -2,7 +2,7 @@ menuconfig UB tristate "Unified Bus (UB) core support" - depends on ARM64 || X86_64 || COMPILE_TEST + depends on ARM64 || X86_64 default n help Core support for Unified Bus (UB). 
diff --git a/drivers/ub/hw/hns3/Makefile b/drivers/ub/hw/hns3/Makefile index 533cadbd8263..077926fc8aaf 100644 --- a/drivers/ub/hw/hns3/Makefile +++ b/drivers/ub/hw/hns3/Makefile @@ -12,6 +12,6 @@ $(MODULE_NAME)-objs := hns3_udma_hw.o hns3_udma_main.o hns3_udma_cmd.o \ hns3_udma_db.o hns3_udma_jfc.o hns3_udma_jfr.o \ hns3_udma_segment.o hns3_udma_tp.o hns3_udma_jfs.o \ hns3_udma_jetty.o hns3_udma_sysfs.o hns3_udma_dca.o \ - hns3_udma_dfx.o + hns3_udma_dfx.o hns3_udma_eid.o obj-$(CONFIG_UB_UDMA_HNS3) := hns3_udma.o diff --git a/drivers/ub/hw/hns3/hns3_udma_cmd.c b/drivers/ub/hw/hns3/hns3_udma_cmd.c index 7271ab5cc002..0216299bebdd 100644 --- a/drivers/ub/hw/hns3/hns3_udma_cmd.c +++ b/drivers/ub/hw/hns3/hns3_udma_cmd.c @@ -161,6 +161,7 @@ static uint32_t udma_cmd_hw_resetting(struct udma_dev *dev, ret = read_poll_timeout_atomic(ops->ae_dev_reset_cnt, val, val > dev->reset_cnt, HW_RESET_DELAY_US, HW_RESET_TIMEOUT_US, false, handle); + cond_resched(); if (!ret) dev->is_reset = true; @@ -298,10 +299,11 @@ static int __udma_cmq_send(struct udma_dev *dev, struct udma_cmq_desc *desc, int ret = 0; int i; - tail = csq->head; mutex_lock(&csq->lock); + tail = csq->head; + for (i = 0; i < num; i++) { csq->desc[csq->head++] = desc[i]; if (csq->head == csq->desc_num) diff --git a/drivers/ub/hw/hns3/hns3_udma_cmd.h b/drivers/ub/hw/hns3/hns3_udma_cmd.h index ece4ed394456..6de684799916 100644 --- a/drivers/ub/hw/hns3/hns3_udma_cmd.h +++ b/drivers/ub/hw/hns3/hns3_udma_cmd.h @@ -101,6 +101,7 @@ enum { UDMA_CMD_DESTROY_CEQC = 0x93, /* SCC CTX BT commands */ + UDMA_CMD_QUERY_SCCC = 0xa2, UDMA_CMD_READ_SCCC_BT0 = 0xa4, UDMA_CMD_WRITE_SCCC_BT0 = 0xa5, diff --git a/drivers/ub/hw/hns3/hns3_udma_device.h b/drivers/ub/hw/hns3/hns3_udma_device.h index 78e03558fe75..152c2a059bef 100644 --- a/drivers/ub/hw/hns3/hns3_udma_device.h +++ b/drivers/ub/hw/hns3/hns3_udma_device.h @@ -61,6 +61,8 @@ #define UDMA_HOP_NUM_0 0xff #define UDMA_CAP_FLAGS_EX_SHIFT 12 +#define UDMA_MAX_EID_NUM 1024 + #define 
UDMA_CMQ_TX_TIMEOUT 30000 #define UDMA_CMQ_DESC_NUM_S 3 #define UDMA_CMD_CSQ_DESC_NUM 1024 @@ -76,6 +78,8 @@ #define UDMA_MAX_BT_REGION 3 #define UDMA_MAX_BT_LEVEL 3 +#define CQ_BANKID_MASK GENMASK(1, 0) + #define CQC_FIELD_LOC(h, l) ((uint64_t)(h) << 32 | (l)) #define CQC_CQE_BA_L_OFFSET 3 @@ -430,6 +434,7 @@ struct udma_ucontext { uint64_t pdn; struct udma_dca_ctx dca_ctx; void *dca_dbgfs; + uint32_t eid_index; }; struct udma_cmd_context { @@ -544,7 +549,6 @@ struct udma_hw { int (*clear_hem)(struct udma_dev *udma_dev, struct udma_hem_table *table, int obj, int step_idx); - int (*set_eid)(struct udma_dev *udma_dev, union ubcore_eid eid); int (*init_eq)(struct udma_dev *udma_dev); void (*cleanup_eq)(struct udma_dev *udma_dev); }; @@ -552,7 +556,6 @@ struct udma_hw { struct udma_caps { uint64_t fw_ver; uint8_t num_ports; - int gid_table_len[UDMA_MAX_PORTS]; int pkey_table_len[UDMA_MAX_PORTS]; int local_ca_ack_delay; int num_uars; @@ -696,6 +699,7 @@ struct udma_caps { uint32_t num_jetty_shift; uint8_t poe_ch_num; uint32_t speed; + uint32_t max_eid_cnt; }; struct udma_idx_table { @@ -819,7 +823,6 @@ struct udma_dev { int irq[UDMA_MAX_IRQ_NUM]; const char *irq_names[UDMA_MAX_IRQ_NUM]; char dev_name[UBCORE_MAX_DEV_NAME]; - uint64_t sys_image_guid; struct udma_cmdq cmd; int cmd_mod; struct page *reset_page; /* store reset state */ @@ -842,6 +845,7 @@ struct udma_dev { struct udma_hem_table qpc_timer_table; struct udma_hem_table cqc_timer_table; struct udma_hem_table gmv_table; + struct xarray eid_table; uint64_t dwqe_page; uint64_t dfx_cnt[UDMA_DFX_EQ_TOTAL]; struct list_head qp_list; @@ -862,6 +866,7 @@ struct udma_seg { uint32_t pbl_hop_num; struct udma_mtr pbl_mtr; uint32_t npages; + struct udma_ucontext *ctx; }; static inline void *udma_buf_offset(struct udma_buf *buf, @@ -895,7 +900,7 @@ static inline struct udma_ucontext return container_of(uctx, struct udma_ucontext, uctx); } -static inline struct udma_dev *to_udma_dev(const struct ubcore_device *ubcore_dev) 
+static inline struct udma_dev *to_udma_dev(struct ubcore_device *ubcore_dev) { return container_of(ubcore_dev, struct udma_dev, ub_dev); } diff --git a/drivers/ub/hw/hns3/hns3_udma_dfx.c b/drivers/ub/hw/hns3/hns3_udma_dfx.c index c7437290ecb0..421ae76085ec 100644 --- a/drivers/ub/hw/hns3/hns3_udma_dfx.c +++ b/drivers/ub/hw/hns3/hns3_udma_dfx.c @@ -153,12 +153,42 @@ static void udma_dfx_qpc_print(struct udma_dev *udma_dev, uint32_t qpn, "*********************************************************\n"); } +static void udma_dfx_query_sccc(struct udma_dev *udma_dev, uint32_t sccc_id) +{ + uint32_t *sccc, *temp; + int ret, i, loop_cnt; + + sccc = kzalloc(udma_dev->caps.scc_ctx_sz, GFP_KERNEL); + if (!sccc) + return; + + ret = udma_dfx_query_context(udma_dev, sccc_id, (void *)sccc, + udma_dev->caps.scc_ctx_sz, + UDMA_CMD_QUERY_SCCC); + if (ret) { + dev_err(udma_dev->dev, + "query sccc failed, ret = %d\n", ret); + kfree(sccc); + return; + } + + dev_info(udma_dev->dev, "************ SCC(0x%8x) CONTEXT INFO *************\n", sccc_id); + temp = sccc; + loop_cnt = udma_dev->caps.scc_ctx_sz / sizeof(uint32_t); + for (i = 0; i < loop_cnt; i++) { + pr_info("SCCC(byte%4lu): 0x%08x\n", (i + 1) * sizeof(uint32_t), *temp); + temp++; + } + dev_info(udma_dev->dev, "*********************************************************\n"); + kfree(sccc); +} + static int udma_dfx_tp_store(const char *p_buf, struct udma_dfx_info *udma_dfx) { struct udma_dev *udma_dev = (struct udma_dev *)udma_dfx->priv; struct udma_qp_context qp_context; char str[UDMA_DFX_STR_LEN_MAX]; - uint32_t tpn; + uint32_t tpn, sccc_id; int ret; ret = udma_dfx_read_buf(str, p_buf); @@ -181,6 +211,14 @@ static int udma_dfx_tp_store(const char *p_buf, struct udma_dfx_info *udma_dfx) } udma_dfx_qpc_print(udma_dev, tpn, &qp_context); + if ((udma_dev->caps.flags & UDMA_CAP_FLAG_QP_FLOW_CTRL) && + udma_reg_read(&qp_context, QPC_QP_ST)) { + sccc_id = tpn; + if (udma_reg_read(&qp_context.ext, QPCEX_DIP_CTX_IDX_VLD)) + sccc_id = 
udma_reg_read(&qp_context.ext, QPCEX_DIP_CTX_IDX); + udma_dfx_query_sccc(udma_dev, sccc_id); + } + return 0; } @@ -322,9 +360,9 @@ static int udma_query_res_tp(struct udma_dev *udma_dev, } tp->tpn = key->key; - tp->psn = udma_reg_read(&qp_context, QPC_SQ_CUR_PSN); - tp->pri = udma_reg_read(&qp_context, QPC_SL); - tp->oor = udma_reg_read(&qp_context.ext, QPCEX_OOR_EN); + tp->tx_psn = udma_reg_read(&qp_context, QPC_SQ_CUR_PSN); + tp->dscp = udma_reg_read(&qp_context, QPC_DSCP); + tp->oor_en = udma_reg_read(&qp_context.ext, QPCEX_OOR_EN); tp->state = udma_reg_read(&qp_context, QPC_QP_ST); tp->data_udp_start = udma_reg_read(&qp_context.ext, QPCEX_DATA_UDP_SRCPORT_L) | udma_reg_read(&qp_context.ext, QPCEX_DATA_UDP_SRCPORT_H) << @@ -449,7 +487,6 @@ static int udma_query_res_jetty(struct udma_dev *udma_dev, jetty->jetty_id = jetty_now->jetty_id; jetty->state = jetty_now->state; jetty->jfs_depth = jetty_now->jfs_depth; - jetty->jfr_depth = jetty_now->jfr_depth; jetty->pri = jetty_now->pri; jetty->jfr_id = jetty_now->jfr_id; jetty->send_jfc_id = jetty_now->jfc_s_id; @@ -502,8 +539,13 @@ static int udma_query_res_seg(struct udma_dev *udma_dev, { struct ubcore_res_seg_val *seg = (struct ubcore_res_seg_val *)val->addr; struct udma_mpt_entry mpt_entry; + struct seg_list *seg_now; uint32_t mpt_index; - int ret; + int ret, i; + + ret = udma_find_dfx_dev(udma_dev, &i); + if (ret) + return ret; if (val->len < sizeof(struct ubcore_res_seg_val)) { dev_err(udma_dev->dev, @@ -522,15 +564,20 @@ static int udma_query_res_seg(struct udma_dev *udma_dev, return ret; } - seg->ubva.eid = udma_dev->ub_dev.attr.eid; - seg->ubva.uasid = udma_reg_read(&mpt_entry, MPT_PD); seg->ubva.va = udma_reg_read(&mpt_entry, MPT_VA_L) | udma_reg_read(&mpt_entry, MPT_VA_H) << MPT_VA_H_SHIFT; seg->len = udma_reg_read(&mpt_entry, MPT_LEN_L) | udma_reg_read(&mpt_entry, MPT_LEN_H) << MPT_LEN_H_SHIFT; - seg->key_id = udma_reg_read(&mpt_entry, MPT_LKEY); + seg->token_id = udma_reg_read(&mpt_entry, MPT_LKEY); + 
list_for_each_entry(seg_now, + &g_udma_dfx_list[i].dfx->seg_list->node, node) { + if (seg_now->key_id == seg->token_id) { + memcpy(&seg->ubva.eid, &seg_now->eid, sizeof(union ubcore_eid)); + break; + } + } val->len = sizeof(struct ubcore_res_seg_val); @@ -679,11 +726,10 @@ static int udma_query_res_dev_seg(struct udma_dev *udma_dev, list_for_each_entry(seg_now, &g_udma_dfx_list[i].dfx->seg_list->node, node) { - seg_list_ptr->ubva.eid = udma_dev->ub_dev.attr.eid; - seg_list_ptr->ubva.uasid = seg_now->pd; + memcpy(&seg_list_ptr->ubva.eid, &seg_now->eid, sizeof(union ubcore_eid)); seg_list_ptr->ubva.va = seg_now->iova; seg_list_ptr->len = seg_now->len; - seg_list_ptr->key_id = seg_now->key_id; + seg_list_ptr->token_id = seg_now->key_id; seg_list_ptr++; dev->seg_cnt++; if (dev->seg_cnt == MAX_SEG_CNT) @@ -763,7 +809,7 @@ static int udma_check_key_type(struct udma_dev *udma_dev, return 0; } -int udma_query_res(const struct ubcore_device *dev, +int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_val *val) { struct udma_dev *udma_dev = to_udma_dev(dev); diff --git a/drivers/ub/hw/hns3/hns3_udma_dfx.h b/drivers/ub/hw/hns3/hns3_udma_dfx.h index faacd0a536f9..0befa37a4da8 100644 --- a/drivers/ub/hw/hns3/hns3_udma_dfx.h +++ b/drivers/ub/hw/hns3/hns3_udma_dfx.h @@ -107,6 +107,7 @@ struct seg_list { uint32_t key_id; struct list_head node; spinlock_t node_lock; + union ubcore_eid eid; }; struct udma_dfx_info { @@ -133,7 +134,7 @@ extern struct udma_dfx_dev g_udma_dfx_list[MAX_UDMA_DEV]; int udma_dfx_init(struct udma_dev *udma_dev); void udma_dfx_uninit(struct udma_dev *udma_dev); int udma_find_dfx_dev(struct udma_dev *udma_dev, int *num); -int udma_query_res(const struct ubcore_device *dev, struct ubcore_res_key *key, +int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_val *val); #endif /* _UDMA_DFX_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_eid.c b/drivers/ub/hw/hns3/hns3_udma_eid.c new file 
mode 100644 index 000000000000..a2b0c88943df --- /dev/null +++ b/drivers/ub/hw/hns3/hns3_udma_eid.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei UDMA Linux driver + * Copyright (c) 2023-2023 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include "hns3_udma_cmd.h" +#include "hns3_udma_eid.h" + +static int config_gmv_table(struct udma_dev *udma_dev, struct udma_eid *udma_eid, + uint32_t eid_index) +{ + enum udma_sgid_type sgid_type = udma_eid->type; + struct udma_cfg_gmv_tb_a *tb_a; + struct udma_cfg_gmv_tb_b *tb_b; + struct udma_cmq_desc desc[2]; + uint16_t guid_shift = 0; + uint16_t smac_l; + + tb_a = (struct udma_cfg_gmv_tb_a *)desc[0].data; + tb_b = (struct udma_cfg_gmv_tb_b *)desc[1].data; + + udma_cmq_setup_basic_desc(&desc[0], UDMA_OPC_CFG_GMV_TBL, false); + desc[0].flag |= cpu_to_le16(UDMA_CMD_FLAG_NEXT); + udma_cmq_setup_basic_desc(&desc[1], UDMA_OPC_CFG_GMV_TBL, false); + + if (udma_dev->uboe.netdevs[0]->addr_len > UBOE_MAC_LEN) + guid_shift = UDMA_SMAC_OFFSET; + + smac_l = + *(uint16_t *)&udma_dev->uboe.netdevs[0]->dev_addr[SMAC_L_SHIFT + guid_shift]; + udma_set_field(tb_a->vf_type_vlan_smac, CFG_GMV_TB_VF_SMAC_L_M, + CFG_GMV_TB_VF_SMAC_L_S, smac_l); + + tb_a->vf_smac_h = + *(uint32_t *)&udma_dev->uboe.netdevs[0]->dev_addr[SMAC_H_SHIFT + guid_shift]; + + udma_set_field(tb_a->vf_type_vlan_smac, CFG_GMV_TB_VF_SGID_TYPE_M, + CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type); + + memcpy(tb_a, &udma_eid->eid, sizeof(udma_eid->eid)); + + udma_set_bit(tb_a->vf_type_vlan_smac, CFG_GMV_TB_VF_PATTERN_S, 0); + 
tb_b->table_idx_rsv = eid_index; + tb_b->vf_id = 0; + + return udma_cmq_send(udma_dev, desc, CFG_GMV_TBL_CMD_NUM); +} + +static int clear_gmv_table(struct udma_dev *udma_dev, uint32_t eid_index) +{ + struct udma_eid eid = {}; + + return config_gmv_table(udma_dev, &eid, eid_index); +} + +static int add_eid_entry(struct udma_dev *udma_dev, union ubcore_eid eid, + uint32_t eid_index) +{ + struct udma_eid *udma_eid; + int ret; + + udma_eid = kcalloc(1, sizeof(*udma_eid), GFP_KERNEL); + if (!udma_eid) + return -ENOMEM; + + memcpy(&udma_eid->eid, &eid, sizeof(eid)); + udma_eid->type = get_sgid_type_from_eid(eid); + + ret = config_gmv_table(udma_dev, udma_eid, eid_index); + if (ret) { + dev_err(udma_dev->dev, "Set EID to GMV table failed, ret = %d.\n", + ret); + goto err_config; + } + + ret = xa_err(xa_store(&udma_dev->eid_table, eid_index, udma_eid, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, "Failed to store eid, ret = %d.\n", ret); + goto err_store; + } + + return ret; +err_store: + clear_gmv_table(udma_dev, eid_index); +err_config: + kfree(udma_eid); + return ret; +} + +static int del_eid_entry(struct udma_dev *udma_dev, uint32_t eid_index) +{ + struct udma_eid *udma_eid; + + udma_eid = (struct udma_eid *)xa_load(&udma_dev->eid_table, eid_index); + if (IS_ERR_OR_NULL(udma_eid)) { + dev_err(udma_dev->dev, "Failed to find eid, index = %d\n.", + eid_index); + return -EINVAL; + } + + xa_erase(&udma_dev->eid_table, eid_index); + kfree(udma_eid); + + return clear_gmv_table(udma_dev, eid_index); +} + +int udma_set_eid(struct ubcore_device *dev, union ubcore_eid eid) +{ + return 0; +} + +static int udma_check_ueid_cfg(struct udma_dev *dev, uint16_t fe_idx, + uint32_t eid_index) +{ + if (fe_idx != UDMA_NON_VIRTUALIZATION_FE_ID) { + dev_err(dev->dev, "Check FE ID failed.\n"); + return -EINVAL; + } + if (eid_index >= dev->caps.max_eid_cnt) { + dev_err(dev->dev, "Invalid EID index(%u), max value is %u.\n", + eid_index, dev->caps.max_eid_cnt); + return -EINVAL; + } + + 
return 0; +} + +int udma_add_ueid(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + uint32_t eid_index = cfg->eid_index; + union ubcore_eid eid = cfg->eid; + int ret; + + ret = udma_check_ueid_cfg(udma_dev, fe_idx, eid_index); + if (ret) + return ret; + + return add_eid_entry(udma_dev, eid, eid_index); +} + +int udma_delete_ueid(struct ubcore_device *dev, uint16_t fe_idx, + struct ubcore_ueid_cfg *cfg) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + uint32_t eid_index = cfg->eid_index; + int ret; + + ret = udma_check_ueid_cfg(udma_dev, fe_idx, eid_index); + if (ret) + return ret; + + return del_eid_entry(udma_dev, eid_index); +} + +int udma_find_eid_idx(struct udma_dev *dev, union ubcore_eid eid) +{ + struct udma_eid *udma_eid; + int eid_index = -EINVAL; + unsigned long index; + + xa_lock(&dev->eid_table); + xa_for_each(&dev->eid_table, index, udma_eid) { + if (!memcmp(&udma_eid->eid, &eid, sizeof(eid))) { + eid_index = index; + break; + } + } + xa_unlock(&dev->eid_table); + + return eid_index; +} diff --git a/drivers/ub/hw/hns3/hns3_udma_eid.h b/drivers/ub/hw/hns3/hns3_udma_eid.h new file mode 100644 index 000000000000..f7baf74bf113 --- /dev/null +++ b/drivers/ub/hw/hns3/hns3_udma_eid.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei UDMA Linux driver + * Copyright (c) 2023-2023 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef _UDMA_EID_H +#define _UDMA_EID_H + +#define UBOE_MAC_LEN 6 +#define UDMA_SMAC_OFFSET 10 + +#define SMAC_L_SHIFT 0 +#define SMAC_H_SHIFT 2 + +#define CFG_GMV_TBL_CMD_NUM 2 +#define CFG_GMV_TB_VF_SGID_TYPE_S 0 +#define CFG_GMV_TB_VF_SMAC_L_S 16 +#define CFG_GMV_TB_VF_PATTERN_S 3 +#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0) +#define CFG_GMV_TB_VF_SMAC_L_M GENMASK(31, 16) + +#define UDMA_NON_VIRTUALIZATION_FE_ID 0xffff +#define UDMA_IPV4_MAP_IPV6_PREFIX 0x0000ffff + +enum udma_sgid_type { + SGID_TYPE_IPV6, + SGID_TYPE_IPV4, +}; + +struct udma_cfg_gmv_tb_a { + uint32_t vf_sgid_l; + uint32_t vf_sgid_ml; + uint32_t vf_sgid_mh; + uint32_t vf_sgid_h; + uint32_t vf_type_vlan_smac; + uint32_t vf_smac_h; +}; + +struct udma_cfg_gmv_tb_b { + uint32_t vf_upi; + uint32_t vf_eid_high; + uint32_t table_idx_rsv; + uint32_t vf_id; + uint32_t resv[2]; +}; + +struct udma_eid { + union ubcore_eid eid; + enum udma_sgid_type type; +}; + +int udma_set_eid(struct ubcore_device *dev, union ubcore_eid eid); +int udma_add_ueid(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg); +int udma_delete_ueid(struct ubcore_device *dev, uint16_t fe_idx, + struct ubcore_ueid_cfg *cfg); +int udma_find_eid_idx(struct udma_dev *dev, union ubcore_eid eid); + +static inline enum udma_sgid_type get_sgid_type_from_eid(union ubcore_eid eid) +{ + if (eid.in4.reserved == 0 && eid.in4.prefix == htonl(UDMA_IPV4_MAP_IPV6_PREFIX)) + return SGID_TYPE_IPV4; + return SGID_TYPE_IPV6; +} + +#endif /* _UDMA_EID_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_hw.c b/drivers/ub/hw/hns3/hns3_udma_hw.c index b050ce2fe003..fa9cbbdacae6 100644 --- a/drivers/ub/hw/hns3/hns3_udma_hw.c +++ b/drivers/ub/hw/hns3/hns3_udma_hw.c @@ -276,9 +276,6 @@ static int udma_query_caps(struct udma_dev *udma_dev) caps->num_cqs = 1 << udma_get_field(resp_c->max_gid_num_cqs, QUERY_PF_CAPS_C_NUM_CQS_M, QUERY_PF_CAPS_C_NUM_CQS_S); - caps->gid_table_len[0] = udma_get_field(resp_c->max_gid_num_cqs, - 
QUERY_PF_CAPS_C_MAX_GID_M, - QUERY_PF_CAPS_C_MAX_GID_S); caps->max_cqes = 1 << udma_get_field(resp_c->cq_depth, QUERY_PF_CAPS_C_CQ_DEPTH_M, @@ -700,13 +697,11 @@ static void apply_func_caps(struct udma_dev *udma_dev) /* The following caps are not in ncl config */ caps->gmv_entry_sz = UDMA_GMV_ENTRY_SZ; - caps->gmv_hop_num = UDMA_HOP_NUM_0; - caps->gid_table_len[0] = caps->gmv_bt_num * - (UDMA_PAGE_SIZE / caps->gmv_entry_sz); - caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE / caps->gmv_entry_sz); + caps->max_eid_cnt = (caps->gmv_entry_num > UDMA_MAX_EID_NUM) ? + UDMA_MAX_EID_NUM : caps->gmv_entry_num; set_hem_page_size(udma_dev); } @@ -941,8 +936,6 @@ static int udma_profile(struct udma_dev *udma_dev) return ret; } - udma_dev->sys_image_guid = be64_to_cpu(udma_dev->ub_dev.attr.guid); - return udma_pf_profile(udma_dev); } @@ -1031,89 +1024,6 @@ static void udma_cmq_exit(struct udma_dev *udma_dev) udma_free_cmq_desc(udma_dev, &priv->cmq.csq); } -static int config_gmv_table(struct udma_dev *udma_dev, union ubcore_eid eid) -{ - uint32_t sgid_type = SGID_TYPE_IPV4; - struct udma_cfg_gmv_tb_a *tb_a; - struct udma_cfg_gmv_tb_b *tb_b; - struct udma_cmq_desc desc[2]; - uint16_t smac_l; - - tb_a = (struct udma_cfg_gmv_tb_a *)desc[0].data; - tb_b = (struct udma_cfg_gmv_tb_b *)desc[1].data; - - udma_cmq_setup_basic_desc(&desc[0], UDMA_OPC_CFG_GMV_TBL, false); - desc[0].flag |= cpu_to_le16(UDMA_CMD_FLAG_NEXT); - udma_cmq_setup_basic_desc(&desc[1], UDMA_OPC_CFG_GMV_TBL, false); - - smac_l = - *(uint16_t *)&udma_dev->uboe.netdevs[0]->dev_addr[SMAC_L_SHIFT]; - udma_set_field(tb_a->vf_type_vlan_smac, CFG_GMV_TB_VF_SMAC_L_M, - CFG_GMV_TB_VF_SMAC_L_S, smac_l); - tb_a->vf_smac_h = - *(uint32_t *)&udma_dev->uboe.netdevs[0]->dev_addr[SMAC_H_SHIFT]; - udma_set_field(tb_a->vf_type_vlan_smac, CFG_GMV_TB_VF_SGID_TYPE_M, - CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type); - memcpy(tb_a, &eid, sizeof(eid)); - udma_set_bit(tb_a->vf_type_vlan_smac, CFG_GMV_TB_VF_PATTERN_S, 0); - tb_b->vf_id = 0; - - 
return udma_cmq_send(udma_dev, desc, CFG_GMV_TBL_CMD_NUM); -} - -static void udma_fill_eid_addr(struct udma_eid_tbl_entry_cmd *eid_entry, - union ubcore_eid eid) -{ - int i; - - /* big endian */ - for (i = 0; i < UDMA_EID_SIZE_IDX; i++) - eid_entry->eid_addr[i] = (*(((uint32_t *)eid.raw) + i)); -} - -static int set_eid_table(struct udma_dev *udma_dev, union ubcore_eid eid) -{ - struct udma_eid_tbl_entry_cmd *eid_entry; - struct udma_cmq_desc desc = {}; - uint8_t resp_code; - int ret; - - eid_entry = (struct udma_eid_tbl_entry_cmd *)desc.data; - udma_cmq_setup_basic_desc(&desc, UDMA_OPC_DEID_TBL_ADD, false); - udma_set_field(eid_entry->eid_ad, UDMA_EID_TB_VFID_M, - UDMA_EID_TB_VFID_S, 0); - udma_fill_eid_addr(eid_entry, eid); - ret = udma_cmq_send(udma_dev, &desc, 1); - if (ret) { - dev_err(udma_dev->dev, "Send set eid table cmd failed.\n"); - return ret; - } - - resp_code = (le32_to_cpu(desc.data[0])) & 0xff; - if (resp_code == UDMA_EID_TB_RES_SUCCESS || - resp_code == UDMA_EID_TB_RES_MODIFY) - return 0; - else - return -EIO; -} - -static int udma_hw_set_eid(struct udma_dev *udma_dev, union ubcore_eid eid) -{ - int ret; - - ret = config_gmv_table(udma_dev, eid); - if (ret) { - dev_err(udma_dev->dev, "Set EID to GMV table failed.\n"); - return ret; - } - - ret = set_eid_table(udma_dev, eid); - if (ret) - dev_err(udma_dev->dev, "Set EID table failed.\n"); - - return ret; -} - static void func_clr_hw_resetting_state(struct udma_dev *udma_dev, struct hnae3_handle *handle) { @@ -1796,7 +1706,6 @@ static const struct udma_hw udma_hw = { .chk_mbox_avail = udma_chk_mbox_is_avail, .set_hem = udma_set_hem, .clear_hem = udma_clear_hem, - .set_eid = udma_hw_set_eid, .init_eq = udma_init_eq_table, .cleanup_eq = udma_cleanup_eq_table, }; @@ -2101,7 +2010,7 @@ static void udma_link_status_change(struct hnae3_handle *handle, bool linkup) if (linkup) event.event_type = UBCORE_EVENT_PORT_ACTIVE; else - event.event_type = UBCORE_EVENT_PORT_ERR; + event.event_type = 
UBCORE_EVENT_PORT_DOWN; event.ub_dev = &dev->ub_dev; event.element.port_id = port_id; diff --git a/drivers/ub/hw/hns3/hns3_udma_hw.h b/drivers/ub/hw/hns3/hns3_udma_hw.h index 17e1c2750cd8..56f22f30830c 100644 --- a/drivers/ub/hw/hns3/hns3_udma_hw.h +++ b/drivers/ub/hw/hns3/hns3_udma_hw.h @@ -334,9 +334,6 @@ struct udma_query_pf_caps_e { #define QUERY_PF_CAPS_C_NUM_CQS_S 0 #define QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0) -#define QUERY_PF_CAPS_C_MAX_GID_S 20 -#define QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20) - #define QUERY_PF_CAPS_C_CQ_DEPTH_S 0 #define QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0) @@ -447,59 +444,7 @@ enum { #define UDMA_INT_NAME_LEN 32 -struct udma_cfg_gmv_tb_a { - uint32_t vf_sgid_l; - uint32_t vf_sgid_ml; - uint32_t vf_sgid_mh; - uint32_t vf_sgid_h; - uint32_t vf_type_vlan_smac; - uint32_t vf_smac_h; -}; - -struct udma_cfg_gmv_tb_b { - uint32_t vf_upi; - uint32_t vf_eid_high; - uint32_t table_idx_rsv; - uint32_t vf_id; - uint32_t resv[2]; -}; - -#define SGID_TYPE_IPV4 1 - -#define CFG_GMV_TB_VF_SGID_TYPE_S 0 -#define CFG_GMV_TB_VF_SMAC_L_S 16 -#define CFG_GMV_TB_VF_PATTERN_S 3 -#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0) -#define CFG_GMV_TB_VF_SMAC_L_M GENMASK(31, 16) - #define SGID_H_SHIFT 12 -#define SMAC_L_SHIFT 0 -#define SMAC_H_SHIFT 2 - -#define CFG_GMV_TBL_CMD_NUM 2 - -struct udma_eid_tbl_entry_cmd { - uint8_t resp_code; - uint8_t rsv1[3]; - uint32_t eid_addr[4]; - uint16_t eid_ad; - uint8_t rsv2[2]; -}; - -#define UDMA_EID_TB_VFID_S 0 -#define UDMA_EID_TB_VFID_M GENMASK(7, 0) -#define UDMA_EID_TB_RES_SUCCESS 0 -#define UDMA_EID_TB_RES_MODIFY 2 - -union udma_eid { - union ubcore_eid ubcore_eid; - struct { - uint32_t eid_l; - uint32_t eid_ml; - uint32_t eid_mh; - uint32_t eid_h; - } bit32_data; -}; struct udma_poe_cfg_addr_cmq { uint32_t channel_id; diff --git a/drivers/ub/hw/hns3/hns3_udma_jetty.c b/drivers/ub/hw/hns3/hns3_udma_jetty.c index 31d40200c32c..eb44c85e12d3 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jetty.c +++ 
b/drivers/ub/hw/hns3/hns3_udma_jetty.c @@ -23,7 +23,7 @@ #include "hns3_udma_jetty.h" static void init_jetty_cfg(struct udma_jetty *jetty, - const struct ubcore_jetty_cfg *cfg) + struct ubcore_jetty_cfg *cfg) { jetty->shared_jfr = cfg->flag.bs.share_jfr; jetty->tp_mode = cfg->trans_mode; @@ -33,11 +33,12 @@ static void init_jetty_cfg(struct udma_jetty *jetty, static void udma_fill_jetty_um_qp_attr(struct udma_dev *dev, struct udma_jetty *jetty, struct ubcore_ucontext *uctx, - const struct ubcore_jetty_cfg *cfg) + struct ubcore_jetty_cfg *cfg) { struct udma_ucontext *udma_ctx = to_udma_ucontext(uctx); struct udma_qp_attr *qp_attr = &jetty->qp.qp_attr; + qp_attr->is_tgt = false; qp_attr->is_jetty = true; qp_attr->uctx = uctx; qp_attr->pdn = udma_ctx->pdn; @@ -46,7 +47,6 @@ static void udma_fill_jetty_um_qp_attr(struct udma_dev *dev, qp_attr->cap.max_send_wr = cfg->jfs_depth; qp_attr->cap.max_send_sge = cfg->max_send_sge; qp_attr->cap.max_inline_data = cfg->max_inline_data; - qp_attr->cap.retry_cnt = cfg->retry_cnt; qp_attr->cap.rnr_retry = cfg->rnr_retry; qp_attr->cap.ack_timeout = cfg->err_timeout; qp_attr->qp_type = QPT_UD; @@ -54,6 +54,7 @@ static void udma_fill_jetty_um_qp_attr(struct udma_dev *dev, qp_attr->jfr = jetty->udma_jfr; qp_attr->qpn_map = &jetty->qpn_map; qp_attr->recv_jfc = to_udma_jfc(cfg->recv_jfc); + qp_attr->eid_index = udma_ctx->eid_index; if (jetty->ubcore_jetty.jetty_cfg.priority >= dev->caps.sl_num) { qp_attr->priority = dev->caps.sl_num > 0 ? 
dev->caps.sl_num - 1 : 0; @@ -78,8 +79,7 @@ static int udma_modify_qp_jetty(struct udma_dev *dev, struct udma_jetty *jetty, qp->send_jfc = qp->qp_attr.send_jfc; qp->recv_jfc = qp->qp_attr.recv_jfc; - m_attr.path_mtu = UBCORE_MTU_4096; - m_attr.hop_limit = MAX_HOP_LIMIT; + m_attr.sgid_index = qp->qp_attr.eid_index; ubcore_attr_mask.value = 0; qp->m_attr = &m_attr; @@ -92,7 +92,7 @@ static int udma_modify_qp_jetty(struct udma_dev *dev, struct udma_jetty *jetty, } static int set_jetty_jfr(struct udma_dev *dev, struct udma_jetty *jetty, - const struct ubcore_jetty_cfg *cfg, uint32_t jfr_id) + struct ubcore_jetty_cfg *cfg, uint32_t jfr_id) { if (cfg->jfr) { jetty->shared_jfr = true; @@ -111,7 +111,7 @@ static int set_jetty_jfr(struct udma_dev *dev, struct udma_jetty *jetty, } static int alloc_jetty_um_qp(struct udma_dev *dev, struct udma_jetty *jetty, - const struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata) { int ret; @@ -207,22 +207,23 @@ static int set_jetty_buf_attr(struct udma_dev *udma_dev, } static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, - const struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata) + struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata) { struct udma_create_jetty_ucmd ucmd = {}; struct udma_buf_attr buf_attr = {}; int ret; - if (udata) { - ret = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(ucmd))); - if (ret) { - dev_err(dev->dev, - "failed to copy jetty udata, ret = %d.\n", - ret); - return -EFAULT; - } + if (!udata) + return -EINVAL; + + ret = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, + min(udata->udrv_data->in_len, + (uint32_t)sizeof(ucmd))); + if (ret) { + dev_err(dev->dev, + "failed to copy jetty udata, ret = %d.\n", + ret); + return -EFAULT; } ret = set_jetty_jfr(dev, jetty, cfg, ucmd.jfr_id); @@ -236,7 +237,16 @@ static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, } 
else if (cfg->trans_mode == UBCORE_TP_RM) { xa_init(&jetty->srm_node_table); } else if (cfg->trans_mode == UBCORE_TP_RC) { + ret = udma_db_map_user(dev, ucmd.sdb_addr, &jetty->rc_node.sdb); + if (ret) { + dev_err(dev->dev, + "failed to map user sdb_addr, ret = %d.\n", + ret); + return ret; + } + jetty->rc_node.buf_addr = ucmd.buf_addr; + jetty->rc_node.context = to_udma_ucontext(udata->uctx); if (!ucmd.buf_addr) { jetty->dca_en = true; return 0; @@ -257,15 +267,7 @@ static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, dev_err(dev->dev, "failed to create WQE mtr for RC Jetty, ret = %d.\n", ret); - return ret; - } - - ret = udma_db_map_user(dev, ucmd.sdb_addr, &jetty->rc_node.sdb); - if (ret) { - dev_err(dev->dev, - "failed to map user sdb_addr, ret = %d.\n", - ret); - udma_mtr_destroy(dev, &jetty->rc_node.mtr); + udma_db_unmap_user(dev, &jetty->rc_node.sdb); return ret; } } @@ -390,8 +392,8 @@ static void free_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty) } struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev, - const struct ubcore_jetty_cfg *cfg, - struct ubcore_udata *udata) + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); struct udma_jetty *jetty; @@ -474,8 +476,8 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) } struct ubcore_tjetty *udma_import_jetty(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, - struct ubcore_udata *udata) + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) { struct ubcore_tjetty *tjetty; diff --git a/drivers/ub/hw/hns3/hns3_udma_jetty.h b/drivers/ub/hw/hns3/hns3_udma_jetty.h index 25c8b6e3bc54..c766cb044d25 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jetty.h +++ b/drivers/ub/hw/hns3/hns3_udma_jetty.h @@ -30,6 +30,7 @@ struct rc_node { uint32_t sge_shift; struct udma_db sdb; struct ubcore_jetty_id tjetty_id; + struct udma_ucontext *context; }; struct udma_jetty { @@ -54,12 +55,12 @@ static inline 
struct udma_jetty *to_udma_jetty(struct ubcore_jetty *ubcore_jetty } struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev, - const struct ubcore_jetty_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); struct ubcore_tjetty *udma_import_jetty(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); int udma_unimport_jetty(struct ubcore_tjetty *tjetty); #endif /* _UDMA_JETTY_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_jfc.c b/drivers/ub/hw/hns3/hns3_udma_jfc.c index 4abb9ad2e84f..5d6ffe89cdb7 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfc.c +++ b/drivers/ub/hw/hns3/hns3_udma_jfc.c @@ -45,7 +45,7 @@ static int udma_hw_destroy_cq(struct udma_dev *dev, uint32_t cqn) return udma_cmd_mbox(dev, &desc, UDMA_CMD_TIMEOUT_MSECS, 0); } -static int check_jfc_cfg(struct udma_dev *udma_dev, const struct ubcore_jfc_cfg *cfg) +static int check_jfc_cfg(struct udma_dev *udma_dev, struct ubcore_jfc_cfg *cfg) { if (!cfg->depth || cfg->depth > udma_dev->caps.max_cqes) { dev_err(udma_dev->dev, @@ -122,7 +122,7 @@ static int check_jfc_attr_ex(struct udma_dev *udma_dev, } static int check_create_jfc(struct udma_dev *udma_dev, - const struct ubcore_jfc_cfg *cfg, + struct ubcore_jfc_cfg *cfg, struct udma_create_jfc_ucmd *ucmd, struct ubcore_udata *udata) { @@ -159,7 +159,7 @@ static int check_create_jfc(struct udma_dev *udma_dev, return 0; } -void set_jfc_param(struct udma_jfc *udma_jfc, const struct ubcore_jfc_cfg *cfg) +void set_jfc_param(struct udma_jfc *udma_jfc, struct ubcore_jfc_cfg *cfg) { udma_jfc->jfc_depth = roundup_pow_of_two(cfg->depth); memcpy(&udma_jfc->ubcore_jfc.jfc_cfg, cfg, sizeof(struct ubcore_jfc_cfg)); @@ -558,8 +558,8 @@ static void free_jfc_id(struct udma_dev *udma_dev, struct udma_jfc *udma_jfc) mutex_unlock(&jfc_table->bank_mutex); } -struct 
ubcore_jfc *udma_create_jfc(struct ubcore_device *dev, const struct ubcore_jfc_cfg *cfg, - struct ubcore_udata *udata) +struct ubcore_jfc *udma_create_jfc(struct ubcore_device *dev, struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); struct udma_create_jfc_ucmd ucmd = {}; @@ -624,7 +624,7 @@ int udma_destroy_jfc(struct ubcore_jfc *jfc) return 0; } -int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, const struct ubcore_jfc_attr *attr, +int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata) { struct udma_dev *udma_device = to_udma_dev(ubcore_jfc->ub_dev); diff --git a/drivers/ub/hw/hns3/hns3_udma_jfc.h b/drivers/ub/hw/hns3/hns3_udma_jfc.h index 5af78b398aea..e48f2462d3e7 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfc.h +++ b/drivers/ub/hw/hns3/hns3_udma_jfc.h @@ -67,10 +67,10 @@ static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) return container_of(jfc, struct udma_jfc, ubcore_jfc); } -struct ubcore_jfc *udma_create_jfc(struct ubcore_device *dev, const struct ubcore_jfc_cfg *cfg, - struct ubcore_udata *udata); +struct ubcore_jfc *udma_create_jfc(struct ubcore_device *dev, struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_jfc(struct ubcore_jfc *jfc); -int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, const struct ubcore_jfc_attr *attr, +int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); void udma_jfc_completion(struct udma_dev *udma_dev, uint32_t cqn); void udma_jfc_event(struct udma_dev *udma_dev, uint32_t cqn, int event_type); diff --git a/drivers/ub/hw/hns3/hns3_udma_jfr.c b/drivers/ub/hw/hns3/hns3_udma_jfr.c index 9deb39423bec..4f644c576eb7 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfr.c +++ b/drivers/ub/hw/hns3/hns3_udma_jfr.c @@ -22,7 +22,7 @@ #include "hns3_udma_jfr.h" static int init_jfr_cfg(struct udma_dev *dev, struct udma_jfr *jfr, - const struct 
ubcore_jfr_cfg *cfg) + struct ubcore_jfr_cfg *cfg) { if (!cfg->max_sge || cfg->depth > dev->caps.max_srq_wrs || @@ -468,7 +468,7 @@ static int udma_modify_jfr_um_qpc(struct udma_dev *dev, struct udma_jfr *jfr, struct udma_qp *qp = jfr->um_qp; int ret; - attr.path_mtu = UBCORE_MTU_4096; + attr.sgid_index = qp->qp_attr.eid_index; qp->udma_device = dev; qp->qp_attr.jfr = jfr; qp->recv_jfc = to_udma_jfc(jfr->ubcore_jfr.jfr_cfg.jfc); @@ -495,10 +495,13 @@ static int alloc_jfr_um_qp(struct udma_dev *dev, struct udma_jfr *jfr) return -ENOMEM; qp->qp_type = QPT_UD; + qp->qp_attr.is_tgt = true; qp->qp_attr.qp_type = QPT_UD; qp->qp_attr.qpn_map = &jfr->qpn_map; qp->qp_attr.recv_jfc = to_udma_jfc(jfr->ubcore_jfr.jfr_cfg.jfc); qp->qp_attr.send_jfc = NULL; + qp->qp_attr.eid_index = + to_udma_ucontext(jfr->ubcore_jfr.uctx)->eid_index; udata.uctx = NULL; ret = udma_create_qp_common(dev, qp, &udata); if (ret) { @@ -531,8 +534,8 @@ void destroy_jfr_um_qp(struct udma_dev *dev, struct udma_jfr *jfr) kfree(jfr->um_qp); } -struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, const struct ubcore_jfr_cfg *cfg, - struct ubcore_udata *udata) +struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); struct udma_jfr *jfr; @@ -559,6 +562,7 @@ struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, const struct ubcor udma_dev->caps.num_jfr_shift, UDMA_JFR_QPN_PREFIX, jfr->jfrn); if (cfg->trans_mode == UBCORE_TP_UM) { + jfr->ubcore_jfr.uctx = udata->uctx; ret = alloc_jfr_um_qp(udma_dev, jfr); if (ret) goto err_alloc_jfrc; @@ -614,8 +618,8 @@ struct udma_jfr *get_udma_jfr(struct ubcore_device *dev, uint32_t jfr_id) } struct ubcore_tjetty *udma_import_jfr(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, - struct ubcore_udata *udata) + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) { struct ubcore_tjetty *tjfr; @@ -665,7 +669,7 @@ static int 
udma_hw_modify_srq(struct udma_dev *dev, uint32_t jfrn, return ret; } -int udma_modify_jfr(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, +int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); @@ -711,7 +715,7 @@ void udma_jfr_event(struct udma_dev *udma_dev, uint32_t jfrn, int event_type) return; } - event.event_type = UBCORE_EVENT_JFR_ACCESS_ERR; + event.event_type = UBCORE_EVENT_JFR_LIMIT_REACHED; refcount_inc(&jfr->refcount); ubcore_jfr = &jfr->ubcore_jfr; diff --git a/drivers/ub/hw/hns3/hns3_udma_jfr.h b/drivers/ub/hw/hns3/hns3_udma_jfr.h index c804889e3ac1..868538cc06ab 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfr.h +++ b/drivers/ub/hw/hns3/hns3_udma_jfr.h @@ -84,15 +84,15 @@ static inline struct udma_jfr *to_udma_jfr(struct ubcore_jfr *ubcore_jfr) return container_of(ubcore_jfr, struct udma_jfr, ubcore_jfr); } -struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, const struct ubcore_jfr_cfg *cfg, - struct ubcore_udata *udata); +struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_jfr(struct ubcore_jfr *jfr); struct udma_jfr *get_udma_jfr(struct ubcore_device *dev, uint32_t jfr_id); struct ubcore_tjetty *udma_import_jfr(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); int udma_unimport_jfr(struct ubcore_tjetty *tjfr); -int udma_modify_jfr(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, +int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, struct ubcore_udata *udata); void udma_jfr_event(struct udma_dev *udma_dev, uint32_t jfrn, int event_type); diff --git a/drivers/ub/hw/hns3/hns3_udma_jfs.c b/drivers/ub/hw/hns3/hns3_udma_jfs.c index 9e4944c5d732..5e30073c269e 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfs.c 
+++ b/drivers/ub/hw/hns3/hns3_udma_jfs.c @@ -22,10 +22,9 @@ #include "hns3_udma_jfs.h" static int init_jfs_cfg(struct udma_dev *dev, struct udma_jfs *jfs, - const struct ubcore_jfs_cfg *cfg) + struct ubcore_jfs_cfg *cfg) { - if (!cfg->max_sge || - cfg->depth > dev->caps.max_wqes || + if (!cfg->depth || cfg->depth > dev->caps.max_wqes || cfg->max_sge > dev->caps.max_sq_sg) { dev_err(dev->dev, "invalid jfs cfg, depth = %u, sge = %u.\n", cfg->depth, cfg->max_sge); @@ -49,11 +48,8 @@ static int udma_modify_jfs_um_qp(struct udma_dev *dev, struct udma_jfs *jfs, qp->udma_device = dev; qp->send_jfc = qp->qp_attr.send_jfc; qp->recv_jfc = qp->qp_attr.recv_jfc; - qp->ubcore_path_mtu = UBCORE_MTU_4096; - qp->path_mtu = UDMA_MTU_4096; - m_attr.path_mtu = UBCORE_MTU_4096; - m_attr.hop_limit = MAX_HOP_LIMIT; + m_attr.sgid_index = qp->qp_attr.eid_index; ubcore_attr_mask.value = 0; qp->m_attr = &m_attr; @@ -69,10 +65,11 @@ static int udma_modify_jfs_um_qp(struct udma_dev *dev, struct udma_jfs *jfs, static void udma_fill_jfs_um_qp_attr(struct udma_dev *dev, struct udma_jfs *jfs, struct udma_qp_attr *qp_attr, struct ubcore_ucontext *uctx, - const struct ubcore_jfs_cfg *cfg) + struct ubcore_jfs_cfg *cfg) { struct udma_ucontext *udma_ctx = to_udma_ucontext(uctx); + qp_attr->is_tgt = false; qp_attr->is_jetty = false; qp_attr->jfs = jfs; qp_attr->uctx = uctx; @@ -80,12 +77,12 @@ static void udma_fill_jfs_um_qp_attr(struct udma_dev *dev, struct udma_jfs *jfs, qp_attr->cap.max_send_wr = cfg->depth; qp_attr->cap.max_send_sge = cfg->max_sge; qp_attr->cap.max_inline_data = cfg->max_inline_data; - qp_attr->cap.retry_cnt = cfg->retry_cnt; qp_attr->cap.rnr_retry = cfg->rnr_retry; qp_attr->cap.ack_timeout = cfg->err_timeout; qp_attr->qp_type = QPT_UD; qp_attr->recv_jfc = NULL; qp_attr->send_jfc = to_udma_jfc(cfg->jfc); + qp_attr->eid_index = udma_ctx->eid_index; if (jfs->ubcore_jfs.jfs_cfg.priority >= dev->caps.sl_num) { qp_attr->priority = dev->caps.sl_num > 0 ? 
dev->caps.sl_num - 1 : 0; @@ -99,7 +96,7 @@ static void udma_fill_jfs_um_qp_attr(struct udma_dev *dev, struct udma_jfs *jfs, } static int create_jfs_um_qp(struct udma_dev *dev, struct udma_jfs *jfs, - const struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata) + struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata) { int ret; @@ -130,7 +127,7 @@ int destroy_jfs_qp(struct udma_dev *dev, struct udma_jfs *jfs) } static int alloc_jfs_buf(struct udma_dev *udma_dev, struct udma_jfs *jfs, - const struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata) { struct udma_create_jfs_ucmd ucmd = {}; @@ -267,8 +264,8 @@ static void free_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *jfs) ida_free(&jfs_ida->ida, (int)jfs->jfs_id); } -struct ubcore_jfs *udma_create_jfs(struct ubcore_device *dev, const struct ubcore_jfs_cfg *cfg, - struct ubcore_udata *udata) +struct ubcore_jfs *udma_create_jfs(struct ubcore_device *dev, struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); struct udma_jfs *jfs; diff --git a/drivers/ub/hw/hns3/hns3_udma_jfs.h b/drivers/ub/hw/hns3/hns3_udma_jfs.h index eee4058bb0b3..8460a767f23a 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfs.h +++ b/drivers/ub/hw/hns3/hns3_udma_jfs.h @@ -34,8 +34,8 @@ static inline struct udma_jfs *to_udma_jfs(struct ubcore_jfs *jfs) } struct ubcore_jfs *udma_create_jfs(struct ubcore_device *dev, - const struct ubcore_jfs_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_jfs(struct ubcore_jfs *ubcore_jfs); #endif /* _UDMA_JFS_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_main.c b/drivers/ub/hw/hns3/hns3_udma_main.c index ab105f589a64..b31f315dc0dc 100644 --- a/drivers/ub/hw/hns3/hns3_udma_main.c +++ b/drivers/ub/hw/hns3/hns3_udma_main.c @@ -29,17 +29,7 @@ #include "hns3_udma_dca.h" #include "hns3_udma_cmd.h" #include "hns3_udma_dfx.h" - -static int udma_set_eid(struct 
ubcore_device *dev, union ubcore_eid eid) -{ - struct udma_dev *udma_dev = to_udma_dev(dev); - uint8_t port = 0; - - if (port >= udma_dev->caps.num_ports) - return -EINVAL; - - return udma_dev->hw->set_eid(udma_dev, eid); -} +#include "hns3_udma_eid.h" static int udma_uar_alloc(struct udma_dev *udma_dev, struct udma_uar *uar) { @@ -114,17 +104,31 @@ static void udma_uar_free(struct udma_dev *udma_dev, } static struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *dev, - uint32_t uasid, + uint32_t eid_index, struct ubcore_udrv_priv *udrv_data) { struct udma_dev *udma_dev = to_udma_dev(dev); struct udma_ucontext *context; + struct udma_eid *udma_eid; int ret; context = kzalloc(sizeof(struct udma_ucontext), GFP_KERNEL); if (!context) return NULL; + udma_eid = (struct udma_eid *)xa_load(&udma_dev->eid_table, eid_index); + if (IS_ERR_OR_NULL(udma_eid)) { + dev_err(udma_dev->dev, "Failed to find eid, index = %d\n.", + eid_index); + goto err_alloc_ucontext; + } + if (udma_eid->type != SGID_TYPE_IPV4) { + dev_err(udma_dev->dev, "Failed to check type, index = %d\n.", + eid_index); + goto err_alloc_ucontext; + } + context->eid_index = eid_index; + ret = udma_uar_alloc(udma_dev, &context->uar); if (ret) { dev_err(udma_dev->dev, "Alloc udma_uar Failed.\n"); @@ -137,7 +141,6 @@ static struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *dev, goto err_alloc_uar; } - context->pdn = uasid; /* Use the UASID as pd number */ ret = udma_init_ctx_resp(udma_dev, udrv_data, &context->dca_ctx); if (ret) { dev_err(udma_dev->dev, "Init ctx resp failed.\n"); @@ -261,7 +264,7 @@ static int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) return 0; } -static int udma_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, +static int udma_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, struct ubcore_stats_val *val) { struct ubcore_stats_com_val *com_val = (struct ubcore_stats_com_val *)val->addr; @@ -333,7 
+336,7 @@ static int udma_query_device_attr(struct ubcore_device *dev, struct net_device *net_dev; int i; - attr->guid = udma_dev->sys_image_guid; + attr->max_eid_cnt = udma_dev->caps.max_eid_cnt; attr->dev_cap.max_jfc = (1 << udma_dev->caps.num_jfc_shift); attr->dev_cap.max_jfs = (1 << udma_dev->caps.num_jfs_shift); attr->dev_cap.max_jfr = (1 << udma_dev->caps.num_jfr_shift); @@ -347,13 +350,14 @@ static int udma_query_device_attr(struct ubcore_device *dev, attr->dev_cap.max_msg_size = UDMA_MAX_MSG_LEN; attr->dev_cap.trans_mode = UBCORE_TP_RM | UBCORE_TP_UM; attr->dev_cap.feature.bs.oor = udma_dev->caps.oor_en; - attr->port_cnt = udma_dev->caps.num_ports; - attr->dev_cap.comp_vector_cnt = udma_dev->caps.num_comp_vectors; - attr->vf_cnt = udma_dev->func_num - 1; + attr->dev_cap.ceq_cnt = udma_dev->caps.num_comp_vectors; attr->dev_cap.feature.bs.jfc_inline = !!(udma_dev->caps.flags & UDMA_CAP_FLAG_CQE_INLINE); attr->dev_cap.feature.bs.spray_en = 1; attr->dev_cap.max_jfs_rsge = udma_dev->caps.max_sq_sg; attr->dev_cap.congestion_ctrl_alg = query_congest_alg(udma_dev->caps.cong_type); + attr->fe_cnt = udma_dev->func_num - 1; + attr->port_cnt = udma_dev->caps.num_ports; + attr->tp_maintainer = true; for (i = 0; i < udma_dev->caps.num_ports; i++) { net_dev = udma_dev->uboe.netdevs[i]; @@ -383,7 +387,7 @@ static int udma_get_active_speed(uint32_t speed, struct ubcore_port_status *port return 0; } -static int udma_query_device_status(const struct ubcore_device *dev, +static int udma_query_device_status(struct ubcore_device *dev, struct ubcore_device_status *dev_status) { struct udma_dev *udma_dev = to_udma_dev(dev); @@ -423,6 +427,23 @@ static int udma_query_device_status(const struct ubcore_device *dev, return 0; } +int udma_send_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + int ret; + + if (msg == NULL) { + dev_err(udma_dev->dev, "The message to be sent is empty.\n"); + return -EINVAL; + } + + ret = 
ubcore_recv_msg(dev, msg); + if (ret) + dev_err(udma_dev->dev, "Fail to recv msg, ret = %d.\n", ret); + + return ret; +} + int udma_user_ctl_flush_cqe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out, struct ubcore_udrv_priv *udrv_data) @@ -779,7 +800,7 @@ int udma_user_ctl_dca_attach(struct ubcore_ucontext *uctx, struct ubcore_user_ct (uint32_t)sizeof(struct udma_dca_attach_resp))); if (ret) { udma_dca_disattach(udma_device, &attr); - dev_err(udma_device->dev, "copy_to_user failed in dca_attach, ret:%d.\n", + dev_err(udma_device->dev, "cp to user failed in dca_attach, ret:%d.\n", ret); return -EFAULT; } @@ -820,7 +841,7 @@ int udma_user_ctl_dca_query(struct ubcore_ucontext *uctx, struct ubcore_user_ctl ret = (int)copy_from_user(&attr, (void *)in->addr, sizeof(struct udma_dca_query_attr)); if (ret) { - dev_err(udma_device->dev, "copy_from_user failed in dca query, ret:%d.\n", + dev_err(udma_device->dev, "cp from user failed in dca query, ret:%d.\n", ret); return -EFAULT; } @@ -883,6 +904,8 @@ static struct ubcore_ops g_udma_dev_ops = { .owner = THIS_MODULE, .abi_version = 1, .set_eid = udma_set_eid, + .add_ueid = udma_add_ueid, + .delete_ueid = udma_delete_ueid, .query_device_attr = udma_query_device_attr, .query_device_status = udma_query_device_status, .query_res = udma_query_res, @@ -910,6 +933,7 @@ static struct ubcore_ops g_udma_dev_ops = { .create_tp = udma_create_tp, .modify_tp = udma_modify_tp, .destroy_tp = udma_destroy_tp, + .send_msg = udma_send_msg, .user_ctl = udma_user_ctl, .query_stats = udma_query_stats, }; @@ -1079,6 +1103,18 @@ static void udma_init_jetty_table(struct udma_dev *dev) jetty_ida->min = 1; } +static void udma_cleanup_eid_table(struct udma_dev *dev) +{ + if (!xa_empty(&dev->eid_table)) + dev_err(dev->dev, "EID table not empty.\n"); + xa_destroy(&dev->eid_table); +} + +static void udma_init_eid_table(struct udma_dev *dev) +{ + xa_init(&dev->eid_table); +} + int udma_init_eq_idx_table(struct 
udma_dev *udma_dev) { uint32_t eq_num; @@ -1122,6 +1158,7 @@ int udma_setup_hca(struct udma_dev *udma_dev) udma_init_jfr_table(udma_dev); udma_init_jfs_table(udma_dev); udma_init_jetty_table(udma_dev); + udma_init_eid_table(udma_dev); ret = udma_init_eq_idx_table(udma_dev); if (ret) { dev_err(dev, "Failed to init eq_table.\n"); @@ -1130,6 +1167,7 @@ int udma_setup_hca(struct udma_dev *udma_dev) return 0; err_eq_table: + udma_cleanup_eid_table(udma_dev); udma_cleanup_jetty_table(udma_dev); udma_cleanup_jfs_table(udma_dev); udma_cleanup_jfr_table(udma_dev); @@ -1145,6 +1183,7 @@ int udma_setup_hca(struct udma_dev *udma_dev) void udma_teardown_hca(struct udma_dev *udma_dev) { kfree(udma_dev->eq_table.idx_table); + udma_cleanup_eid_table(udma_dev); udma_cleanup_jetty_table(udma_dev); udma_cleanup_jfs_table(udma_dev); udma_cleanup_jfr_table(udma_dev); @@ -1373,7 +1412,6 @@ static int udma_register_device(struct udma_dev *udma_dev) ub_dev->netdev = udma_dev->uboe.netdevs[0]; scnprintf(ub_dev->ops->driver_name, UBCORE_MAX_DRIVER_NAME, "udma_v1"); udma_set_devname(udma_dev, ub_dev); - ub_dev->num_comp_vectors = udma_dev->irq_num; return ubcore_register_device(ub_dev); } diff --git a/drivers/ub/hw/hns3/hns3_udma_qp.c b/drivers/ub/hw/hns3/hns3_udma_qp.c index 7b98f7188d83..dbd2667a1bdb 100644 --- a/drivers/ub/hw/hns3/hns3_udma_qp.c +++ b/drivers/ub/hw/hns3/hns3_udma_qp.c @@ -27,6 +27,7 @@ #include "hns3_udma_tp.h" #include "hns3_udma_db.h" #include "hns3_udma_qp.h" +#include "hns3_udma_eid.h" static bool um_spray_en; static ushort um_data_udp_start; @@ -129,16 +130,13 @@ static int config_qp_sq_buf(struct udma_dev *udma_device, return 0; } -static void udma_set_path(const struct udma_modify_tp_attr *attr, +static void udma_set_path(struct udma_modify_tp_attr *attr, struct udma_qp_context *context, struct udma_qp_context *context_mask) { if (attr == NULL) return; - udma_reg_write(context, QPC_HOPLIMIT, attr->hop_limit); - udma_reg_clear(context_mask, QPC_HOPLIMIT); - 
udma_reg_write(context, QPC_GMV_IDX, attr->sgid_index); udma_reg_clear(context_mask, QPC_GMV_IDX); @@ -207,7 +205,7 @@ int udma_set_dca_buf(struct udma_dev *dev, struct udma_qp *qp) } static bool check_qp_timeout_cfg_range(struct udma_dev *udma_device, - const uint8_t *timeout) + uint8_t *timeout) { if (*timeout > QP_TIMEOUT_MAX) { dev_warn(udma_device->dev, @@ -235,7 +233,7 @@ static enum udma_mtu to_udma_mtu(enum ubcore_mtu core_mtu) } static inline enum ubcore_mtu get_mtu(struct udma_qp *qp, - const struct ubcore_tp_attr *attr) + struct ubcore_tp_attr *attr) { if (qp->qp_type == QPT_UD || attr == NULL) return UBCORE_MTU_4096; @@ -336,7 +334,7 @@ static void edit_qpc_for_rxcqn(struct udma_qp *qp, static void edit_qpc_for_retransmission_parm(struct udma_dev *udma_device, struct udma_qp *qp, - const struct udma_modify_tp_attr *attr, + struct udma_modify_tp_attr *attr, struct udma_qp_context *context, struct udma_qp_context *context_mask) { @@ -389,7 +387,7 @@ static void edit_qpc_for_write(struct udma_qp *qp, } static void edit_qpc_for_receive(struct udma_qp *qp, - const struct udma_modify_tp_attr *attr, + struct udma_modify_tp_attr *attr, struct udma_qp_context *context, struct udma_qp_context *context_mask) { @@ -413,7 +411,7 @@ static void edit_qpc_for_receive(struct udma_qp *qp, } static int modify_qp_reset_to_rtr(struct udma_qp *qp, - const struct udma_modify_tp_attr *attr, + struct udma_modify_tp_attr *attr, struct udma_qp_context *context, struct udma_qp_context *context_mask) { @@ -591,7 +589,7 @@ static int get_dip_ctx_idx(struct udma_qp *qp, static int udma_set_cong_fields(struct udma_qp_context *context, struct udma_qp_context *context_mask, struct udma_qp *qp, - const struct ubcore_tp_attr *attr) + struct ubcore_tp_attr *attr) { struct udma_congestion_algorithm congest_filed; enum udma_cong_type qp_cong_alg; @@ -655,68 +653,47 @@ static int udma_set_cong_fields(struct udma_qp_context *context, } static void udma_set_spray_field(struct udma_qp *qp, - 
const struct ubcore_tp_attr *attr, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask ubcore_mask, struct udma_qp_context *context, struct udma_qp_context *context_mask) { - struct udma_dev *udma_dev = qp->udma_device; - struct udma_modify_tp_attr *m_attr; uint16_t dus_regval; uint16_t aus_regval; uint16_t real_range; - m_attr = qp->m_attr; - real_range = (m_attr->udp_range + - UDP_SRCPORT_RANGE_BASE) & - UDP_SRCPORT_RANGE_SIZE_MASK; - dus_regval = m_attr->data_udp_start & - GENMASK(real_range, 0); - aus_regval = m_attr->ack_udp_start & - GENMASK(real_range, 0); + real_range = (attr->udp_range + UDP_SRCPORT_RANGE_BASE) & + UDP_SRCPORT_RANGE_SIZE_MASK; + dus_regval = attr->data_udp_start & GENMASK(real_range, 0); + aus_regval = attr->ack_udp_start & GENMASK(real_range, 0); udma_reg_enable(&context->ext, QPCEX_AR_EN); udma_reg_clear(&context_mask->ext, QPCEX_AR_EN); - udma_reg_write(&context->ext, QPCEX_ACK_UDP_SRCPORT, - aus_regval); - udma_reg_clear(&context_mask->ext, QPCEX_ACK_UDP_SRCPORT); - - udma_reg_write(&context->ext, QPCEX_DATA_UDP_SRCPORT_L, - dus_regval); - udma_reg_clear(&context_mask->ext, QPCEX_DATA_UDP_SRCPORT_L); - - udma_reg_write(&context->ext, QPCEX_DATA_UDP_SRCPORT_H, - dus_regval >> - QPCEX_DATA_UDP_SRCPORT_H_SHIFT); - udma_reg_clear(&context_mask->ext, QPCEX_DATA_UDP_SRCPORT_H); - - udma_reg_write(&context->ext, QPCEX_UDP_SRCPORT_RANGE, - m_attr->udp_range); - udma_reg_clear(&context_mask->ext, QPCEX_UDP_SRCPORT_RANGE); - - if (udma_dev->caps.reorder_cq_buffer_en && - qp->qp_attr.reorder_cq_addr) { - udma_reg_enable(&context->ext, QPCEX_REORDER_CQ_EN); - udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_EN); - - udma_reg_write(&context->ext, QPCEX_REORDER_CQ_ADDR_L, - lower_32_bits(qp->qp_attr.reorder_cq_addr) >> - QPCEX_REORDER_CQ_ADDR_SHIFT); - udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_ADDR_L); + if (ubcore_mask.bs.ack_udp_start) { + udma_reg_write(&context->ext, QPCEX_ACK_UDP_SRCPORT, + aus_regval); + 
udma_reg_clear(&context_mask->ext, QPCEX_ACK_UDP_SRCPORT); + } - udma_reg_write(&context->ext, QPCEX_REORDER_CQ_ADDR_H, - upper_32_bits(qp->qp_attr.reorder_cq_addr)); - udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_ADDR_H); + if (ubcore_mask.bs.data_udp_start) { + udma_reg_write(&context->ext, QPCEX_DATA_UDP_SRCPORT_L, + dus_regval); + udma_reg_clear(&context_mask->ext, QPCEX_DATA_UDP_SRCPORT_L); + udma_reg_write(&context->ext, QPCEX_DATA_UDP_SRCPORT_H, + dus_regval >> QPCEX_DATA_UDP_SRCPORT_H_SHIFT); + udma_reg_clear(&context_mask->ext, QPCEX_DATA_UDP_SRCPORT_H); + } - udma_reg_write(&context->ext, QPCEX_REORDER_CQ_SHIFT, - udma_dev->caps.reorder_cq_shift); - udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_SHIFT); + if (ubcore_mask.bs.udp_range) { + udma_reg_write(&context->ext, QPCEX_UDP_SRCPORT_RANGE, + attr->udp_range); + udma_reg_clear(&context_mask->ext, QPCEX_UDP_SRCPORT_RANGE); } } static void udma_set_oor_field(struct udma_qp *qp, - const struct ubcore_tp_attr *attr, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask ubcore_mask, struct udma_qp_context *context, struct udma_qp_context *context_mask) @@ -742,10 +719,29 @@ static void udma_set_oor_field(struct udma_qp *qp, udma_reg_write(&context->ext, QPCEX_DYN_AT, udma_dev->caps.dynamic_ack_timeout); udma_reg_clear(&context_mask->ext, QPCEX_DYN_AT); + + if (udma_dev->caps.reorder_cq_buffer_en && + qp->qp_attr.reorder_cq_addr) { + udma_reg_enable(&context->ext, QPCEX_REORDER_CQ_EN); + udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_EN); + + udma_reg_write(&context->ext, QPCEX_REORDER_CQ_ADDR_L, + lower_32_bits(qp->qp_attr.reorder_cq_addr) >> + QPCEX_REORDER_CQ_ADDR_SHIFT); + udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_ADDR_L); + + udma_reg_write(&context->ext, QPCEX_REORDER_CQ_ADDR_H, + upper_32_bits(qp->qp_attr.reorder_cq_addr)); + udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_ADDR_H); + + udma_reg_write(&context->ext, QPCEX_REORDER_CQ_SHIFT, + 
udma_dev->caps.reorder_cq_shift); + udma_reg_clear(&context_mask->ext, QPCEX_REORDER_CQ_SHIFT); + } } static void udma_set_opt_fields(struct udma_qp *qp, - const struct ubcore_tp_attr *attr, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask ubcore_mask, struct udma_qp_context *context, struct udma_qp_context *context_mask) @@ -759,12 +755,16 @@ static void udma_set_opt_fields(struct udma_qp *qp, if (ubcore_mask.bs.flag && attr->flag.bs.oor_en && udma_dev->caps.oor_en) udma_set_oor_field(qp, attr, ubcore_mask, context, context_mask); - if (ubcore_mask.bs.flag) + if (ubcore_mask.bs.flag && attr->flag.bs.cc_en) udma_set_cong_fields(context, context_mask, qp, attr); if (ubcore_mask.bs.flag && attr->flag.bs.spray_en && - (udma_dev->caps.flags & UDMA_CAP_FLAG_AR)) + (udma_dev->caps.flags & UDMA_CAP_FLAG_AR)) { udma_set_spray_field(qp, attr, ubcore_mask, context, context_mask); + } else { + udma_reg_write(context, QPC_UDPSPN, attr->data_udp_start); + udma_reg_clear(context_mask, QPC_UDPSPN); + } if (ubcore_mask.bs.peer_tpn) { udma_reg_write(context, QPC_DQPN, attr->peer_tpn); @@ -812,10 +812,15 @@ static void udma_set_opt_fields(struct udma_qp *qp, udma_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini); udma_reg_clear(context_mask, QPC_ACK_REQ_FREQ); } + + if (ubcore_mask.bs.hop_limit) { + udma_reg_write(context, QPC_HOPLIMIT, attr->hop_limit); + udma_reg_clear(context_mask, QPC_HOPLIMIT); + } } static int udma_set_abs_fields(struct udma_qp *qp, - const struct udma_modify_tp_attr *attr, + struct udma_modify_tp_attr *attr, enum udma_qp_state curr_state, enum udma_qp_state new_state, struct udma_qp_context *context, @@ -880,7 +885,7 @@ static void udma_set_um_attr(struct udma_qp *qp, } int udma_modify_qp_common(struct udma_qp *qp, - const struct ubcore_tp_attr *attr, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask ubcore_mask, enum udma_qp_state curr_state, enum udma_qp_state new_state) @@ -956,7 +961,6 @@ int fill_jfs_qp_attr(struct udma_dev *udma_dev, 
struct udma_qp_attr *qp_attr, qp_attr->cap.max_send_wr = jfs->jfs_cfg.depth; qp_attr->cap.max_send_sge = jfs->jfs_cfg.max_sge; qp_attr->cap.max_inline_data = jfs->jfs_cfg.max_inline_data; - qp_attr->cap.retry_cnt = jfs->jfs_cfg.retry_cnt; qp_attr->cap.rnr_retry = jfs->jfs_cfg.rnr_retry; qp_attr->cap.ack_timeout = jfs->jfs_cfg.err_timeout; qp_attr->qp_type = QPT_RC; @@ -1019,7 +1023,7 @@ int fill_jetty_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, udma_jetty = (struct udma_jetty *)xa_load(&udma_dev->jetty_table.xa, jetty_id); if (IS_ERR_OR_NULL(udma_jetty)) { - dev_err(udma_dev->dev, "failed to find jetty\n"); + dev_err(udma_dev->dev, "failed to find jetty, id = %u.\n", jetty_id); return -EINVAL; } @@ -1030,6 +1034,7 @@ int fill_jetty_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, } qp_attr->jetty = udma_jetty; + qp_attr->tp_mode = udma_jetty->tp_mode; if (!qp_attr->is_tgt || udma_jetty->tp_mode == UBCORE_TP_RC) { qp_attr->uctx = jetty->uctx; qp_attr->qpn_map = &udma_jetty->qpn_map; @@ -1059,7 +1064,6 @@ int fill_jetty_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, qp_attr->cap.min_rnr_timer = udma_jetty->udma_jfr->ubcore_jfr.jfr_cfg.min_rnr_timer; - qp_attr->cap.retry_cnt = jetty->jetty_cfg.retry_cnt; qp_attr->cap.ack_timeout = jetty->jetty_cfg.err_timeout; qp_attr->cap.rnr_retry = jetty->jetty_cfg.rnr_retry; @@ -1067,42 +1071,47 @@ int fill_jetty_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, } int udma_fill_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, - const struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata) + struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata) { - bool is_target = udata->uctx == NULL ? 
true : false; + bool is_target = cfg->flag.bs.target; struct udma_create_tp_ucmd ucmd; struct udma_ucontext *udma_ctx; - int status = 0; + int status, eid_index; if (!udata) return 0; - if (!is_target) { - status = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(ucmd))); - if (status) { - dev_err(udma_dev->dev, "failed to copy create tp ucmd\n"); - return status; - } - } else { - memcpy(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, (uint32_t)sizeof(ucmd))); + status = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, + min(udata->udrv_data->in_len, + (uint32_t)sizeof(ucmd))); + if (status) { + dev_err(udma_dev->dev, + "failed to copy create tp ucmd, status = %d.\n", status); + return status; } qp_attr->is_tgt = is_target; qp_attr->is_jetty = ucmd.is_jetty; qp_attr->remote_eid = cfg->peer_eid; qp_attr->local_eid = cfg->local_eid; - udma_ctx = to_udma_ucontext(udata->uctx); if (!is_target) { + udma_ctx = to_udma_ucontext(udata->uctx); qp_attr->pdn = udma_ctx->pdn; + qp_attr->eid_index = udma_ctx->eid_index; if (!ucmd.is_jetty) return fill_jfs_qp_attr(udma_dev, qp_attr, &ucmd); else return fill_jetty_qp_attr(udma_dev, qp_attr, &ucmd); } else { + eid_index = udma_find_eid_idx(udma_dev, cfg->local_eid); + if (eid_index < 0) { + dev_err(udma_dev->dev, + "Failed to find eid index, eid = "EID_FMT".\n", + EID_ARGS(cfg->local_eid)); + return -EINVAL; + } + qp_attr->eid_index = eid_index; if (!ucmd.is_jetty) return fill_jfr_qp_attr(udma_dev, qp_attr, &ucmd); else @@ -1185,7 +1194,7 @@ static int set_user_sq_size(struct udma_dev *udma_dev, struct udma_qp *qp, static bool is_rc_jetty(struct udma_qp_attr *qp_attr) { if (qp_attr->is_jetty && qp_attr->jetty && - qp_attr->jetty->tp_mode == UBCORE_TP_RC) + qp_attr->tp_mode == UBCORE_TP_RC) return true; return false; @@ -1202,7 +1211,6 @@ static int set_qp_param(struct udma_dev *udma_dev, struct udma_qp *qp, qp->qp_type = qp_attr->qp_type; 
if (!qp_attr->is_tgt) { - qp->retry_cnt = qp_attr->cap.retry_cnt; qp->ack_timeout = qp_attr->cap.ack_timeout; qp->rnr_retry = qp_attr->cap.rnr_retry; if (qp_attr->is_jetty) @@ -1211,7 +1219,6 @@ static int set_qp_param(struct udma_dev *udma_dev, struct udma_qp *qp, } else { qp->min_rnr_timer = qp_attr->cap.min_rnr_timer; if (qp_attr->is_jetty) { - qp->retry_cnt = qp_attr->cap.retry_cnt; qp->ack_timeout = qp_attr->cap.ack_timeout; qp->rnr_retry = qp_attr->cap.rnr_retry; qp->priority = qp_attr->priority; @@ -1225,7 +1232,7 @@ static int set_qp_param(struct udma_dev *udma_dev, struct udma_qp *qp, set_rq_size(qp, &qp_attr->cap); - if (udata && udata->uctx != NULL) { + if (!qp_attr->is_tgt) { ret = copy_from_user(ucmd, (void *)udata->udrv_data->in_addr, min(udata->udrv_data->in_len, (uint32_t)sizeof(struct udma_create_tp_ucmd))); @@ -1251,14 +1258,25 @@ static int set_qp_param(struct udma_dev *udma_dev, struct udma_qp *qp, return ret; } -static uint8_t get_least_load_bankid_for_qp(struct udma_bank *bank) +static uint8_t get_least_load_bankid_for_qp(struct udma_bank *bank, + struct udma_qp_attr *attr, + bool is_target) { - uint32_t least_load = bank[0].inuse; + uint32_t least_load = UDMA_INVALID_LOAD_QPNUM; + struct udma_jfc *jfc; uint8_t bankid = 0; uint32_t bankcnt; uint8_t i; - for (i = 1; i < UDMA_QP_BANK_NUM; i++) { + if (!is_target) + jfc = attr->send_jfc; + else + jfc = attr->recv_jfc; + + for (i = 0; i < UDMA_QP_BANK_NUM; ++i) { + if (jfc && (get_affinity_cq_bank(i) != (jfc->cqn & CQ_BANKID_MASK))) + continue; + bankcnt = bank[i].inuse; if (bankcnt < least_load) { least_load = bankcnt; @@ -1291,9 +1309,10 @@ static int alloc_qpn_with_bankid(struct udma_bank *bank, uint8_t bankid, return 0; } -static int alloc_qpn(struct udma_dev *udma_dev, struct udma_qp *qp) +static int alloc_qpn(struct udma_dev *udma_dev, struct udma_qp *qp, bool is_target) { struct udma_qpn_bitmap *qpn_map = qp->qp_attr.qpn_map; + struct udma_qp_attr *attr = &qp->qp_attr; struct device *dev = 
udma_dev->dev; uint64_t num = 0; uint8_t bankid; @@ -1304,7 +1323,7 @@ static int alloc_qpn(struct udma_dev *udma_dev, struct udma_qp *qp) qpn_map->jid << qpn_map->qpn_shift, 0); } else { mutex_lock(&qpn_map->bank_mutex); - bankid = get_least_load_bankid_for_qp(qpn_map->bank); + bankid = get_least_load_bankid_for_qp(qpn_map->bank, attr, is_target); ret = alloc_qpn_with_bankid(&qpn_map->bank[bankid], bankid, &num); if (ret) { @@ -1717,7 +1736,6 @@ static void free_qpc(struct udma_dev *udma_dev, struct udma_qp *qp) if (udma_dev->caps.flags & UDMA_CAP_FLAG_QP_FLOW_CTRL) { udma_table_put(udma_dev, &qp_table->sccc_table, qp->qpn); - if (qp->dip_idx != qp->qpn && qp->dip_idx >= 0) udma_table_put(udma_dev, &qp_table->sccc_table, qp->dip_idx); @@ -1826,6 +1844,47 @@ static uint32_t udma_get_jetty_qpn(struct udma_qp *qp) return qpn; } +static int udma_alloc_qp_sq(struct udma_dev *udma_dev, struct udma_qp *qp, + struct ubcore_udata *udata, + struct udma_create_tp_ucmd *ucmd) +{ + struct udma_qp_attr *qp_attr = &qp->qp_attr; + int ret = 0; + + if (is_rc_jetty(qp_attr)) { + qp->sdb = qp_attr->jetty->rc_node.sdb; + qp->en_flags |= UDMA_QP_CAP_SQ_RECORD_DB; + qp->dca_ctx = &qp_attr->jetty->rc_node.context->dca_ctx; + if (qp_attr->jetty->rc_node.buf_addr) { + qp->mtr = qp_attr->jetty->rc_node.mtr; + } else { + ret = alloc_qp_wqe(udma_dev, qp, qp_attr->jetty->rc_node.buf_addr); + if (ret) + dev_err(udma_dev->dev, + "failed to alloc QP buffer, ret = %d.\n", + ret); + } + } else { + ret = alloc_qp_wqe(udma_dev, qp, ucmd->buf_addr); + if (ret) { + dev_err(udma_dev->dev, + "failed to alloc QP buffer, ret = %d.\n", + ret); + goto out; + } + ret = alloc_qp_db(udma_dev, qp, udata, ucmd); + if (ret) { + dev_err(udma_dev->dev, + "failed to alloc QP doorbell, ret = %d.\n", + ret); + free_qp_db(udma_dev, qp); + } + } + +out: + return ret; +} + int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, struct ubcore_udata *udata) { @@ -1835,12 +1894,12 @@ int 
udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, struct device *dev = udma_dev->dev; struct udma_create_tp_ucmd ucmd; struct udma_create_tp_resp resp; - bool udma_alloc_sq_flag = false; int ret; qp->state = QPS_RESET; qp->dip_idx = UDMA_SCC_DIP_INVALID_IDX; - qp->dca_ctx = udata->uctx ? &uctx->dca_ctx : qp->dca_ctx; + if (!qp_attr->is_tgt) + qp->dca_ctx = &uctx->dca_ctx; ret = set_qp_param(udma_dev, qp, udata, &ucmd); if (ret) { @@ -1848,7 +1907,7 @@ int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, return ret; } - ret = alloc_qpn(udma_dev, qp); + ret = alloc_qpn(udma_dev, qp, !udata->uctx); if (ret) { dev_err(dev, "failed to alloc QPN, ret = %d.\n", ret); goto err_qpn; @@ -1860,27 +1919,10 @@ int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, } if (udma_qp_need_alloc_sq(qp_attr)) { - udma_alloc_sq_flag = is_rc_jetty(qp_attr) && (ucmd.buf_addr || qp_attr->is_tgt); - if (udma_alloc_sq_flag) { - qp->mtr = qp_attr->jetty->rc_node.mtr; - qp->sdb = qp_attr->jetty->rc_node.sdb; - qp->en_flags |= UDMA_QP_CAP_SQ_RECORD_DB; - } else { - ret = alloc_qp_wqe(udma_dev, qp, ucmd.buf_addr); - if (ret) { - dev_err(dev, - "failed to alloc QP buffer, ret = %d.\n", - ret); - goto err_buf; - } - - ret = alloc_qp_db(udma_dev, qp, udata, &ucmd); - if (ret) { - dev_err(dev, - "failed to alloc QP doorbell, ret = %d.\n", - ret); - goto err_db; - } + ret = udma_alloc_qp_sq(udma_dev, qp, udata, &ucmd); + if (ret) { + dev_err(dev, "failed to alloc QP sq, ret = %d.\n", ret); + goto err_sq; } } @@ -1897,7 +1939,7 @@ int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, goto err_store; } - if (udata && udata->uctx) { + if (!qp_attr->is_tgt) { resp.cap_flags = qp->en_flags; resp.qpn = qp->qpn; resp.priority = qp->priority; @@ -1934,12 +1976,11 @@ int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, err_store: free_qpc(udma_dev, qp); err_qpc: - if (udma_qp_need_alloc_sq(&qp->qp_attr)) + if 
(udma_qp_need_alloc_sq(&qp->qp_attr) && !is_rc_jetty(qp_attr)) { free_qp_db(udma_dev, qp); -err_db: - if (udma_qp_need_alloc_sq(&qp->qp_attr)) free_qp_wqe(udma_dev, qp); -err_buf: + } +err_sq: free_qpn(qp); err_qpn: return ret; diff --git a/drivers/ub/hw/hns3/hns3_udma_qp.h b/drivers/ub/hw/hns3/hns3_udma_qp.h index 24a1d8a2e50a..35c1a5999682 100644 --- a/drivers/ub/hw/hns3/hns3_udma_qp.h +++ b/drivers/ub/hw/hns3/hns3_udma_qp.h @@ -61,6 +61,7 @@ struct udma_qp_context { #define QPC_SQ_SHIFT QPC_FIELD_LOC(139, 136) #define QPC_GMV_IDX QPC_FIELD_LOC(159, 144) #define QPC_HOPLIMIT QPC_FIELD_LOC(167, 160) +#define QPC_DSCP QPC_FIELD_LOC(172, 168) #define QPC_VLAN_ID QPC_FIELD_LOC(187, 176) #define QPC_MTU QPC_FIELD_LOC(191, 188) #define QPC_FL QPC_FIELD_LOC(211, 192) @@ -68,6 +69,7 @@ struct udma_qp_context { #define QPC_AT QPC_FIELD_LOC(223, 219) #define QPC_DMAC_L QPC_FIELD_LOC(383, 352) #define QPC_DMAC_H QPC_FIELD_LOC(399, 384) +#define QPC_UDPSPN QPC_FIELD_LOC(415, 400) #define QPC_DQPN QPC_FIELD_LOC(439, 416) #define QPC_LP_PKTN_INI QPC_FIELD_LOC(447, 444) #define QPC_CONGEST_ALGO_TMPL_ID QPC_FIELD_LOC(455, 448) @@ -165,8 +167,6 @@ struct udma_modify_tp_attr { int qp_access_flags; uint8_t min_rnr_timer; uint32_t qkey; - enum ubcore_mtu path_mtu; - uint8_t hop_limit; uint8_t dgid[UDMA_GID_SIZE]; uint8_t dipv4[4]; uint8_t sgid_index; @@ -218,6 +218,8 @@ struct udma_qp_attr { union ubcore_eid local_eid; int tgt_id; uint8_t priority; + uint32_t eid_index; + enum ubcore_transport_mode tp_mode; }; struct udma_wq { @@ -258,11 +260,13 @@ struct udma_qp { struct udma_jfc *send_jfc; struct udma_jfc *recv_jfc; uint64_t en_flags; + enum udma_sig_type sq_signal_bits; struct udma_mtr mtr; struct udma_dca_cfg dca_cfg; struct udma_dca_ctx *dca_ctx; uint32_t buff_size; enum udma_qp_state state; + uint32_t atomic_rd_en; void (*event)(struct udma_qp *qp, enum udma_event event_type); uint64_t qpn; @@ -277,7 +281,6 @@ struct udma_qp { struct list_head node; /* all qps are on a list */ 
struct list_head rq_node; /* all recv qps are on a list */ struct list_head sq_node; /* all send qps are on a list */ - uint8_t retry_cnt; uint8_t rnr_retry; uint8_t ack_timeout; uint8_t min_rnr_timer; @@ -302,6 +305,8 @@ struct udma_dip { struct list_head node; /* all dips are on a list */ }; +#define UDMA_INVALID_LOAD_QPNUM 0xFFFFFFFF + #define UDMA_CONGEST_SIZE 64 #define UDMA_SCC_DIP_INVALID_IDX (-1) @@ -327,20 +332,30 @@ enum { DIP_VALID, }; +enum { + SUB_ALG_LDCP, + SUB_ALG_HC3, +}; + enum { WND_LIMIT, WND_UNLIMIT, }; +enum { + QP_IS_USER = 1 << 0, + QP_DCA_EN = 1 << 1, +}; + #define gen_qpn(high, mid, low) ((high) | (mid) | (low)) int udma_modify_qp_common(struct udma_qp *qp, - const struct ubcore_tp_attr *attr, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask ubcore_mask, enum udma_qp_state curr_state, enum udma_qp_state new_state); int udma_fill_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, - const struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata); + struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata); int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, struct ubcore_udata *udata); void udma_destroy_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp); @@ -355,4 +370,9 @@ void udma_qp_event(struct udma_dev *udma_dev, uint32_t qpn, int event_type); void copy_send_jfc(struct udma_qp *from_qp, struct udma_qp *to_qp); int udma_set_dca_buf(struct udma_dev *dev, struct udma_qp *qp); +static inline uint8_t get_affinity_cq_bank(uint8_t qp_bank) +{ + return (qp_bank >> 1) & CQ_BANKID_MASK; +} + #endif /* _UDMA_QP_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_segment.c b/drivers/ub/hw/hns3/hns3_udma_segment.c index 817bea1a83fd..2ec399422877 100644 --- a/drivers/ub/hw/hns3/hns3_udma_segment.c +++ b/drivers/ub/hw/hns3/hns3_udma_segment.c @@ -18,6 +18,7 @@ #include "hns3_udma_hem.h" #include "hns3_udma_cmd.h" #include "hns3_udma_dfx.h" +#include "hns3_udma_eid.h" #include "hns3_udma_segment.h" static 
uint32_t hw_index_to_key(int ind) @@ -236,6 +237,7 @@ static void free_seg_key(struct udma_dev *udma_dev, struct udma_seg *seg) static void store_seg_id(struct udma_dev *udma_dev, struct udma_seg *seg) { + struct udma_eid *udma_eid; struct seg_list *seg_new; struct seg_list *seg_now; unsigned long flags; @@ -247,6 +249,14 @@ static void store_seg_id(struct udma_dev *udma_dev, struct udma_seg *seg) if (ret) return; + udma_eid = (struct udma_eid *)xa_load(&udma_dev->eid_table, + seg->ctx->eid_index); + if (IS_ERR_OR_NULL(udma_eid)) { + dev_err(udma_dev->dev, "Failed to find eid, index = %d\n.", + seg->ctx->eid_index); + return; + } + seg_new = kzalloc(sizeof(struct seg_list), GFP_KERNEL); if (seg_new == NULL) return; @@ -256,6 +266,8 @@ static void store_seg_id(struct udma_dev *udma_dev, struct udma_seg *seg) list_for_each_entry(seg_now, &g_udma_dfx_list[i].dfx->seg_list->node, node) { if (seg_now->key_id == seg->key) { + memcpy(&seg_now->eid, &udma_eid->eid, + sizeof(union ubcore_eid)); seg_now->pd = seg->pd; seg_now->iova = seg->iova; seg_now->len = seg->size; @@ -263,6 +275,7 @@ static void store_seg_id(struct udma_dev *udma_dev, struct udma_seg *seg) } } + memcpy(&seg_new->eid, &udma_eid->eid, sizeof(union ubcore_eid)); seg_new->pd = seg->pd; seg_new->iova = seg->iova; seg_new->len = seg->size; @@ -305,8 +318,8 @@ static void delete_seg_id(struct udma_dev *udma_dev, struct udma_seg *seg) } struct ubcore_target_seg *udma_register_seg(struct ubcore_device *dev, - const struct ubcore_seg_cfg *cfg, - struct ubcore_udata *udata) + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); struct udma_ucontext *udma_ctx; @@ -328,6 +341,7 @@ struct ubcore_target_seg *udma_register_seg(struct ubcore_device *dev, seg->size = cfg->len; seg->pd = udma_ctx->pdn; seg->access = cfg->flag.bs.access; + seg->ctx = udma_ctx; ret = alloc_seg_key(udma_dev, seg); if (ret) @@ -341,7 +355,7 @@ struct ubcore_target_seg 
*udma_register_seg(struct ubcore_device *dev, if (ret) goto err_enable_seg; seg->enabled = 1; - seg->ubcore_seg.seg.key_id = seg->key; + seg->ubcore_seg.seg.token_id = seg->key; if (dfx_switch) store_seg_id(udma_dev, seg); @@ -389,8 +403,8 @@ int udma_unregister_seg(struct ubcore_target_seg *seg) } struct ubcore_target_seg *udma_import_seg(struct ubcore_device *dev, - const struct ubcore_target_seg_cfg *cfg, - struct ubcore_udata *udata) + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) { struct ubcore_target_seg *tseg; diff --git a/drivers/ub/hw/hns3/hns3_udma_segment.h b/drivers/ub/hw/hns3/hns3_udma_segment.h index c97ab98b57dd..b3d217543975 100644 --- a/drivers/ub/hw/hns3/hns3_udma_segment.h +++ b/drivers/ub/hw/hns3/hns3_udma_segment.h @@ -76,12 +76,12 @@ struct udma_mpt_entry { #define UDMA_MAX_INNER_MTPT_NUM 2 struct ubcore_target_seg *udma_register_seg(struct ubcore_device *dev, - const struct ubcore_seg_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); int udma_unregister_seg(struct ubcore_target_seg *seg); struct ubcore_target_seg *udma_import_seg(struct ubcore_device *dev, - const struct ubcore_target_seg_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); int udma_unimport_seg(struct ubcore_target_seg *tseg); uint64_t key_to_hw_index(uint32_t key); diff --git a/drivers/ub/hw/hns3/hns3_udma_sysfs.c b/drivers/ub/hw/hns3/hns3_udma_sysfs.c index 0d4e3ac373e7..9dfbb8a8dfff 100644 --- a/drivers/ub/hw/hns3/hns3_udma_sysfs.c +++ b/drivers/ub/hw/hns3/hns3_udma_sysfs.c @@ -85,7 +85,7 @@ static int udma_query_scc_param(struct udma_dev *udma_dev, ret = udma_cmq_send(udma_dev, &desc, 1); if (ret) { dev_err_ratelimited(udma_dev->dev, - "failed to query scc param, opecode: 0x%x, ret = %d.\n", + "failed to query scc param, opcode: 0x%x, ret = %d.\n", le16_to_cpu(desc.opcode), ret); return ret; } @@ -442,7 +442,13 @@ static ssize_t 
udma_port_attr_store(struct kobject *kobj, static void udma_port_release(struct kobject *kobj) { - struct udma_port *pdata = container_of(kobj, struct udma_port, kobj); + struct udma_port *pdata; + int i; + + pdata = container_of(kobj, struct udma_port, kobj); + + for (i = 0; i < UDMA_CONG_TYPE_TOTAL; i++) + cancel_delayed_work_sync(&pdata->scc_param[i].scc_cfg_dwork); kfree(pdata->scc_param); pdata->scc_param = NULL; diff --git a/drivers/ub/hw/hns3/hns3_udma_tp.c b/drivers/ub/hw/hns3/hns3_udma_tp.c index ef2eb335fb87..2242507df2b0 100644 --- a/drivers/ub/hw/hns3/hns3_udma_tp.c +++ b/drivers/ub/hw/hns3/hns3_udma_tp.c @@ -36,7 +36,7 @@ static enum udma_qp_state to_udma_qp_state(enum ubcore_tp_state state) return QPS_RTR; case UBCORE_TP_STATE_RTS: return QPS_RTS; - case UBCORE_TP_STATE_ERROR: + case UBCORE_TP_STATE_ERR: return QPS_ERR; default: return QPS_ERR; @@ -44,7 +44,7 @@ static enum udma_qp_state to_udma_qp_state(enum ubcore_tp_state state) } struct udma_modify_tp_attr *udma_get_m_attr(struct ubcore_tp *tp, struct udma_qp *qp, - const struct ubcore_tp_attr *attr, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask mask) { struct udma_modify_tp_attr *m_attr; @@ -57,15 +57,14 @@ struct udma_modify_tp_attr *udma_get_m_attr(struct ubcore_tp *tp, struct udma_qp m_attr->data_udp_start = tp->data_udp_start; m_attr->ack_udp_start = tp->ack_udp_start; m_attr->udp_range = tp->udp_range; - m_attr->hop_limit = MAX_HOP_LIMIT; - m_attr->sgid_index = 0; + m_attr->sgid_index = qp->qp_attr.eid_index; *(uint32_t *)(&m_attr->dipv4) = *(uint32_t *)(tp->peer_eid.raw + SGID_H_SHIFT); memcpy(m_attr->dgid, tp->peer_eid.raw, sizeof(tp->peer_eid.raw)); if (!qp->qp_attr.is_tgt) { - m_attr->retry_cnt = qp->retry_cnt; - m_attr->ack_timeout = qp->ack_timeout; + m_attr->retry_cnt = tp->retry_num; + m_attr->ack_timeout = tp->ack_timeout; m_attr->rnr_retry = qp->rnr_retry; m_attr->priority = qp->priority; if (qp->qp_attr.is_jetty) @@ -73,8 +72,8 @@ struct udma_modify_tp_attr 
*udma_get_m_attr(struct ubcore_tp *tp, struct udma_qp } else { m_attr->min_rnr_timer = qp->min_rnr_timer; if (qp->qp_attr.is_jetty) { - m_attr->retry_cnt = qp->retry_cnt; - m_attr->ack_timeout = qp->ack_timeout; + m_attr->retry_cnt = tp->retry_num; + m_attr->ack_timeout = tp->ack_timeout; m_attr->rnr_retry = qp->rnr_retry; m_attr->priority = qp->priority; } @@ -83,7 +82,7 @@ struct udma_modify_tp_attr *udma_get_m_attr(struct ubcore_tp *tp, struct udma_qp return m_attr; } -int udma_modify_tp(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr, +int udma_modify_tp(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask mask) { struct udma_modify_tp_attr *m_attr; @@ -120,6 +119,7 @@ int udma_modify_tp(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr, qp->ubcore_path_mtu = attr->mtu; ret = udma_modify_qp_common(qp, attr, mask, curr_state, target_state); kfree(m_attr); + qp->m_attr = NULL; error: return ret; } @@ -232,7 +232,6 @@ static void delete_tpn(struct udma_dev *udma_device, struct ubcore_tp *tp) spin_unlock_irqrestore(lock, flags); } - int udma_destroy_tp(struct ubcore_tp *tp) { struct udma_dev *udma_device = to_udma_dev(tp->ub_dev); @@ -298,7 +297,7 @@ static void udma_set_tp(struct ubcore_device *dev, const struct ubcore_tp_cfg *c tp->ubcore_tp.udp_range = cfg->udp_range; tp->ubcore_tp.retry_num = cfg->retry_num; tp->ubcore_tp.ack_timeout = cfg->ack_timeout; - tp->ubcore_tp.tc = cfg->tc; + tp->ubcore_tp.dscp = cfg->dscp; tp->ubcore_tp.state = UBCORE_TP_STATE_RESET; } @@ -444,8 +443,8 @@ static void unlock_jetty(struct udma_qp_attr *qp_attr) mutex_unlock(&jetty->tp_mutex); } -struct ubcore_tp *udma_create_tp(struct ubcore_device *dev, const struct ubcore_tp_cfg *cfg, - struct ubcore_udata *udata) +struct ubcore_tp *udma_create_tp(struct ubcore_device *dev, struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); struct ubcore_tp *fail_ret_tp = NULL; diff --git 
a/drivers/ub/hw/hns3/hns3_udma_tp.h b/drivers/ub/hw/hns3/hns3_udma_tp.h index 84781db93ad7..77e490eaf3f9 100644 --- a/drivers/ub/hw/hns3/hns3_udma_tp.h +++ b/drivers/ub/hw/hns3/hns3_udma_tp.h @@ -19,10 +19,8 @@ #include #include "hns3_udma_qp.h" -#define MAX_HOP_LIMIT 255 - struct udma_tp { - struct ubcore_tp ubcore_tp; + struct ubcore_tp ubcore_tp; struct udma_qp qp; struct ubcore_jetty_id tjetty_id; }; @@ -32,16 +30,16 @@ static inline struct udma_tp *to_udma_tp(struct ubcore_tp *ubcore_tp) return container_of(ubcore_tp, struct udma_tp, ubcore_tp); } -static inline uint32_t udma_get_jetty_hash(const struct ubcore_jetty_id *jetty_id) +static inline uint32_t udma_get_jetty_hash(struct ubcore_jetty_id *jetty_id) { return jhash(jetty_id, sizeof(struct ubcore_jetty_id), 0); } struct ubcore_tp *udma_create_tp(struct ubcore_device *dev, - const struct ubcore_tp_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_tp(struct ubcore_tp *tp); -int udma_modify_tp(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr, +int udma_modify_tp(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask mask); struct udma_qp *get_qp(struct udma_dev *udma_device, uint32_t qpn); diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile index 21242a76024c..51ab913357fc 100644 --- a/drivers/ub/urma/ubcore/Makefile +++ b/drivers/ub/urma/ubcore/Makefile @@ -12,6 +12,13 @@ ubcore-objs := ubcore_main.o \ ubcore_tp.o \ ubcore_tp_table.o \ ubcore_netlink.o \ - ubcore_dp.o + ubcore_dp.o \ + ubcore_ctp.o \ + ubcore_msg.o \ + ubcore_netdev.o \ + ubcore_tpg.o \ + ubcore_utp.o \ + ubcore_uvs_cmd.o \ + ubcore_vtp.o obj-$(CONFIG_UB) += ubcore.o diff --git a/drivers/ub/urma/ubcore/ubcore_cmd.h b/drivers/ub/urma/ubcore/ubcore_cmd.h index 7e8e49a09d73..5e947c65aaa1 100644 --- a/drivers/ub/urma/ubcore/ubcore_cmd.h +++ b/drivers/ub/urma/ubcore/ubcore_cmd.h @@ -34,40 +34,24 @@ struct ubcore_cmd_hdr { 
#define UBCORE_CMD_MAGIC 'C' #define UBCORE_CMD _IOWR(UBCORE_CMD_MAGIC, 1, struct ubcore_cmd_hdr) -#define UBCORE_MAX_CMD_SIZE 4096 +#define UBCORE_MAX_CMD_SIZE 8192 #define UBCORE_CMD_EID_SIZE 16 +#define UBCORE_CMD_DEV_MAX 64 /* only for ubcore device ioctl */ enum ubcore_cmd { - UBCORE_CMD_SET_UASID = 1, - UBCORE_CMD_PUT_UASID, - UBCORE_CMD_SET_UTP, + UBCORE_CMD_SET_UTP = 1, UBCORE_CMD_SHOW_UTP, UBCORE_CMD_QUERY_STATS, - UBCORE_CMD_QUERY_RES -}; - -struct ubcore_cmd_set_uasid { - struct { - uint64_t token; - uint32_t uasid; - } in; - struct { - uint32_t uasid; - } out; -}; - -struct ubcore_cmd_put_uasid { - struct { - uint32_t uasid; - } in; + UBCORE_CMD_QUERY_RES, + UBCORE_CMD_ADD_EID, + UBCORE_CMD_DEL_EID, + UBCORE_CMD_SET_EID_MODE }; struct ubcore_cmd_query_stats { struct { char dev_name[UBCORE_MAX_DEV_NAME]; - uint8_t eid[UBCORE_CMD_EID_SIZE]; - uint32_t tp_type; uint32_t type; uint32_t key; } in; @@ -84,10 +68,10 @@ struct ubcore_cmd_query_stats { struct ubcore_cmd_query_res { struct { char dev_name[UBCORE_MAX_DEV_NAME]; - uint8_t eid[UBCORE_CMD_EID_SIZE]; - uint32_t tp_type; uint32_t type; uint32_t key; + uint32_t key_ext; + uint32_t key_cnt; } in; struct { uint64_t addr; @@ -98,8 +82,7 @@ struct ubcore_cmd_query_res { struct ubcore_cmd_set_utp { struct { char dev_name[UBCORE_MAX_DEV_NAME]; - uint8_t eid[UBCORE_CMD_EID_SIZE]; - uint32_t transport_type; + uint8_t utp_id; bool spray_en; uint16_t data_udp_start; uint8_t udp_range; @@ -109,16 +92,34 @@ struct ubcore_cmd_set_utp { struct ubcore_cmd_show_utp { struct { char dev_name[UBCORE_MAX_DEV_NAME]; - uint8_t eid[UBCORE_CMD_EID_SIZE]; - uint32_t transport_type; + uint8_t utp_id; + } in; + struct { + uint64_t addr; + uint32_t len; + } out; +}; + +struct ubcore_cmd_add_ueid { + struct { + char dev_name[UBCORE_CMD_DEV_MAX]; + uint32_t eid_index; + } in; +}; + +struct ubcore_cmd_set_eid_mode { + struct { + char dev_name[UBCORE_CMD_DEV_MAX]; + bool eid_mode; } in; }; /* copy from user_space addr to kernel args 
*/ static inline int ubcore_copy_from_user(void *args, const void *args_addr, unsigned long args_size) { - int ret = (int)copy_from_user(args, args_addr, args_size); + int ret; + ret = (int)copy_from_user(args, args_addr, args_size); if (ret != 0) ubcore_log_err("copy from user failed, ret:%d.\n", ret); return ret; @@ -127,8 +128,9 @@ static inline int ubcore_copy_from_user(void *args, const void *args_addr, unsig /* copy kernel args to user_space addr */ static inline int ubcore_copy_to_user(void *args_addr, const void *args, unsigned long args_size) { - int ret = (int)copy_to_user(args_addr, args, args_size); + int ret; + ret = (int)copy_to_user(args_addr, args, args_size); if (ret != 0) ubcore_log_err("copy to user failed ret:%d.\n", ret); return ret; diff --git a/drivers/ub/urma/ubcore/ubcore_ctp.c b/drivers/ub/urma/ubcore/ubcore_ctp.c new file mode 100644 index 000000000000..82e2bd26fef0 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_ctp.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore ctp implementation + * Author: Xu Zhicong + * Create: 2023-10-12 + * Note: + * History: 2023-10-12: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_ctp.h" + +struct ubcore_ctp *ubcore_create_ctp(struct ubcore_device *dev, struct ubcore_ctp_cfg *cfg) +{ + struct ubcore_ctp *ctp; + int ret; + + if (dev->ops == NULL || dev->ops->create_ctp == NULL) + return NULL; + + ctp = dev->ops->create_ctp(dev, cfg, NULL); + if (ctp == NULL) { + ubcore_log_err("Failed to create ctp"); + return NULL; + } + ctp->ub_dev = dev; + ctp->ctp_cfg = *cfg; + atomic_set(&ctp->use_cnt, 1); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_CTP], &ctp->hnode, ctp->ctpn); + if (ret != 0) { + (void)dev->ops->destroy_ctp(ctp); + ctp = NULL; + ubcore_log_err("Failed to add ctp to the ctp table"); + return ctp; + } + + ubcore_log_info("Success to create ctp, ctp_idx %u", ctp->ctpn); + return ctp; +} + +int ubcore_destroy_ctp(struct ubcore_ctp *ctp) +{ + struct ubcore_device *dev = ctp->ub_dev; + uint32_t ctp_idx = ctp->ctpn; + int ret; + + if (dev->ops == NULL || dev->ops->destroy_ctp == NULL) + return -EINVAL; + + if (atomic_dec_return(&ctp->use_cnt) > 0) { + ubcore_log_err("ctp in use"); + return -EBUSY; + } + + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_CTP], &ctp->hnode); + + ret = dev->ops->destroy_ctp(ctp); + if (ret != 0) { + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_CTP], &ctp->hnode, ctp->ctpn); + /* inc ctp use cnt? 
*/ + ubcore_log_err("Failed to destroy ctp"); + return ret; + } + + ubcore_log_info("Success to destroy ctp, ctp_idx %u", ctp_idx); + return ret; +} + +struct ubcore_ctp *ubcore_find_ctp(struct ubcore_device *dev, uint32_t idx) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_CTP], idx, &idx); +} diff --git a/drivers/ub/urma/ubcore/ubcore_ctp.h b/drivers/ub/urma/ubcore/ubcore_ctp.h new file mode 100644 index 000000000000..a63cd01f25ae --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_ctp.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore ctp header + * Author: Xu Zhicong + * Create: 2023-10-12 + * Note: + * History: 2023-10-12: Create file + */ +#ifndef UBCORE_CTP_H +#define UBCORE_CTP_H + +#include + +struct ubcore_ctp *ubcore_create_ctp(struct ubcore_device *dev, struct ubcore_ctp_cfg *cfg); +int ubcore_destroy_ctp(struct ubcore_ctp *ctp); +struct ubcore_ctp *ubcore_find_ctp(struct ubcore_device *dev, uint32_t idx); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c index d2978665ea84..1229e77aa99f 100644 --- a/drivers/ub/urma/ubcore/ubcore_device.c +++ b/drivers/ub/urma/ubcore/ubcore_device.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "ubcore_log.h" #include @@ -35,6 +36,10 @@ #include "ubcore_hash_table.h" #include "ubcore_tp.h" #include "ubcore_tp_table.h" +#include "ubcore_msg.h" +#include "ubcore_netdev.h" +#include "ubcore_vtp.h" +#include "ubcore_netlink.h" static LIST_HEAD(g_client_list); static LIST_HEAD(g_device_list); @@ -48,8 +53,17 @@ static LIST_HEAD(g_device_list); */ static DEFINE_MUTEX(g_device_mutex); static DECLARE_RWSEM(g_lists_rwsem); +static struct ubcore_device *g_tpf; +static DEFINE_MUTEX(g_upi_lock); +static LIST_HEAD(g_upi_list); -void ubcore_set_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client, +struct ubcore_upi_entry { + struct ubcore_device *dev; + uint32_t upi; + struct list_head node; +}; + +void ubcore_set_client_ctx_data(struct ubcore_device *dev, struct ubcore_client *client, void *data) { struct ubcore_client_ctx *ctx; @@ -70,7 +84,7 @@ void ubcore_set_client_ctx_data(struct ubcore_device *dev, const struct ubcore_c } EXPORT_SYMBOL(ubcore_set_client_ctx_data); -void *ubcore_get_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client) +void *ubcore_get_client_ctx_data(struct ubcore_device *dev, struct ubcore_client *client) { struct ubcore_client_ctx *found_ctx = NULL; struct ubcore_client_ctx *ctx, *tmp; @@ 
-207,13 +221,34 @@ void ubcore_unregister_client(struct ubcore_client *rm_client) EXPORT_SYMBOL(ubcore_unregister_client); struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, enum ubcore_transport_type type) +{ + struct ubcore_device *dev, *target = NULL; + uint32_t idx; + + mutex_lock(&g_device_mutex); + list_for_each_entry(dev, &g_device_list, list_node) { + for (idx = 0; idx < dev->attr.max_eid_cnt; idx++) { + if (memcmp(&dev->eid_table.eid_entries[idx].eid, eid, + sizeof(union ubcore_eid)) == 0 && dev->transport_type == type) { + target = dev; + ubcore_get_device(target); + break; + } + } + if (target != NULL) + break; + } + mutex_unlock(&g_device_mutex); + return target; +} + +struct ubcore_device *ubcore_find_device_with_name(const char *dev_name) { struct ubcore_device *dev, *target = NULL; mutex_lock(&g_device_mutex); list_for_each_entry(dev, &g_device_list, list_node) { - if (memcmp(&dev->attr.eid, eid, sizeof(union ubcore_eid)) == 0 && - dev->transport_type == type) { + if (strcmp(dev->dev_name, dev_name) == 0) { target = dev; ubcore_get_device(target); break; @@ -223,22 +258,103 @@ struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, enum ubcore_tran return target; } -/* Find only, without get_device */ -static struct ubcore_device *ubcore_find_device_with_name(const char *dev_name) +struct ubcore_device *ubcore_find_device_with_eid_index(union ubcore_eid *eid, + enum ubcore_transport_type type, uint32_t eid_index) { struct ubcore_device *dev, *target = NULL; + int ret; mutex_lock(&g_device_mutex); list_for_each_entry(dev, &g_device_list, list_node) { - if (strcmp(dev->dev_name, dev_name) == 0) { + if (eid_index >= dev->attr.max_eid_cnt) + continue; + + /* use idx == eid_index to compare + * because eid list order is followed by device order + */ + /* so use the idx of device list is the same as use the idx of eid list */ + ret = memcmp(&dev->eid_table.eid_entries[eid_index].eid, + eid, sizeof(union ubcore_eid)); + if (ret == 0 && 
dev->transport_type == type) { target = dev; + ubcore_log_info( + "find dev:%s with eid = "EID_FMT", trans_type = %u, eid_idx = %u", + dev->dev_name, EID_ARGS(*eid), (uint32_t)type, eid_index + ); + ubcore_get_device(target); break; } + if (target != NULL) + break; } + if (target == NULL) + ubcore_log_warn( + "cannot find dev with eid = "EID_FMT", trans_type = %u, eid_idx = %u", + EID_ARGS(*eid), (uint32_t)type, eid_index + ); mutex_unlock(&g_device_mutex); return target; } +struct ubcore_device *ubcore_find_upi_with_dev_name(const char *dev_name, uint32_t *upi) +{ + struct ubcore_upi_entry *entry = NULL; + struct ubcore_device *dev = NULL; + + mutex_lock(&g_upi_lock); + list_for_each_entry(entry, &g_upi_list, node) { + if (entry != NULL && strcmp(entry->dev->dev_name, dev_name) == 0) { + *upi = entry->upi; + dev = entry->dev; + break; + } + } + mutex_unlock(&g_upi_lock); + return dev; +} + +int ubcore_add_upi_list(struct ubcore_device *dev, uint32_t upi) +{ + struct ubcore_upi_entry *entry, *new_entry; + + mutex_lock(&g_upi_lock); + list_for_each_entry(entry, &g_upi_list, node) { + if (entry != NULL && entry->dev == dev) { + entry->upi = upi; + mutex_unlock(&g_upi_lock); + return 0; + } + } + mutex_unlock(&g_upi_lock); + + new_entry = kzalloc(sizeof(struct ubcore_upi_entry), GFP_ATOMIC); + if (new_entry == NULL) + return -ENOMEM; + + new_entry->dev = dev; + new_entry->upi = upi; + + mutex_lock(&g_upi_lock); + list_add_tail(&new_entry->node, &g_upi_list); + mutex_unlock(&g_upi_lock); + ubcore_log_info("add dev_name: %s, upi: 0x%x to upi list\n", dev->dev_name, upi); + return 0; +} + +void ubcore_destroy_upi_list(void) +{ + struct ubcore_upi_entry *entry = NULL, *next; + + mutex_lock(&g_upi_lock); + list_for_each_entry_safe(entry, next, &g_upi_list, node) { + if (entry != NULL) { + list_del(&entry->node); + kfree(entry); + } + } + mutex_unlock(&g_upi_lock); +} + struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, uint32_t *cnt) { struct 
ubcore_device **devices; @@ -312,9 +428,55 @@ void ubcore_put_device(struct ubcore_device *dev) complete(&dev->comp); } +struct ubcore_device *ubcore_find_tpf_device(struct ubcore_net_addr *netaddr, + enum ubcore_transport_type type) +{ + if (g_tpf == NULL) + ubcore_log_err("tpf is not registered yet"); + + ubcore_get_device(g_tpf); + return g_tpf; +} + +int ubcore_tpf_device_set_global_cfg(struct ubcore_set_global_cfg *cfg) +{ + struct ubcore_device_cfg dev_cfg = {0}; + struct ubcore_device *dev = NULL; + int ret; + + if (cfg == NULL) { + ubcore_log_err("Invalid paramete"); + return -EINVAL; + } + + if (cfg->mask.bs.suspend_period == 1) { + dev_cfg.mask.bs.suspend_period = 1; + dev_cfg.suspend_period = cfg->suspend_period; + } + + if (cfg->mask.bs.suspend_cnt == 1) { + dev_cfg.mask.bs.suspend_cnt = 1; + dev_cfg.suspend_cnt = cfg->suspend_cnt; + } + + /* Query all existing TPF devices and configure */ + mutex_lock(&g_device_mutex); + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->attr.tp_maintainer == false) + continue; + + dev_cfg.fe_idx = dev->attr.fe_idx; + ret = ubcore_config_device(dev, &dev_cfg); + if (ret != 0) + ubcore_log_err("dev: %s set failed, ret: %d", dev->dev_name, ret); + } + mutex_unlock(&g_device_mutex); + return 0; +} + static struct ubcore_ht_param g_ht_params[] = { - [UBCORE_HT_JFS] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfs, hnode), - offsetof(struct ubcore_jfs, id), sizeof(uint32_t), NULL, NULL }, + [UBCORE_HT_JFS] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfs, hnode), + offsetof(struct ubcore_jfs, id), sizeof(uint32_t), NULL, NULL}, [UBCORE_HT_JFR] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfr, hnode), offsetof(struct ubcore_jfr, id), sizeof(uint32_t), NULL, NULL }, @@ -325,9 +487,38 @@ static struct ubcore_ht_param g_ht_params[] = { [UBCORE_HT_JETTY] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jetty, hnode), offsetof(struct ubcore_jetty, id), sizeof(uint32_t), NULL, NULL }, - 
[UBCORE_HT_TP] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_tp_node, hnode), - offsetof(struct ubcore_tp_node, key), sizeof(struct ubcore_tp_key), NULL, - NULL }, + [UBCORE_HT_TP] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_tp_node, hnode), + offsetof(struct ubcore_tp_node, key), sizeof(struct ubcore_tp_key), NULL, NULL}, + + [UBCORE_HT_TPG] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_tpg, hnode), + offsetof(struct ubcore_tpg, tpgn), sizeof(uint32_t), NULL, NULL}, + + /* key: seid + deid */ + [UBCORE_HT_RM_VTP] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_vtp, hnode), + offsetof(struct ubcore_vtp, cfg) + offsetof(struct ubcore_vtp_cfg, local_eid), + sizeof(union ubcore_eid) * 2, NULL, NULL}, + + /* key: deid + djetty */ + [UBCORE_HT_RC_VTP] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_vtp, hnode), + offsetof(struct ubcore_vtp, cfg) + offsetof(struct ubcore_vtp_cfg, peer_eid), + sizeof(union ubcore_eid) + sizeof(uint32_t), NULL, NULL}, + + /* key: seid + deid */ + [UBCORE_HT_UM_VTP] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_vtp, hnode), + offsetof(struct ubcore_vtp, cfg) + offsetof(struct ubcore_vtp_cfg, local_eid), + sizeof(union ubcore_eid) * 2, NULL, NULL}, + + /* key: src_eid + des_eid */ + [UBCORE_HT_VTPN] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_vtpn, hnode), + offsetof(struct ubcore_vtpn, local_eid), sizeof(union ubcore_eid) * 2, NULL, NULL}, + + /* key: utp idx */ + [UBCORE_HT_UTP] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_utp, hnode), + offsetof(struct ubcore_utp, utpn), sizeof(uint32_t), NULL, NULL}, + + /* key: ctp idx */ + [UBCORE_HT_CTP] = {UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_ctp, hnode), + offsetof(struct ubcore_ctp, ctpn), sizeof(uint32_t), NULL, NULL}, }; static int ubcore_alloc_hash_tables(struct ubcore_device *dev) @@ -342,6 +533,7 @@ static int ubcore_alloc_hash_tables(struct ubcore_device *dev) goto free_tables; } } + return 0; free_tables: @@ -362,6 +554,124 @@ static void 
ubcore_device_release(struct device *device) { } +static int ubcore_create_eidtable(struct ubcore_device *dev) +{ + struct ubcore_eid_entry *entry_list; + + entry_list = kcalloc(1, + dev->attr.max_eid_cnt * sizeof(struct ubcore_eid_entry), GFP_ATOMIC); + if (entry_list == NULL) + return -ENOMEM; + + dev->eid_table.eid_entries = entry_list; + spin_lock_init(&dev->eid_table.lock); + dev->eid_table.max_valid_pos = 0; + dev->dynamic_eid = 1; + return 0; +} + +static void ubcore_destroy_eidtable(struct ubcore_device *dev) +{ + if (dev->eid_table.eid_entries != NULL) { + kfree(dev->eid_table.eid_entries); + dev->eid_table.eid_entries = NULL; + } +} + +static int ubcore_query_send_tpf_dev_info(struct ubcore_device *dev) +{ + struct ubcore_nlmsg *resp_msg, *req_msg; + struct ubcore_update_tpf_dev_info_resp *resp; + struct ubcore_update_tpf_dev_info_req *data; + struct ubcore_cc_entry *cc_entry; + struct ubcore_cc_entry *array; + uint32_t cc_entry_cnt; + uint32_t cc_len; + int ret; + + if (dev->ops == NULL || dev->ops->query_cc == NULL) { + ubcore_log_err("Invalid parameter!\n"); + return -EINVAL; + } + + cc_entry = dev->ops->query_cc(dev, &cc_entry_cnt); + if (cc_entry == NULL) { + ubcore_log_err("Failed to query cc entry\n"); + return -EPERM; + } + + if (cc_entry_cnt > UBCORE_CC_IDX_TABLE_SIZE || cc_entry_cnt == 0) { + kfree(cc_entry); + ubcore_log_err("cc_entry_cnt invalid, %u.\n", cc_entry_cnt); + return -EINVAL; + } + + cc_len = sizeof(struct ubcore_update_tpf_dev_info_req) + + cc_entry_cnt * sizeof(struct ubcore_cc_entry); + + req_msg = kcalloc(1, sizeof(struct ubcore_nlmsg) + cc_len, GFP_KERNEL); + if (req_msg == NULL) { + kfree(cc_entry); + ubcore_log_err("Failed to alloc update tpf dev req msg.\n"); + return -ENOMEM; + } + + /* fill msg head */ + req_msg->msg_type = UBCORE_NL_UPDATE_TPF_DEV_INFO_REQ; + req_msg->transport_type = dev->transport_type; + req_msg->payload_len = cc_len; + + /* fill msg payload */ + data = (struct ubcore_update_tpf_dev_info_req 
*)req_msg->payload; + data->dev_fea = dev->attr.dev_cap.feature; + data->cc_entry_cnt = cc_entry_cnt; + (void)strcpy(data->dev_name, dev->dev_name); + array = (struct ubcore_cc_entry *)data->data; + (void)memcpy(array, cc_entry, sizeof(struct ubcore_cc_entry) * cc_entry_cnt); + + resp_msg = ubcore_nl_send_wait(req_msg); + if (resp_msg == NULL) { + ubcore_log_err("Failed to wait query response"); + kfree(cc_entry); + kfree(req_msg); + return -1; + } + + resp = (struct ubcore_update_tpf_dev_info_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_NL_UPDATE_TPF_DEV_INFO_RESP || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err("update tpf dev info request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 1 : resp->ret)); + ret = -1; + } else { + ret = 0; + } + + kfree(cc_entry); + kfree(resp_msg); + kfree(req_msg); + return ret; +} + +int ubcore_query_all_device_tpf_dev_info(void) +{ + struct ubcore_device *dev; + int ret = 0; + + mutex_lock(&g_device_mutex); + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->transport_type == UBCORE_TRANSPORT_UB && dev->attr.tp_maintainer) { + if (ubcore_query_send_tpf_dev_info(dev) != 0) { + ubcore_log_warn("failed to update tpf dev info in ubcore with dev name %s", + dev->dev_name); + ret = -1; + } + } + } + mutex_unlock(&g_device_mutex); + return ret; +} + static int init_ubcore_device(struct ubcore_device *dev) { if (dev->ops->query_device_attr != NULL && @@ -370,6 +680,10 @@ static int init_ubcore_device(struct ubcore_device *dev) return -1; } + /* set tpf device */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && g_tpf == NULL && dev->attr.tp_maintainer) + g_tpf = dev; + device_initialize(&dev->dev); dev_set_drvdata(&dev->dev, dev); dev_set_name(&dev->dev, "%s", dev->dev_name); @@ -382,35 +696,171 @@ static int init_ubcore_device(struct ubcore_device *dev) spin_lock_init(&dev->event_handler_lock); INIT_LIST_HEAD(&dev->event_handler_list); + if 
(!dev->attr.virtualization) + (void)ubcore_add_upi_list(dev, UCBORE_INVALID_UPI); + init_completion(&dev->comp); atomic_set(&dev->use_cnt, 1); + /* save tpf device to the list g_tpf_list */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && dev->attr.tp_maintainer) { + if (ubcore_get_netlink_valid() && ubcore_query_send_tpf_dev_info(dev) != 0) + ubcore_log_warn( + "failed to query cc info in ubcore with dev name %s", + dev->dev_name); + } + + if (ubcore_create_eidtable(dev) != 0) { + ubcore_log_err("create eidtable failed.\n"); + return -1; + } + if (ubcore_alloc_hash_tables(dev) != 0) { + ubcore_destroy_eidtable(dev); ubcore_log_err("alloc hash tables failed.\n"); return -1; } - ubcore_set_default_eid(dev); + ubcore_update_default_eid(dev, true); return 0; } static void uninit_ubcore_device(struct ubcore_device *dev) { + ubcore_update_default_eid(dev, false); ubcore_free_hash_tables(dev); + ubcore_destroy_eidtable(dev); + + if (!dev->attr.virtualization) + ubcore_destroy_upi_list(); + if (g_tpf == dev && dev->attr.tp_maintainer) + g_tpf = NULL; + put_device(&dev->dev); } +static int ubcore_config_device_rsp_msg_cb(struct ubcore_device *dev, + struct ubcore_msg *msg, void *msg_ctx) +{ + struct ubcore_msg_config_device_resp *data; + struct ubcore_device_cfg cfg = {0}; + + if (dev == NULL || dev->ops == NULL || dev->ops->config_device == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (msg == NULL || msg->hdr.type != UBCORE_MSG_TYPE_TPF2FE || + msg->hdr.len != sizeof(struct ubcore_msg_config_device_resp) || + msg->hdr.opcode != UBCORE_MSG_CONFIG_DEVICE) { + ubcore_log_err("Failed to query data from the UVS. 
Use the default value.\n"); + return -EINVAL; + } + + data = (struct ubcore_msg_config_device_resp *)msg->data; + cfg.fe_idx = dev->attr.fe_idx; + cfg.mask.bs.rc_cnt = 1; + cfg.mask.bs.rc_depth = 1; + cfg.rc_cfg.rc_cnt = data->rc_cnt; + cfg.rc_cfg.depth = data->rc_depth; + + cfg.mask.bs.slice = 1; + cfg.slice = data->slice; + + /* For a new TPF device, the suspend config needs to be set. */ + if (data->is_tpf_dev) { + cfg.mask.bs.suspend_period = 1; + cfg.suspend_period = data->suspend_period; + cfg.mask.bs.suspend_cnt = 1; + cfg.suspend_cnt = data->suspend_cnt; + } + + return dev->ops->config_device(dev, &cfg); +} + +static int ubcore_config_device_default(struct ubcore_device *dev) +{ + struct ubcore_device_cfg cfg = {0}; + + if (dev == NULL || dev->ops == NULL || dev->ops->config_device == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + cfg.fe_idx = dev->attr.fe_idx; + + cfg.mask.bs.rc_cnt = 1; + cfg.mask.bs.rc_depth = 1; + cfg.rc_cfg.rc_cnt = dev->attr.dev_cap.max_rc; + cfg.rc_cfg.depth = dev->attr.dev_cap.max_rc_depth; + + cfg.mask.bs.slice = 1; + cfg.slice = dev->attr.dev_cap.max_slice; + + /* If suspend_period and cnt cannot be read, do not need to configure it */ + return dev->ops->config_device(dev, &cfg); +} + +static int ubcore_config_device_in_register(struct ubcore_device *dev) +{ + struct ubcore_msg_config_device_req *data; + struct ubcore_msg *req_msg; + struct ubcore_resp_cb cb = { + .callback = ubcore_config_device_rsp_msg_cb, + .user_arg = NULL + }; + int ret; + + if (dev->transport_type != UBCORE_TRANSPORT_UB) + return 0; + + if (ubcore_get_netlink_valid() == false) { + ubcore_log_info("UVS is not connected, and use default config. 
dev: %s.\n", + dev->dev_name); + return ubcore_config_device_default(dev); + } + + req_msg = kcalloc(1, sizeof(struct ubcore_msg) + + sizeof(struct ubcore_msg_config_device_req), GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->hdr.type = UBCORE_MSG_TYPE_FE2TPF; + req_msg->hdr.opcode = UBCORE_MSG_CONFIG_DEVICE; + req_msg->hdr.len = (uint32_t)sizeof(struct ubcore_msg_config_device_req); + + data = (struct ubcore_msg_config_device_req *)req_msg->data; + (void)memcpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + data->max_rc_cnt = dev->attr.dev_cap.max_rc; + data->max_rc_depth = dev->attr.dev_cap.max_rc_depth; + data->min_slice = dev->attr.dev_cap.min_slice; + data->max_slice = dev->attr.dev_cap.max_slice; + + /* New TPF devices need to be query suspend info. */ + data->is_tpf_dev = dev->attr.tp_maintainer; + + ret = ubcore_send_fe2tpf_msg(dev, req_msg, true, &cb); + if (ret != 0) { + ubcore_log_err("send fe2tpf failed.\n"); + return ubcore_config_device_default(dev); + } + return ret; +} + int ubcore_register_device(struct ubcore_device *dev) { struct ubcore_client *client = NULL; struct ubcore_client_ctx *ctx = NULL; + struct ubcore_device *find_dev = NULL; if (dev == NULL || dev->ops == NULL || strlen(dev->dev_name) == 0) { ubcore_log_err("Invalid parameter.\n"); return -EINVAL; } - if (ubcore_find_device_with_name(dev->dev_name) != NULL) { + find_dev = ubcore_find_device_with_name(dev->dev_name); + if (find_dev != NULL) { ubcore_log_err("Duplicate device name %s.\n", dev->dev_name); + ubcore_put_device(find_dev); return -EEXIST; } @@ -419,6 +869,11 @@ int ubcore_register_device(struct ubcore_device *dev) return -EINVAL; } + if (ubcore_config_device_in_register(dev) != 0) { + ubcore_log_err("failed to config ubcore device.\n"); + return -EPERM; + } + mutex_lock(&g_device_mutex); list_for_each_entry(client, &g_client_list, list_node) { @@ -522,7 +977,49 @@ void ubcore_dispatch_async_event(struct ubcore_event *event) } if (event->event_type 
== UBCORE_EVENT_TP_ERR && event->element.tp != NULL) { - ubcore_restore_tp(event->ub_dev, event->element.tp); + ubcore_log_info("ubcore detect tp error event"); + if (event->ub_dev->transport_type == UBCORE_TRANSPORT_IB) { + ubcore_restore_tp(event->ub_dev, event->element.tp); + } else if (event->ub_dev->transport_type == UBCORE_TRANSPORT_UB) { + if (event->element.tp->state == UBCORE_TP_STATE_ERR || + event->element.tp->state == UBCORE_TP_STATE_RESET) { + ubcore_log_warn("Tp already in state %d, ignore err event", + (int32_t)event->element.tp->state); + return; + } + + if (ubcore_change_tp_to_err(event->ub_dev, event->element.tp) != 0) + ubcore_log_info("ubcore change tp to error failed"); + } + return; + } else if (event->event_type == UBCORE_EVENT_TP_SUSPEND && event->element.tp != NULL) { + ubcore_log_info("ubcore detect tp suspend event"); + ubcore_report_tp_suspend(event->ub_dev, event->element.tp); + return; + } else if (event->event_type == UBCORE_EVENT_MIGRATE_VTP_SWITCH && + event->element.vtp != NULL) { + ubcore_log_info("ubcore detect migrate vtp switch event"); + ubcore_report_migrate_vtp(event->ub_dev, event->element.vtp, + UBCORE_EVENT_MIGRATE_VTP_SWITCH); + return; + } else if (event->event_type == UBCORE_EVENT_MIGRATE_VTP_ROLLBACK && + event->element.vtp != NULL) { + ubcore_log_info("ubcore detect migrate vtp rollback event"); + ubcore_report_migrate_vtp(event->ub_dev, event->element.vtp, + UBCORE_EVENT_MIGRATE_VTP_ROLLBACK); + return; + } else if (event->event_type == UBCORE_EVENT_TP_FLUSH_DONE) { + ubcore_log_info("ubcore detect tp flush done event"); + if (event->element.tp->state == UBCORE_TP_STATE_RESET) { + ubcore_log_warn("Tp already in state %d, ignore flush done event", + (int32_t)event->element.tp->state); + return; + } + /* flush done means tp already in error, + * and all pkt have been send need uvs to restore + */ + if (event->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + ubcore_report_tp_error(event->ub_dev, event->element.tp); 
return; } @@ -534,8 +1031,8 @@ void ubcore_dispatch_async_event(struct ubcore_event *event) } EXPORT_SYMBOL(ubcore_dispatch_async_event); -struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t uasid, - struct ubcore_udrv_priv *udrv_data) +struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data) { struct ubcore_ucontext *ucontext; @@ -543,19 +1040,19 @@ struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_ ubcore_log_err("alloc_ucontext not registered.\n"); return NULL; } - ucontext = dev->ops->alloc_ucontext(dev, uasid, udrv_data); + ucontext = dev->ops->alloc_ucontext(dev, eid_index, udrv_data); if (ucontext == NULL) { ubcore_log_err("failed to alloc ucontext.\n"); return NULL; } - ucontext->uasid = uasid; + + ucontext->eid_index = eid_index; ucontext->ub_dev = dev; - ubcore_log_info("success to alloc ucontext with uasid = %u", uasid); return ucontext; } EXPORT_SYMBOL(ubcore_alloc_ucontext); -void ubcore_free_ucontext(const struct ubcore_device *dev, struct ubcore_ucontext *ucontext) +void ubcore_free_ucontext(struct ubcore_device *dev, struct ubcore_ucontext *ucontext) { int ret; @@ -571,26 +1068,7 @@ void ubcore_free_ucontext(const struct ubcore_device *dev, struct ubcore_ucontex } EXPORT_SYMBOL(ubcore_free_ucontext); -int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid) -{ - int ret; - - if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->set_eid == NULL) { - ubcore_log_err("Invalid argument.\n"); - return -EINVAL; - } - - ret = dev->ops->set_eid(dev, *eid); - if (ret != 0) { - ubcore_log_err("failed to set eid, ret: %d.\n", ret); - return -EPERM; - } - dev->attr.eid = *eid; - return 0; -} -EXPORT_SYMBOL(ubcore_set_eid); - -int ubcore_set_upi(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx, uint32_t upi) +int ubcore_set_upi(struct ubcore_device *dev, uint16_t fe_idx, uint16_t idx, uint32_t 
upi) { int ret; @@ -599,52 +1077,16 @@ int ubcore_set_upi(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx return -EINVAL; } - ret = dev->ops->set_upi(dev, vf_id, idx, upi); + ret = dev->ops->set_upi(dev, fe_idx, idx, upi); if (ret != 0) { - ubcore_log_err("failed to set vf%hu upi%hu, ret: %d.\n", vf_id, idx, ret); + ubcore_log_err("failed to set fe%hu upi%hu, ret: %d.\n", fe_idx, idx, ret); return -EPERM; } return 0; } EXPORT_SYMBOL(ubcore_set_upi); -int ubcore_add_eid(struct ubcore_device *dev, union ubcore_eid *eid) -{ - int ret; - - if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->add_eid == NULL) { - ubcore_log_err("Invalid argument.\n"); - return -EINVAL; - } - - ret = dev->ops->add_eid(dev, eid); - if (ret != 0) { - ubcore_log_err("failed to add eid, ret: %d.\n", ret); - return -EPERM; - } - return ret; -} -EXPORT_SYMBOL(ubcore_add_eid); - -int ubcore_delete_eid(struct ubcore_device *dev, uint16_t idx) -{ - int ret; - - if (dev == NULL || dev->ops == NULL || dev->ops->delete_eid_by_idx == NULL) { - ubcore_log_err("Invalid argument.\n"); - return -EINVAL; - } - - ret = dev->ops->delete_eid_by_idx(dev, idx); - if (ret != 0) { - ubcore_log_err("failed to delete eid, ret: %d.\n", ret); - return -EPERM; - } - return ret; -} -EXPORT_SYMBOL(ubcore_delete_eid); - -int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg) +int ubcore_add_ueid(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg) { int ret; @@ -653,7 +1095,7 @@ int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_uei return -EINVAL; } - ret = dev->ops->add_ueid(dev, vf_id, cfg); + ret = dev->ops->add_ueid(dev, fe_idx, cfg); if (ret != 0) { ubcore_log_err("failed to add ueid, ret: %d.\n", ret); return -EPERM; @@ -662,16 +1104,16 @@ int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_uei } EXPORT_SYMBOL(ubcore_add_ueid); -int ubcore_delete_ueid(struct ubcore_device *dev, 
uint16_t vf_id, uint16_t idx) +int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg) { int ret; - if (dev == NULL || dev->ops == NULL || dev->ops->delete_ueid_by_idx == NULL) { + if (dev == NULL || dev->ops == NULL || dev->ops->delete_ueid == NULL) { ubcore_log_err("Invalid argument.\n"); return -EINVAL; } - ret = dev->ops->delete_ueid_by_idx(dev, vf_id, idx); + ret = dev->ops->delete_ueid(dev, fe_idx, cfg); if (ret != 0) { ubcore_log_err("failed to delete eid, ret: %d.\n", ret); return -EPERM; @@ -698,7 +1140,7 @@ int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_att } EXPORT_SYMBOL(ubcore_query_device_attr); -int ubcore_query_device_status(const struct ubcore_device *dev, struct ubcore_device_status *status) +int ubcore_query_device_status(struct ubcore_device *dev, struct ubcore_device_status *status) { int ret; @@ -716,7 +1158,7 @@ int ubcore_query_device_status(const struct ubcore_device *dev, struct ubcore_de } EXPORT_SYMBOL(ubcore_query_device_status); -int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key *key, +int ubcore_query_resource(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_val *val) { int ret; @@ -736,7 +1178,7 @@ int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key } EXPORT_SYMBOL(ubcore_query_resource); -int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg) +int ubcore_config_device(struct ubcore_device *dev, struct ubcore_device_cfg *cfg) { int ret; @@ -780,7 +1222,7 @@ int ubcore_user_control(struct ubcore_user_ctl *k_user_ctl) } EXPORT_SYMBOL(ubcore_user_control); -int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, +int ubcore_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, struct ubcore_stats_val *val) { int ret; @@ -799,3 +1241,161 @@ int ubcore_query_stats(const struct ubcore_device *dev, struct 
ubcore_stats_key return 0; } EXPORT_SYMBOL(ubcore_query_stats); + +static int ubcore_add_device_sip(struct ubcore_device *dev, struct ubcore_sip_info *sip) +{ + uint32_t index; + int ret; + + ret = ubcore_lookup_sip_idx(sip, &index); + if (ret == 0) { + ubcore_log_err("sip already exists\n"); + return -1; + } + index = ubcore_sip_idx_alloc(0); + + if (dev->ops->add_net_addr != NULL && dev->ops->add_net_addr(dev, &sip->addr, index) != 0) { + ubcore_log_err("Failed to set net addr"); + ret = -1; + goto free_sip_index; + } + /* add net_addr entry, record idx -> netaddr mapping */ + if (ubcore_add_sip_entry(sip, index) != 0) { + ret = -1; + goto del_net_addr; + } + /* nodify uvs add sip info */ + if (ubcore_get_netlink_valid() == true) + (void)ubcore_notify_uvs_add_sip(dev, sip, index); + return 0; + +del_net_addr: + if (dev->ops->delete_net_addr != NULL) + dev->ops->delete_net_addr(dev, index); +free_sip_index: + (void)ubcore_sip_idx_free(index); + return ret; +} + +static int ubcore_del_device_sip(struct ubcore_device *dev, struct ubcore_sip_info *sip) +{ + uint32_t index; + + if (ubcore_lookup_sip_idx(sip, &index) != 0) + return -1; + + (void)ubcore_del_sip_entry(index); + + if (dev->ops->delete_net_addr != NULL && dev->ops->delete_net_addr(dev, index) != 0) { + ubcore_log_err("Failed to delete net addr"); + (void)ubcore_add_sip_entry(sip, index); + return -1; + } + /* nodify uvs add sip info */ + if (ubcore_get_netlink_valid() == true) + (void)ubcore_notify_uvs_del_sip(dev, sip, index); + + (void)ubcore_sip_idx_free(index); + return 0; +} + +static int ubcore_update_sip(struct ubcore_sip_info *sip, bool is_add) +{ + struct ubcore_device *tpf_dev; + + if (sip == NULL) { + ubcore_log_err("There is an illegal parameter.\n"); + return -1; + } + tpf_dev = ubcore_find_tpf_device(&sip->addr, UBCORE_TRANSPORT_UB); + if (is_add) { + if (tpf_dev && ubcore_add_device_sip(tpf_dev, sip) != 0) { + ubcore_put_device(tpf_dev); + return -1; + } + } else { + if (tpf_dev && 
ubcore_del_device_sip(tpf_dev, sip) != 0) { + ubcore_put_device(tpf_dev); + return -1; + } + } + ubcore_put_device(tpf_dev); + return 0; +} + +int ubcore_add_sip(struct ubcore_sip_info *sip) +{ + return ubcore_update_sip(sip, true); +} +EXPORT_SYMBOL(ubcore_add_sip); + +int ubcore_delete_sip(struct ubcore_sip_info *sip) +{ + return ubcore_update_sip(sip, false); +} +EXPORT_SYMBOL(ubcore_delete_sip); + +void ubcore_sync_sip_table(void) +{ + struct ubcore_sip_info *sip; + struct ubcore_device *tpf_dev; + uint32_t max_cnt; + uint32_t i; + + max_cnt = ubcore_get_sip_max_cnt(); + + for (i = 0; i < max_cnt; i++) { + sip = ubcore_lookup_sip_info(i); + if (sip == NULL) + continue; + + tpf_dev = ubcore_find_tpf_device(&sip->addr, UBCORE_TRANSPORT_UB); + if (tpf_dev) { + (void)ubcore_notify_uvs_add_sip(tpf_dev, sip, i); + ubcore_put_device(tpf_dev); + } + } +} + +struct ubcore_eid_info *ubcore_get_eid_list(struct ubcore_device *dev, uint32_t *cnt) +{ + struct ubcore_eid_info *tmp; + struct ubcore_eid_info *eid_list; + uint32_t count; + uint32_t i; + + tmp = vmalloc(dev->attr.max_eid_cnt * sizeof(struct ubcore_eid_info)); + if (tmp == NULL) + return NULL; + + spin_lock(&dev->eid_table.lock); + for (i = 0, count = 0; i < dev->attr.max_eid_cnt; i++) { + if (dev->eid_table.eid_entries[i].valid == true) { + tmp[count].eid = dev->eid_table.eid_entries[i].eid; + tmp[count].eid_index = i; + count++; + } + } + spin_unlock(&dev->eid_table.lock); + *cnt = count; + + eid_list = vmalloc(count * sizeof(struct ubcore_eid_info)); + if (eid_list == NULL) { + vfree(tmp); + ubcore_log_err("failed to apply for memory.\n"); + return NULL; + } + for (i = 0; i < count; i++) + eid_list[i] = tmp[i]; + + vfree(tmp); + return eid_list; +} +EXPORT_SYMBOL(ubcore_get_eid_list); + +void ubcore_free_eid_list(struct ubcore_eid_info *eid_list) +{ + if (eid_list != NULL) + vfree(eid_list); +} +EXPORT_SYMBOL(ubcore_free_eid_list); diff --git a/drivers/ub/urma/ubcore/ubcore_dp.c 
b/drivers/ub/urma/ubcore/ubcore_dp.c index c2bf3b1e173f..55941a5724c7 100644 --- a/drivers/ub/urma/ubcore/ubcore_dp.c +++ b/drivers/ub/urma/ubcore/ubcore_dp.c @@ -17,13 +17,12 @@ * Note: * History: 2023-05-09 */ -#include #include "ubcore_log.h" #include #include #include -int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr, +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr) { struct ubcore_ops *dev_ops; @@ -39,7 +38,7 @@ int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jf } EXPORT_SYMBOL(ubcore_post_jetty_send_wr); -int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr, +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr) { struct ubcore_ops *dev_ops; @@ -55,7 +54,7 @@ int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, const struct ubcore_jf } EXPORT_SYMBOL(ubcore_post_jetty_recv_wr); -int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr) { struct ubcore_ops *dev_ops; @@ -71,7 +70,7 @@ int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, } EXPORT_SYMBOL(ubcore_post_jfs_wr); -int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr) { struct ubcore_ops *dev_ops; diff --git a/drivers/ub/urma/ubcore/ubcore_hash_table.c b/drivers/ub/urma/ubcore/ubcore_hash_table.c index c3d66301c22c..fbc9db4d1efc 100644 --- a/drivers/ub/urma/ubcore/ubcore_hash_table.c +++ b/drivers/ub/urma/ubcore/ubcore_hash_table.c @@ -96,14 +96,18 @@ void ubcore_hash_table_add(struct ubcore_hash_table *ht, struct hlist_node *hnod spin_unlock(&ht->lock); } -void 
ubcore_hash_table_remove(struct ubcore_hash_table *ht, struct hlist_node *hnode) +void ubcore_hash_table_remove_nolock(struct ubcore_hash_table *ht, struct hlist_node *hnode) { - spin_lock(&ht->lock); - if (ht->head == NULL) { - spin_unlock(&ht->lock); + if (ht->head == NULL) return; - } + hlist_del(hnode); +} + +void ubcore_hash_table_remove(struct ubcore_hash_table *ht, struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + ubcore_hash_table_remove_nolock(ht, hnode); spin_unlock(&ht->lock); } diff --git a/drivers/ub/urma/ubcore/ubcore_hash_table.h b/drivers/ub/urma/ubcore/ubcore_hash_table.h index cdf136e74fa6..d885068c10d5 100644 --- a/drivers/ub/urma/ubcore/ubcore_hash_table.h +++ b/drivers/ub/urma/ubcore/ubcore_hash_table.h @@ -41,12 +41,14 @@ void ubcore_hash_table_free(struct ubcore_hash_table *ht); void ubcore_hash_table_free_with_cb(struct ubcore_hash_table *ht, void (*free_cb)(void *)); void ubcore_hash_table_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, uint32_t hash); void ubcore_hash_table_add_nolock(struct ubcore_hash_table *ht, struct hlist_node *hnode, - uint32_t hash); + uint32_t hash); void ubcore_hash_table_remove(struct ubcore_hash_table *ht, struct hlist_node *hnode); +void ubcore_hash_table_remove_nolock(struct ubcore_hash_table *ht, struct hlist_node *hnode); void *ubcore_hash_table_lookup(struct ubcore_hash_table *ht, uint32_t hash, const void *key); -void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, uint32_t hash, const void *key); +void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, uint32_t hash, + const void *key); void *ubcore_hash_table_find_remove(struct ubcore_hash_table *ht, uint32_t hash, const void *key); /* Do not insert a new entry if an old entry with the same key exists */ int ubcore_hash_table_find_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, - uint32_t hash); + uint32_t hash); #endif diff --git a/drivers/ub/urma/ubcore/ubcore_jetty.c 
b/drivers/ub/urma/ubcore/ubcore_jetty.c index e662189c59f0..2b4f5c438028 100644 --- a/drivers/ub/urma/ubcore/ubcore_jetty.c +++ b/drivers/ub/urma/ubcore/ubcore_jetty.c @@ -31,6 +31,8 @@ #include "ubcore_hash_table.h" #include "ubcore_tp.h" #include "ubcore_tp_table.h" +#include "ubcore_vtp.h" +#include "ubcore_tpg.h" struct ubcore_jfc *ubcore_find_jfc(struct ubcore_device *dev, uint32_t jfc_id) { @@ -50,20 +52,20 @@ struct ubcore_jfr *ubcore_find_jfr(struct ubcore_device *dev, uint32_t jfr_id) } EXPORT_SYMBOL(ubcore_find_jfr); -static uint32_t ubcore_get_eq_id(const struct ubcore_device *dev) +static uint32_t ubcore_get_ceqn(struct ubcore_device *dev) { - uint32_t eq_id = 0; + uint32_t ceqn = 0; int cpu; - if (dev->num_comp_vectors > 0) { + if (dev->attr.dev_cap.ceq_cnt > 0) { cpu = get_cpu(); - eq_id = (uint32_t)(cpu % dev->num_comp_vectors); + ceqn = (uint32_t)(cpu % dev->attr.dev_cap.ceq_cnt); put_cpu(); } - return eq_id; + return ceqn; } -static int check_and_fill_jfc_attr(struct ubcore_jfc_cfg *cfg, const struct ubcore_jfc_cfg *user) +static int check_and_fill_jfc_attr(struct ubcore_jfc_cfg *cfg, struct ubcore_jfc_cfg *user) { if (cfg->depth < user->depth) return -1; @@ -74,21 +76,21 @@ static int check_and_fill_jfc_attr(struct ubcore_jfc_cfg *cfg, const struct ubco return 0; } -struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubcore_jfc_cfg *cfg, +struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, struct ubcore_jfc_cfg *cfg, ubcore_comp_callback_t jfce_handler, ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) { struct ubcore_jfc *jfc; - uint32_t eq_id; + uint32_t ceqn; if (dev == NULL || cfg == NULL || dev->ops->create_jfc == NULL || dev->ops->destroy_jfc == NULL) return NULL; - eq_id = ubcore_get_eq_id(dev); + ceqn = ubcore_get_ceqn(dev); - ((struct ubcore_jfc_cfg *)cfg)->eq_id = eq_id; + ((struct ubcore_jfc_cfg *)cfg)->ceqn = ceqn; jfc = dev->ops->create_jfc(dev, cfg, udata); if (jfc == NULL) { 
ubcore_log_err("failed to create jfc.\n"); @@ -100,7 +102,7 @@ struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubc ubcore_log_err("jfc cfg is not qualified.\n"); return NULL; } - jfc->jfc_cfg.eq_id = eq_id; + jfc->jfc_cfg.ceqn = ceqn; jfc->jfce_handler = jfce_handler; jfc->jfae_handler = jfae_handler; jfc->ub_dev = dev; @@ -116,7 +118,7 @@ struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubc } EXPORT_SYMBOL(ubcore_create_jfc); -int ubcore_modify_jfc(struct ubcore_jfc *jfc, const struct ubcore_jfc_attr *attr, +int ubcore_modify_jfc(struct ubcore_jfc *jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata) { struct ubcore_device *dev; @@ -146,21 +148,55 @@ int ubcore_delete_jfc(struct ubcore_jfc *jfc) if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops->destroy_jfc == NULL) return -1; - if (WARN_ON_ONCE(atomic_read(&jfc->use_cnt))) + if (atomic_read(&jfc->use_cnt)) { + ubcore_log_err("The jfc is still being used"); return -EBUSY; + } jfc_id = jfc->id; dev = jfc->ub_dev; ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFC], &jfc->hnode); ret = dev->ops->destroy_jfc(jfc); - if (ret < 0) + if (ret < 0) { ubcore_log_err("UBEP failed to destroy jfc, jfc_id:%u.\n", jfc_id); + goto rollback; + } return ret; + +rollback: + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFC], &jfc->hnode, jfc->id); + return ret; } EXPORT_SYMBOL(ubcore_delete_jfc); -static int check_and_fill_jfs_attr(struct ubcore_jfs_cfg *cfg, const struct ubcore_jfs_cfg *user) +static int check_jfs_cfg(struct ubcore_device *dev, struct ubcore_jfs_cfg *cfg) +{ + if (cfg->depth == 0 || cfg->depth > dev->attr.dev_cap.max_jfs_depth) { + ubcore_log_err("Invalid parameter, depth:%u, max_depth:%u.\n", + cfg->depth, dev->attr.dev_cap.max_jfs_depth); + return -EINVAL; + } + if (cfg->max_inline_data != 0 && cfg->max_inline_data > + dev->attr.dev_cap.max_jfs_inline_size) { + ubcore_log_err("Invalid parameter, inline_data:%u, max_inline_len:%u.\n", 
+ cfg->max_inline_data, dev->attr.dev_cap.max_jfs_inline_size); + return -EINVAL; + } + if (cfg->max_sge > dev->attr.dev_cap.max_jfs_sge) { + ubcore_log_err("Invalid parameter, sge:%hhu, max_sge:%u.\n", + cfg->max_sge, dev->attr.dev_cap.max_jfs_sge); + return -EINVAL; + } + if (cfg->max_rsge > dev->attr.dev_cap.max_jfs_rsge) { + ubcore_log_err("Invalid parameter, rsge:%hhu, max_rsge:%u.\n", + cfg->max_rsge, dev->attr.dev_cap.max_jfs_rsge); + return -EINVAL; + } + return 0; +} + +static int check_and_fill_jfs_attr(struct ubcore_jfs_cfg *cfg, struct ubcore_jfs_cfg *user) { if (cfg->depth < user->depth || cfg->max_sge < user->max_sge || cfg->max_rsge < user->max_rsge || cfg->max_inline_data < user->max_inline_data) @@ -170,8 +206,8 @@ static int check_and_fill_jfs_attr(struct ubcore_jfs_cfg *cfg, const struct ubco * max_sge and max_inline_data */ cfg->flag = user->flag; + cfg->eid_index = user->eid_index; cfg->priority = user->priority; - cfg->retry_cnt = user->retry_cnt; cfg->rnr_retry = user->rnr_retry; cfg->err_timeout = user->err_timeout; cfg->trans_mode = user->trans_mode; @@ -180,7 +216,7 @@ static int check_and_fill_jfs_attr(struct ubcore_jfs_cfg *cfg, const struct ubco return 0; } -struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubcore_jfs_cfg *cfg, +struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, struct ubcore_jfs_cfg *cfg, ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) { @@ -194,6 +230,8 @@ struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubc ubcore_log_err("jfs cfg is not supported.\n"); return NULL; } + if (check_jfs_cfg(dev, cfg) != 0) + return NULL; jfs = dev->ops->create_jfs(dev, cfg, udata); if (jfs == NULL) { @@ -232,7 +270,7 @@ struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubc } EXPORT_SYMBOL(ubcore_create_jfs); -int ubcore_modify_jfs(struct ubcore_jfs *jfs, const struct ubcore_jfs_attr *attr, +int ubcore_modify_jfs(struct 
ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata) { struct ubcore_device *dev; @@ -288,12 +326,20 @@ int ubcore_delete_jfs(struct ubcore_jfs *jfs) ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFS], &jfs->hnode); ubcore_destroy_tptable(&jfs->tptable); ret = dev->ops->destroy_jfs(jfs); - if (ret < 0) + if (ret < 0) { ubcore_log_err("UBEP failed to destroy jfs, jfs_id:%u.\n", jfs_id); - else + goto rollback; + } else { atomic_dec(&jfc->use_cnt); + } return ret; + +rollback: + if (ubcore_jfs_need_advise(jfs)) + jfs->tptable = ubcore_create_tptable(); + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFS], &jfs->hnode, jfs->id); + return ret; } EXPORT_SYMBOL(ubcore_delete_jfs); @@ -312,22 +358,23 @@ int ubcore_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr) } EXPORT_SYMBOL(ubcore_flush_jfs); -static int check_and_fill_jfr_attr(struct ubcore_jfr_cfg *cfg, const struct ubcore_jfr_cfg *user) +static int check_and_fill_jfr_attr(struct ubcore_jfr_cfg *cfg, struct ubcore_jfr_cfg *user) { if (cfg->depth < user->depth || cfg->max_sge < user->max_sge) return -1; /* store the immutable and skip the driver updated attributes including depth, max_sge */ + cfg->eid_index = user->eid_index; cfg->flag = user->flag; cfg->min_rnr_timer = user->min_rnr_timer; cfg->trans_mode = user->trans_mode; - cfg->ukey = user->ukey; + cfg->token_value = user->token_value; cfg->jfr_context = user->jfr_context; cfg->jfc = user->jfc; return 0; } -struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, const struct ubcore_jfr_cfg *cfg, +struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) { @@ -373,7 +420,7 @@ struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, const struct ubc } EXPORT_SYMBOL(ubcore_create_jfr); -int ubcore_modify_jfr(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, +int ubcore_modify_jfr(struct ubcore_jfr 
*jfr, struct ubcore_jfr_attr *attr, struct ubcore_udata *udata) { struct ubcore_device *dev; @@ -423,8 +470,10 @@ int ubcore_delete_jfr(struct ubcore_jfr *jfr) if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops->destroy_jfr == NULL) return -EINVAL; - if (WARN_ON_ONCE(atomic_read(&jfr->use_cnt))) + if (atomic_read(&jfr->use_cnt)) { + ubcore_log_err("The jfr is still being used"); return -EBUSY; + } jfc = jfr->jfr_cfg.jfc; jfr_id = jfr->id; @@ -432,19 +481,28 @@ int ubcore_delete_jfr(struct ubcore_jfr *jfr) ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFR], &jfr->hnode); ubcore_destroy_tptable(&jfr->tptable); ret = dev->ops->destroy_jfr(jfr); - if (ret < 0) + if (ret < 0) { ubcore_log_err("UBEP failed to destroy jfr, jfr_id:%u.\n", jfr_id); - else + goto rollback; + } else { atomic_dec(&jfc->use_cnt); + } return ret; + +rollback: + if (ubcore_jfr_need_advise(jfr)) + jfr->tptable = ubcore_create_tptable(); + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFR], &jfr->hnode, jfr->id); + return ret; } EXPORT_SYMBOL(ubcore_delete_jfr); struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata) { + struct ubcore_vtp_param vtp_param; struct ubcore_tjetty *tjfr; if (dev == NULL || cfg == NULL || dev->ops->import_jfr == NULL || @@ -461,9 +519,25 @@ struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, tjfr->uctx = ubcore_get_uctx(udata); tjfr->type = UBCORE_JFR; atomic_set(&tjfr->use_cnt, 0); - + mutex_init(&tjfr->lock); + + /* create rm tp if the remote eid is not connected */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || cfg->trans_mode == UBCORE_TP_UM)) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjfr->lock); + tjfr->vtpn = ubcore_connect_vtp(dev, &vtp_param); + if (tjfr->vtpn == NULL) { + (void)dev->ops->unimport_jfr(tjfr); + mutex_unlock(&tjfr->lock); + 
ubcore_log_err("Failed to setup tp connection.\n"); + return NULL; + } + mutex_unlock(&tjfr->lock); + } else { + tjfr->vtpn = NULL; + } tjfr->tp = NULL; - return tjfr; } EXPORT_SYMBOL(ubcore_import_jfr); @@ -471,19 +545,32 @@ EXPORT_SYMBOL(ubcore_import_jfr); int ubcore_unimport_jfr(struct ubcore_tjetty *tjfr) { struct ubcore_device *dev; + int ret; if (tjfr == NULL || tjfr->ub_dev == NULL || tjfr->ub_dev->ops->unimport_jfr == NULL || - !ubcore_have_tp_ops(tjfr->ub_dev)) - return -1; + !ubcore_have_tp_ops(tjfr->ub_dev)) + return -EINVAL; dev = tjfr->ub_dev; - + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (tjfr->cfg.trans_mode == UBCORE_TP_RM || tjfr->cfg.trans_mode == UBCORE_TP_UM) && + tjfr->vtpn != NULL) { + mutex_lock(&tjfr->lock); + ret = ubcore_disconnect_vtp(tjfr->vtpn); + if (ret != 0) { + ubcore_log_err("Failed to disconnect vtp.\n"); + mutex_unlock(&tjfr->lock); + return ret; + } + tjfr->vtpn = NULL; + mutex_unlock(&tjfr->lock); + } return dev->ops->unimport_jfr(tjfr); } EXPORT_SYMBOL(ubcore_unimport_jfr); static int check_and_fill_jetty_attr(struct ubcore_jetty_cfg *cfg, - const struct ubcore_jetty_cfg *user) + struct ubcore_jetty_cfg *user) { if (cfg->jfs_depth < user->jfs_depth || cfg->max_send_sge < user->max_send_sge || cfg->max_send_rsge < user->max_send_rsge || @@ -496,25 +583,158 @@ static int check_and_fill_jetty_attr(struct ubcore_jetty_cfg *cfg, return -1; } /* store the immutable and skip the driver updated send and recv attributes */ + cfg->eid_index = user->eid_index; cfg->flag = user->flag; cfg->send_jfc = user->send_jfc; cfg->recv_jfc = user->recv_jfc; cfg->jfr = user->jfr; cfg->priority = user->priority; - cfg->retry_cnt = user->retry_cnt; cfg->rnr_retry = user->rnr_retry; cfg->err_timeout = user->err_timeout; cfg->min_rnr_timer = user->min_rnr_timer; cfg->trans_mode = user->trans_mode; cfg->jetty_context = user->jetty_context; - cfg->ukey = user->ukey; + cfg->token_value = user->token_value; return 0; } -struct ubcore_jetty 
*ubcore_create_jetty(struct ubcore_device *dev, - const struct ubcore_jetty_cfg *cfg, - ubcore_event_callback_t jfae_handler, - struct ubcore_udata *udata) +static int check_jetty_cfg(struct ubcore_jetty_cfg *cfg) +{ + if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { + ubcore_log_err("Invalid parameter, trans_mode: %d.\n", (int)cfg->trans_mode); + return -1; + } + + if (cfg->send_jfc == NULL || cfg->recv_jfc == NULL) { + ubcore_log_err("jfc is null.\n"); + return -1; + } + + if (cfg->flag.bs.share_jfr != 0 && + (cfg->jfr == NULL || cfg->jfr->jfr_cfg.trans_mode != cfg->trans_mode)) { + ubcore_log_err("jfr is null or trans_mode invalid with shared jfr flag.\n"); + return -1; + } + return 0; +} + +static int check_jetty_cfg_with_jetty_grp(struct ubcore_jetty_cfg *cfg) +{ + if (cfg->jetty_grp == NULL) + return 0; + + if (cfg->trans_mode != UBCORE_TP_RM) + return -1; + if (cfg->token_value.token != cfg->jetty_grp->jetty_grp_cfg.token_value.token) + return -1; + + if (cfg->flag.bs.share_jfr == 1 && (cfg->jfr == NULL || + cfg->token_value.token != cfg->jfr->jfr_cfg.token_value.token || + cfg->jfr->jfr_cfg.trans_mode != UBCORE_TP_RM)) + return -1; + + return 0; +} + +static int check_jetty_check_dev_cap(struct ubcore_device *dev, struct ubcore_jetty_cfg *cfg) +{ + struct ubcore_device_cap *cap = &dev->attr.dev_cap; + + if (cfg->jetty_grp != NULL) { + mutex_lock(&cfg->jetty_grp->lock); + if (cfg->jetty_grp->jetty_cnt >= cap->max_jetty_in_jetty_grp) { + mutex_unlock(&cfg->jetty_grp->lock); + ubcore_log_err("jetty_grp jetty cnt:%u, max_jetty in grp:%u.\n", + cfg->jetty_grp->jetty_cnt, cap->max_jetty_in_jetty_grp); + return -1; + } + mutex_unlock(&cfg->jetty_grp->lock); + } + + if (cfg->jfs_depth == 0 || cfg->jfs_depth > cap->max_jfs_depth) { + ubcore_log_err("Invalid parameter, jfs_depth:%u, max_jfs_depth: %u.\n", + cfg->jfs_depth, cap->max_jfs_depth); + return -EINVAL; + } + if (cfg->max_inline_data != 0 && cfg->max_inline_data > cap->max_jfs_inline_size) { + 
ubcore_log_err("Invalid parameter, inline_data:%u, max_jfs_inline_len: %u.\n", + cfg->max_inline_data, cap->max_jfs_inline_size); + return -EINVAL; + } + if (cfg->max_send_sge > cap->max_jfs_sge) { + ubcore_log_err("Invalid parameter, jfs_sge:%hhu, max_jfs_sge:%u.\n", + cfg->max_send_sge, cap->max_jfs_sge); + return -EINVAL; + } + if (cfg->max_send_rsge > cap->max_jfs_rsge) { + ubcore_log_err("Invalid parameter, jfs_rsge:%hhu, max_jfs_rsge:%u.\n", + cfg->max_send_rsge, cap->max_jfs_rsge); + return -EINVAL; + } + + if (cfg->flag.bs.share_jfr == 0) { + if (cfg->jfr_depth == 0 || cfg->jfr_depth > cap->max_jfr_depth) { + ubcore_log_err("Invalid parameter, jfr_depth:%u, max_jfr_depth: %u.\n", + cfg->jfr_depth, cap->max_jfr_depth); + return -EINVAL; + } + if (cfg->max_recv_sge > cap->max_jfr_sge) { + ubcore_log_err("Invalid parameter, jfr_sge:%hhu, max_jfr_sge:%u.\n", + cfg->max_recv_sge, cap->max_jfr_sge); + return -EINVAL; + } + } + + return 0; +} + +static int ubcore_add_jetty_to_jetty_grp(struct ubcore_jetty *jetty, + struct ubcore_jetty_group *jetty_grp) +{ + uint32_t max_jetty_in_grp; + uint32_t i; + + max_jetty_in_grp = jetty->ub_dev->attr.dev_cap.max_jetty_in_jetty_grp; + mutex_lock(&jetty_grp->lock); + for (i = 0; i < max_jetty_in_grp; i++) { + if (jetty_grp->jetty[i] == NULL) { + jetty_grp->jetty[i] = jetty; + jetty_grp->jetty_cnt++; + mutex_unlock(&jetty_grp->lock); + return 0; + } + } + mutex_unlock(&jetty_grp->lock); + ubcore_log_err("failed to add jetty to jetty_grp.\n"); + return -1; +} + +static int ubcore_remove_jetty_from_jetty_grp(struct ubcore_jetty *jetty, + struct ubcore_jetty_group *jetty_grp) +{ + uint32_t max_jetty_in_grp; + uint32_t i; + + if (jetty == NULL || jetty_grp == NULL) + return 0; + + max_jetty_in_grp = jetty->ub_dev->attr.dev_cap.max_jetty_in_jetty_grp; + mutex_lock(&jetty_grp->lock); + for (i = 0; i < max_jetty_in_grp; i++) { + if (jetty_grp->jetty[i] == jetty) { + jetty_grp->jetty[i] = NULL; + jetty_grp->jetty_cnt--; + 
mutex_unlock(&jetty_grp->lock); + return 0; + } + } + mutex_unlock(&jetty_grp->lock); + ubcore_log_err("failed to delete jetty to jetty_grp.\n"); + return -1; +} + +struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, struct ubcore_jetty_cfg *cfg, + ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) { struct ubcore_jetty *jetty; @@ -522,25 +742,47 @@ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, dev->ops->destroy_jetty == NULL) return NULL; + if (check_jetty_cfg(cfg) != 0) { + ubcore_log_err("failed to check jetty cfg.\n"); + return NULL; + } + + if (check_jetty_cfg_with_jetty_grp(cfg) != 0) { + ubcore_log_err("failed to check jetty cfg.\n"); + return NULL; + } + + if (check_jetty_check_dev_cap(dev, cfg) != 0) { + ubcore_log_err("failed to check jetty cfg.\n"); + return NULL; + } + jetty = dev->ops->create_jetty(dev, cfg, udata); if (jetty == NULL) { ubcore_log_err("failed to create jetty.\n"); return NULL; } + + jetty->ub_dev = dev; + if (cfg->jetty_grp != NULL && + ubcore_add_jetty_to_jetty_grp(jetty, + (struct ubcore_jetty_group *)cfg->jetty_grp) != 0) { + ubcore_log_err("jetty cfg is not qualified.\n"); + goto destroy_jetty; + } + if (check_and_fill_jetty_attr(&jetty->jetty_cfg, cfg) != 0) { ubcore_log_err("jetty cfg is not qualified.\n"); - (void)dev->ops->destroy_jetty(jetty); - return NULL; + goto delete_jetty_to_grp; } - jetty->ub_dev = dev; + jetty->uctx = ubcore_get_uctx(udata); jetty->jfae_handler = jfae_handler; if (ubcore_jetty_need_advise(jetty) || jetty->jetty_cfg.trans_mode == UBCORE_TP_RC) { jetty->tptable = ubcore_create_tptable(); if (jetty->tptable == NULL) { ubcore_log_err("Failed to create tp table in the jetty.\n"); - (void)dev->ops->destroy_jetty(jetty); - return NULL; + goto delete_jetty_to_grp; } } else { jetty->tptable = NULL; /* To prevent kernel-mode drivers, malloc is not empty */ @@ -548,20 +790,29 @@ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, 
atomic_set(&jetty->use_cnt, 0); if (ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode, jetty->id) != 0) { - ubcore_destroy_tptable(&jetty->tptable); - (void)dev->ops->destroy_jetty(jetty); ubcore_log_err("Failed to add jetty.\n"); + goto destroy_tptable; } atomic_inc(&cfg->send_jfc->use_cnt); atomic_inc(&cfg->recv_jfc->use_cnt); + if (cfg->jfr) atomic_inc(&cfg->jfr->use_cnt); + return jetty; +destroy_tptable: + ubcore_destroy_tptable(&jetty->tptable); +delete_jetty_to_grp: + (void)ubcore_remove_jetty_from_jetty_grp( + jetty, (struct ubcore_jetty_group *)cfg->jetty_grp); +destroy_jetty: + (void)dev->ops->destroy_jetty(jetty); + return NULL; } EXPORT_SYMBOL(ubcore_create_jetty); -int ubcore_modify_jetty(struct ubcore_jetty *jetty, const struct ubcore_jetty_attr *attr, +int ubcore_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct ubcore_udata *udata) { struct ubcore_device *dev; @@ -605,6 +856,7 @@ EXPORT_SYMBOL(ubcore_query_jetty); int ubcore_delete_jetty(struct ubcore_jetty *jetty) { + struct ubcore_jetty_group *jetty_grp; struct ubcore_jfc *send_jfc; struct ubcore_jfc *recv_jfc; struct ubcore_device *dev; @@ -615,6 +867,7 @@ int ubcore_delete_jetty(struct ubcore_jetty *jetty) if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops->destroy_jetty == NULL) return -1; + jetty_grp = jetty->jetty_cfg.jetty_grp; send_jfc = jetty->jetty_cfg.send_jfc; recv_jfc = jetty->jetty_cfg.recv_jfc; jfr = jetty->jetty_cfg.jfr; @@ -622,9 +875,23 @@ int ubcore_delete_jetty(struct ubcore_jetty *jetty) dev = jetty->ub_dev; ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode); ubcore_destroy_tptable(&jetty->tptable); + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB && jetty->remote_jetty != NULL) { + mutex_lock(&jetty->remote_jetty->lock); + (void)ubcore_disconnect_vtp(jetty->remote_jetty->vtpn); + jetty->remote_jetty->vtpn = NULL; + mutex_unlock(&jetty->remote_jetty->lock); + 
atomic_set(&jetty->remote_jetty->use_cnt, 0); + /* The tjetty object will release remote jetty resources */ + jetty->remote_jetty = NULL; + } + + if (jetty_grp != NULL) + (void)ubcore_remove_jetty_from_jetty_grp(jetty, jetty_grp); ret = dev->ops->destroy_jetty(jetty); - if (ret < 0) { + if (ret != 0) { ubcore_log_err("UBEP failed to destroy jetty, jetty_id:%u.\n", jetty_id); + goto rollback; } else { if (send_jfc) atomic_dec(&send_jfc->use_cnt); @@ -634,6 +901,14 @@ int ubcore_delete_jetty(struct ubcore_jetty *jetty) atomic_dec(&jfr->use_cnt); } return ret; + +rollback: + if (jetty_grp != NULL) + (void)ubcore_add_jetty_to_jetty_grp(jetty, jetty_grp); + if (ubcore_jetty_need_advise(jetty) || jetty->jetty_cfg.trans_mode == UBCORE_TP_RC) + jetty->tptable = ubcore_create_tptable(); + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode, jetty->id); + return ret; } EXPORT_SYMBOL(ubcore_delete_jetty); @@ -653,9 +928,10 @@ int ubcore_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr EXPORT_SYMBOL(ubcore_flush_jetty); struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata) { + struct ubcore_vtp_param vtp_param; struct ubcore_tjetty *tjetty; if (dev == NULL || cfg == NULL || dev->ops->import_jetty == NULL || @@ -670,11 +946,27 @@ struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, tjetty->cfg = *cfg; tjetty->ub_dev = dev; tjetty->uctx = ubcore_get_uctx(udata); - tjetty->type = UBCORE_JETTY; - atomic_set(&tjetty->use_cnt, 0); + tjetty->type = cfg->type; + atomic_set(&tjetty->use_cnt, 0); mutex_init(&tjetty->lock); - tjetty->tp = NULL; + + /* create rm tp if the remote eid is not connected */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || cfg->trans_mode == UBCORE_TP_UM)) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjetty->lock); + 
tjetty->vtpn = ubcore_connect_vtp(dev, &vtp_param); + if (tjetty->vtpn == NULL) { + (void)dev->ops->unimport_jetty(tjetty); + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to setup tp connection.\n"); + return NULL; + } + mutex_unlock(&tjetty->lock); + } else { + tjetty->tp = NULL; + } return tjetty; } @@ -683,13 +975,32 @@ EXPORT_SYMBOL(ubcore_import_jetty); int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty) { struct ubcore_device *dev; + int ret; if (tjetty == NULL || tjetty->ub_dev == NULL || - tjetty->ub_dev->ops->unimport_jetty == NULL || !ubcore_have_tp_ops(tjetty->ub_dev)) - return -1; + tjetty->ub_dev->ops->unimport_jetty == NULL || !ubcore_have_tp_ops(tjetty->ub_dev)) + return -EINVAL; dev = tjetty->ub_dev; + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (tjetty->cfg.trans_mode == UBCORE_TP_RM || + tjetty->cfg.trans_mode == UBCORE_TP_UM) && + tjetty->vtpn != NULL) { + mutex_lock(&tjetty->lock); + ret = ubcore_disconnect_vtp(tjetty->vtpn); + if (ret != 0) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to disconnect vtp.\n"); + return ret; + } + tjetty->vtpn = NULL; + mutex_unlock(&tjetty->lock); + } + + if (tjetty->cfg.trans_mode == UBCORE_TP_RC && atomic_read(&tjetty->use_cnt)) + return -EBUSY; + return dev->ops->unimport_jetty(tjetty); } EXPORT_SYMBOL(ubcore_unimport_jetty); @@ -732,7 +1043,7 @@ static int ubcore_advice_jetty_tjetty(struct ubcore_tp_advice *advice, struct ub return 0; } -static inline void ubcore_put_advice(const struct ubcore_tp_advice *advice) +static inline void ubcore_put_advice(struct ubcore_tp_advice *advice) { ubcore_put_tptable(advice->meta.ht); } @@ -747,6 +1058,10 @@ int ubcore_advise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr, ubcore_log_err("invalid parameter.\n"); return -1; } + + if (jfs->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + if (!ubcore_jfs_tjfr_need_advise(jfs, tjfr)) { ubcore_log_err("The transport mode is not rm.\n"); return -1; @@ -771,6 +1086,10 @@ int 
ubcore_unadvise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr) ubcore_log_err("invalid parameter.\n"); return -1; } + + if (jfs->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + if (!ubcore_jfs_tjfr_need_advise(jfs, tjfr)) { ubcore_log_err("The transport mode is not rm.\n"); return -1; @@ -796,6 +1115,10 @@ int ubcore_advise_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty ubcore_log_err("invalid parameter.\n"); return -1; } + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + if (!ubcore_jetty_tjetty_need_advise(jetty, tjetty)) { ubcore_log_err("The transport mode is not rm.\n"); return -1; @@ -821,6 +1144,10 @@ int ubcore_unadvise_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjet ubcore_log_err("invalid parameter.\n"); return -1; } + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + if (!ubcore_jetty_tjetty_need_advise(jetty, tjetty)) { ubcore_log_err("The transport mode is not rm.\n"); return -1; @@ -839,7 +1166,9 @@ EXPORT_SYMBOL(ubcore_unadvise_jetty); int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, struct ubcore_udata *udata) { + struct ubcore_vtp_param vtp_param; struct ubcore_tp_advice advice; + struct ubcore_device *dev; int ret; if (jetty == NULL || tjetty == NULL || !ubcore_have_tp_ops(jetty->ub_dev)) { @@ -856,48 +1185,115 @@ int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, return -1; } - ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); - if (ret != 0) - return ret; + if (tjetty->vtpn != NULL) { + ubcore_log_err("The tjetty, has already connect vtpn, prevent duplicate bind.\n"); + return -1; + } - ret = ubcore_bind_tp(jetty, tjetty, &advice, udata); + dev = jetty->ub_dev; - ubcore_put_advice(&advice); - if (ret != 0) { - ubcore_log_err("Failed to setup tp connection.\n"); - return ret; + if (dev->ops->bind_jetty != NULL && dev->ops->unbind_jetty != NULL) { + ret = dev->ops->bind_jetty(jetty, 
tjetty, udata); + if (ret != 0) { + ubcore_log_err("Failed to bind jetty"); + return ret; + } } + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + struct ubcore_vtpn *vtpn; + + ubcore_set_vtp_param(dev, jetty, &tjetty->cfg, &vtp_param); + mutex_lock(&tjetty->lock); + vtpn = ubcore_connect_vtp(dev, &vtp_param); + if (vtpn == NULL) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to setup vtp connection.\n"); + ret = -1; + goto unbind; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } else if (dev->transport_type == UBCORE_TRANSPORT_IB) { + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_bind_tp(jetty, tjetty, &advice, udata); + ubcore_put_advice(&advice); + if (ret != 0) { + ubcore_log_err("Failed to setup tp connection.\n"); + goto unbind; + } + } + ubcore_log_info("jetty: %u bind tjetty: %u\n", jetty->id, tjetty->cfg.id.id); jetty->remote_jetty = tjetty; + atomic_inc(&tjetty->use_cnt); return 0; + +unbind: + if (dev->ops->bind_jetty != NULL && dev->ops->unbind_jetty != NULL) + (void)dev->ops->unbind_jetty(jetty); + + return ret; } EXPORT_SYMBOL(ubcore_bind_jetty); -int ubcore_unbind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty) +int ubcore_unbind_jetty(struct ubcore_jetty *jetty) { struct ubcore_tp_advice advice; + struct ubcore_tjetty *tjetty; + struct ubcore_device *dev; int ret; - if (jetty == NULL || tjetty == NULL) { + if (jetty == NULL || jetty->ub_dev == NULL) { ubcore_log_err("invalid parameter.\n"); - return -1; + return -EINVAL; } + tjetty = jetty->remote_jetty; if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || - (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + tjetty == NULL || (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { ubcore_log_err("trans mode is not rc type.\n"); - return -1; + return -EINVAL; } - ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); - if (ret != 0) - return ret; - - ret = ubcore_unbind_tp(jetty, tjetty, &advice); - 
ubcore_put_advice(&advice); - if (ret != 0) - ubcore_log_err("Failed to destroy jetty tp.\n"); + dev = jetty->ub_dev; + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + if (tjetty->vtpn != NULL) { + mutex_lock(&tjetty->lock); + ret = ubcore_disconnect_vtp(tjetty->vtpn); + if (ret != 0) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to disconnect vtp.\n"); + return ret; + } + tjetty->vtpn = NULL; + mutex_unlock(&tjetty->lock); + } + } else if (dev->transport_type == UBCORE_TRANSPORT_IB) { + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_unbind_tp(jetty, tjetty, &advice); + ubcore_put_advice(&advice); + if (ret != 0) { + ubcore_log_err("Failed to destroy jetty tp.\n"); + return ret; + } + } + ubcore_log_info("jetty: %u unbind tjetty: %u\n", jetty->id, tjetty->cfg.id.id); + if (dev->ops->bind_jetty != NULL && dev->ops->unbind_jetty != NULL) { + ret = dev->ops->unbind_jetty(jetty); + if (ret != 0) { + ubcore_log_err("Failed to unbind jetty"); + return ret; + } + } + atomic_dec(&tjetty->use_cnt); jetty->remote_jetty = NULL; - return ret; + return 0; } EXPORT_SYMBOL(ubcore_unbind_jetty); @@ -906,3 +1302,77 @@ struct ubcore_jetty *ubcore_find_jetty(struct ubcore_device *dev, uint32_t jetty return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JETTY], jetty_id, &jetty_id); } EXPORT_SYMBOL(ubcore_find_jetty); + +struct ubcore_jetty_group *ubcore_create_jetty_grp(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jetty_group *jetty_grp; + uint32_t max_jetty_in_jetty_grp = dev->attr.dev_cap.max_jetty_in_jetty_grp; + uint32_t i; + + if (dev == NULL || cfg == NULL || + dev->ops->create_jetty_grp == NULL || dev->ops->delete_jetty_grp == NULL) + return NULL; + + jetty_grp = dev->ops->create_jetty_grp(dev, (struct ubcore_jetty_grp_cfg *)cfg, udata); + if (jetty_grp == NULL) { + ubcore_log_err("failed to create 
jetty_grp.\n"); + return NULL; + } + + jetty_grp->jetty = kzalloc( + sizeof(struct ubcore_jetty *) * max_jetty_in_jetty_grp, GFP_KERNEL); + if (jetty_grp->jetty == NULL) { + (void)dev->ops->delete_jetty_grp(jetty_grp); + ubcore_log_err("Failed to alloc jetty array.\n"); + return NULL; + } + + jetty_grp->ub_dev = dev; + jetty_grp->jetty_grp_cfg = *cfg; + jetty_grp->jfae_handler = jfae_handler; + jetty_grp->uctx = ubcore_get_uctx(udata); + mutex_init(&jetty_grp->lock); + jetty_grp->jetty_cnt = 0; + for (i = 0; i < dev->attr.dev_cap.max_jetty_in_jetty_grp; i++) + jetty_grp->jetty[i] = NULL; + + return jetty_grp; +} +EXPORT_SYMBOL(ubcore_create_jetty_grp); + +int ubcore_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) +{ + struct ubcore_device *dev; + uint32_t jetty_grp_id; + int ret; + + if (jetty_grp == NULL || jetty_grp->ub_dev == NULL || + jetty_grp->ub_dev->ops->delete_jetty_grp == NULL) + return -EINVAL; + + jetty_grp_id = jetty_grp->id; + dev = jetty_grp->ub_dev; + + mutex_lock(&jetty_grp->lock); + if (jetty_grp->jetty_cnt > 0) { + mutex_unlock(&jetty_grp->lock); + ubcore_log_err("jetty_grp->jetty_cnt: %u.\n", jetty_grp->jetty_cnt); + return -EBUSY; + } + if (jetty_grp->jetty != NULL) { + kfree(jetty_grp->jetty); + jetty_grp->jetty = NULL; + } + mutex_unlock(&jetty_grp->lock); + mutex_destroy(&jetty_grp->lock); + + ret = dev->ops->delete_jetty_grp(jetty_grp); + if (ret < 0) + ubcore_log_err( + "UBEP failed to destroy jetty_grp, jetty_grp_id:%u.\n", jetty_grp_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jetty_grp); diff --git a/drivers/ub/urma/ubcore/ubcore_main.c b/drivers/ub/urma/ubcore/ubcore_main.c index 733aff02e61c..b5e2784f20f6 100644 --- a/drivers/ub/urma/ubcore/ubcore_main.c +++ b/drivers/ub/urma/ubcore/ubcore_main.c @@ -27,14 +27,18 @@ #include #include #include +#include #include "ubcore_cmd.h" +#include "ubcore_uvs_cmd.h" #include "ubcore_log.h" #include "ubcore_netlink.h" #include #include #include #include "ubcore_priv.h" +#include 
"ubcore_netdev.h" +#include "ubcore_msg.h" /* ubcore create independent cdev and ioctl channels * to handle public work. @@ -42,6 +46,7 @@ #define UBCORE_DEVICE_NAME "ubcore" #define UBCORE_CLASS_NAME "ubus" #define UBCORE_IPV4_MAP_IPV6_PREFIX 0x0000ffff +#define UBCORE_LOCAL_SHUNET (0xfe80000000000000ULL) struct ubcore_ctx { dev_t ubcore_devno; @@ -51,13 +56,11 @@ struct ubcore_ctx { }; static struct ubcore_ctx g_ubcore_ctx; -#define UBCORE_MAX_UASID (1 << 24) -static DECLARE_BITMAP(g_uasid_bitmap, UBCORE_MAX_UASID); -static DEFINE_SPINLOCK(g_uasid_spinlock); struct ubcore_net_addr_node { struct list_head node; struct ubcore_net_addr addr; + uint32_t prefix_len; }; int ubcore_open(struct inode *i_node, struct file *filp) @@ -65,97 +68,8 @@ int ubcore_open(struct inode *i_node, struct file *filp) return 0; } -static uint32_t ubcore_uasid_alloc(uint32_t uasid) -{ - spin_lock(&g_uasid_spinlock); - if (uasid > 0) { - uint32_t ret = 0; - - if (test_bit(uasid, g_uasid_bitmap) == 0) { - set_bit(uasid, g_uasid_bitmap); - spin_unlock(&g_uasid_spinlock); - ret = uasid; - } else { - spin_unlock(&g_uasid_spinlock); - ubcore_log_err("uasid allocation failed.\n"); - return 0; - } - if (ret != 0) - return ret; - } - uasid = (uint32_t)find_first_zero_bit(g_uasid_bitmap, UBCORE_MAX_UASID); - if (uasid >= UBCORE_MAX_UASID) { - ubcore_log_err("uasid allocation failed.\n"); - spin_unlock(&g_uasid_spinlock); - return 0; - } - set_bit(uasid, g_uasid_bitmap); - spin_unlock(&g_uasid_spinlock); - return uasid; -} - -static int ubcore_uasid_free(uint32_t uasid) -{ - spin_lock(&g_uasid_spinlock); - if (uasid == 0) { - spin_unlock(&g_uasid_spinlock); - ubcore_log_err("uasid is zero.\n"); - return -EINVAL; - } - if (test_bit(uasid, g_uasid_bitmap) == false) { - spin_unlock(&g_uasid_spinlock); - ubcore_log_err("uasid is used.\n"); - return -EINVAL; - } - clear_bit(uasid, g_uasid_bitmap); - spin_unlock(&g_uasid_spinlock); - return 0; -} - -static int ubcore_cmd_set_uasid(struct ubcore_cmd_hdr 
*hdr) -{ - struct ubcore_cmd_set_uasid arg; - int ret; - - ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, - sizeof(struct ubcore_cmd_set_uasid)); - if (ret != 0) - return -EPERM; - - arg.out.uasid = ubcore_uasid_alloc(arg.in.uasid); - if (arg.out.uasid == 0) { - ubcore_log_err("set uasid allocation failed, in_uasid: %u.\n", arg.in.uasid); - return -ENOMEM; - } - ubcore_log_info("set uasid allocation success, uasid: %u.\n", arg.out.uasid); - - ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, - sizeof(struct ubcore_cmd_set_uasid)); - if (ret != 0) - return -EPERM; - - return 0; -} - -static int ubcore_cmd_put_uasid(struct ubcore_cmd_hdr *hdr) -{ - struct ubcore_cmd_put_uasid arg; - int ret; - - ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, - sizeof(struct ubcore_cmd_put_uasid)); - if (ret != 0) - return -EPERM; - - if (ubcore_uasid_free(arg.in.uasid) != 0) - return -EINVAL; - - ubcore_log_info("put uasid free success, uasid: %u.\n", arg.in.uasid); - return 0; -} - -static void ubcore_set_utp_cfg(struct ubcore_cmd_set_utp *arg, struct ubcore_utp_attr *attr, - union ubcore_utp_attr_mask *mask) +static void ubcore_set_utp_cfg(struct ubcore_cmd_set_utp *arg, + struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask *mask) { attr->flag.bs.spray_en = arg->in.spray_en; attr->data_udp_start = arg->in.data_udp_start; @@ -167,12 +81,10 @@ static void ubcore_set_utp_cfg(struct ubcore_cmd_set_utp *arg, struct ubcore_utp static int ubcore_cmd_set_utp(struct ubcore_cmd_hdr *hdr) { - enum ubcore_transport_type trans_type; - union ubcore_utp_attr_mask mask = { 0 }; + union ubcore_utp_attr_mask mask = {0}; struct ubcore_cmd_set_utp arg; struct ubcore_utp_attr attr; struct ubcore_device *dev; - union ubcore_eid eid; int ret; ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, @@ -180,9 +92,7 @@ static int ubcore_cmd_set_utp(struct ubcore_cmd_hdr *hdr) if (ret != 0) return 
-EPERM; - (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); - trans_type = arg.in.transport_type; - dev = ubcore_find_device(&eid, trans_type); + dev = ubcore_find_device_with_name(arg.in.dev_name); if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", dev == NULL ? "NULL" : dev->dev_name, arg.in.dev_name); @@ -190,7 +100,7 @@ static int ubcore_cmd_set_utp(struct ubcore_cmd_hdr *hdr) } ubcore_set_utp_cfg(&arg, &attr, &mask); - if (ubcore_config_utp(dev, &eid, &attr, mask) != 0) { + if (ubcore_config_utp(dev, arg.in.utp_id, &attr, mask) != 0) { ubcore_log_err("config utp failed.\n"); ubcore_put_device(dev); return -EPERM; @@ -201,10 +111,11 @@ static int ubcore_cmd_set_utp(struct ubcore_cmd_hdr *hdr) static int ubcore_cmd_show_utp(struct ubcore_cmd_hdr *hdr) { - enum ubcore_transport_type trans_type; + struct ubcore_res_utp_val utp_info = {0}; + struct ubcore_res_key key = {0}; + struct ubcore_res_val val = {0}; struct ubcore_cmd_show_utp arg; struct ubcore_device *dev; - union ubcore_eid eid; int ret; ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, @@ -212,33 +123,37 @@ static int ubcore_cmd_show_utp(struct ubcore_cmd_hdr *hdr) if (ret != 0) return -EPERM; - (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); - trans_type = arg.in.transport_type; - - dev = ubcore_find_device(&eid, trans_type); + dev = ubcore_find_device_with_name(arg.in.dev_name); if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", dev == NULL ? 
"NULL" : dev->dev_name, arg.in.dev_name); return -EINVAL; } - if (ubcore_show_utp(dev, &eid) != 0) { - ubcore_log_err("show utp failed.\n"); + + key.type = UBCORE_RES_KEY_UTP; + key.key = arg.in.utp_id; + val.addr = (uint64_t)&utp_info; + val.len = (uint32_t)sizeof(struct ubcore_res_utp_val); + if (dev->ops != NULL && dev->ops->query_res != NULL && + dev->ops->query_res(dev, &key, &val) != 0) { ubcore_put_device(dev); - return -EPERM; + ubcore_log_err("failed to query res.\n"); + return -1; } + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)arg.out.addr, &utp_info, + sizeof(struct ubcore_res_utp_val)); + ubcore_put_device(dev); - return 0; + return ret; } static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) { - enum ubcore_transport_type trans_type; - struct ubcore_cmd_query_stats arg = { 0 }; + struct ubcore_cmd_query_stats arg = {0}; struct ubcore_stats_com_val com_val; - struct ubcore_stats_key key = { 0 }; + struct ubcore_stats_key key = {0}; struct ubcore_stats_val val; struct ubcore_device *dev; - union ubcore_eid eid; int ret; ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, @@ -246,9 +161,7 @@ static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) if (ret != 0) return ret; - (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); - trans_type = (enum ubcore_transport_type)arg.in.tp_type; - dev = ubcore_find_device(&eid, trans_type); + dev = ubcore_find_device_with_name(arg.in.dev_name); if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", dev == NULL ? 
"NULL" : dev->dev_name, arg.in.dev_name); @@ -257,8 +170,8 @@ static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) key.type = (uint8_t)arg.in.type; key.key = arg.in.key; - val.addr = (uintptr_t)&com_val; - val.len = sizeof(struct ubcore_stats_com_val); + val.addr = (uint64_t)&com_val; + val.len = (uint32_t)sizeof(struct ubcore_stats_com_val); ret = ubcore_query_stats(dev, &key, &val); if (ret != 0) { @@ -272,11 +185,177 @@ static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) sizeof(struct ubcore_cmd_query_stats)); } +static int ubcore_cmd_add_ueid(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_add_ueid arg; + struct ubcore_device *dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_add_ueid)); + if (ret != 0) + return -EPERM; + + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev_name: %s failed.\n", arg.in.dev_name); + return -EPERM; + } + if (dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1 || dev->dynamic_eid) { + ubcore_log_err("The dynamic mode of pf does not support eid change\n"); + ubcore_put_device(dev); + return -EPERM; + } + if (dev->attr.tp_maintainer && ubcore_get_netlink_valid() == false) { + ubcore_put_device(dev); + return -EPERM; + } + if (ubcore_msg_discover_eid(dev, arg.in.eid_index, UBCORE_MSG_ALLOC_EID) != 0) { + ubcore_put_device(dev); + return -EPERM; + } + + ubcore_put_device(dev); + return 0; +} + +static int ubcore_cmd_del_ueid(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_add_ueid arg; + struct ubcore_device *dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_add_ueid)); + if (ret != 0) + return -EPERM; + + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev_name: %s failed.\n", arg.in.dev_name); + return -EPERM; + } + if (dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1 || 
dev->dynamic_eid) { + ubcore_put_device(dev); + ubcore_log_err("The dynamic mode of pf does not support eid change\n"); + return -EPERM; + } + if (dev->attr.tp_maintainer && ubcore_get_netlink_valid() == false) { + ubcore_put_device(dev); + return -EPERM; + } + if (ubcore_msg_discover_eid(dev, arg.in.eid_index, UBCORE_MSG_DEALLOC_EID) != 0) { + ubcore_put_device(dev); + return -EPERM; + } + + ubcore_put_device(dev); + return 0; +} + +static void ubcore_update_pattern1_eid(struct ubcore_device *dev, + union ubcore_eid *eid, bool is_add) +{ + struct ubcore_ueid_cfg cfg; + uint32_t eid_idx = 0; + + if (ubcore_update_eidtbl_by_eid(dev, eid, &eid_idx, is_add) != 0) + return; + + cfg.eid = *eid; + cfg.eid_index = eid_idx; + cfg.upi = 0; + if (is_add) + (void)ubcore_add_ueid(dev, (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + else + (void)ubcore_delete_ueid(dev, (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); +} + +static void ubcore_update_pattern3_eid(struct ubcore_device *dev, + union ubcore_eid *eid, bool is_add) +{ + uint32_t pattern3_upi = 0; + struct ubcore_ueid_cfg cfg; + uint32_t eid_idx = 0; + + if (ubcore_update_eidtbl_by_eid(dev, eid, &eid_idx, is_add) != 0) + return; + + if (dev->attr.virtualization || + ubcore_find_upi_with_dev_name(dev->dev_name, &pattern3_upi) == NULL) + return; + + if (pattern3_upi != (uint32_t)UCBORE_INVALID_UPI) { + cfg.eid = *eid; + cfg.eid_index = eid_idx; + cfg.upi = pattern3_upi; + if (is_add) + (void)ubcore_add_ueid(dev, + (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + else + (void)ubcore_delete_ueid(dev, + (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + } else { + ubcore_log_err("upi not configured\n"); + } +} + +static int ubcore_cmd_set_eid_mode(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_set_eid_mode arg; + struct ubcore_event event; + struct ubcore_device *dev; + union ubcore_eid eid = {0}; + uint32_t i; + int ret; + + ret = ubcore_copy_from_user(&arg, + (void __user *)(uintptr_t)hdr->args_addr, 
sizeof(struct ubcore_cmd_set_eid_mode)); + if (ret != 0) + return -EPERM; + + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev_name: %s failed.\n", arg.in.dev_name); + return -EPERM; + } + if (dev->dynamic_eid == arg.in.eid_mode) { + ubcore_put_device(dev); + return 0; + } + + if (dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1 && arg.in.eid_mode == 0) { + ubcore_put_device(dev); + ubcore_log_err("pattern1 not support static mode"); + return -1; + } + + /* change eid mode, need to flush eids */ + event.ub_dev = dev; + event.event_type = UBCORE_EVENT_EID_CHANGE; + for (i = 0; i < dev->attr.max_eid_cnt; i++) { + if (dev->eid_table.eid_entries[i].valid == true) { + eid = dev->eid_table.eid_entries[i].eid; + if (dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1) + ubcore_update_pattern1_eid(dev, &eid, false); + else + ubcore_update_pattern3_eid(dev, &eid, false); + event.element.eid_idx = i; + ubcore_dispatch_async_event(&event); + } + } + dev->dynamic_eid = arg.in.eid_mode; + ubcore_put_device(dev); + return 0; +} + static uint32_t ubcore_get_query_res_len(uint32_t type) { switch (type) { case UBCORE_RES_KEY_UPI: return (uint32_t)sizeof(struct ubcore_res_upi_val); + case UBCORE_RES_KEY_VTP: + return (uint32_t)sizeof(struct ubcore_res_vtp_val); case UBCORE_RES_KEY_TP: return (uint32_t)sizeof(struct ubcore_res_tp_val); case UBCORE_RES_KEY_TPG: @@ -293,6 +372,8 @@ static uint32_t ubcore_get_query_res_len(uint32_t type) return (uint32_t)sizeof(struct ubcore_res_jetty_group_val); case UBCORE_RES_KEY_JFC: return (uint32_t)sizeof(struct ubcore_res_jfc_val); + case UBCORE_RES_KEY_RC: + return (uint32_t)sizeof(struct ubcore_res_rc_val); case UBCORE_RES_KEY_SEG: return (uint32_t)sizeof(struct ubcore_res_seg_val); case UBCORE_RES_KEY_URMA_DEV: @@ -329,6 +410,14 @@ static void ubcore_dealloc_res_dev(struct ubcore_res_dev_val *ubcore_addr) vfree(ubcore_addr->jetty_group_list); ubcore_addr->jetty_group_list = NULL; } + if 
(ubcore_addr->rc_list != NULL) { + vfree(ubcore_addr->rc_list); + ubcore_addr->rc_list = NULL; + } + if (ubcore_addr->vtp_list != NULL) { + vfree(ubcore_addr->vtp_list); + ubcore_addr->vtp_list = NULL; + } if (ubcore_addr->tp_list != NULL) { vfree(ubcore_addr->tp_list); ubcore_addr->tp_list = NULL; @@ -369,9 +458,17 @@ static int ubcore_fill_res_addr(struct ubcore_res_dev_val *ubcore_addr) if (ubcore_addr->jetty_group_list == NULL) goto free_jetty_list; + ubcore_addr->rc_list = vmalloc(sizeof(uint32_t) * ubcore_addr->rc_cnt); + if (ubcore_addr->rc_list == NULL) + goto free_jetty_group_list; + + ubcore_addr->vtp_list = vmalloc(sizeof(uint32_t) * ubcore_addr->vtp_cnt); + if (ubcore_addr->vtp_list == NULL) + goto free_rc_list; + ubcore_addr->tp_list = vmalloc(sizeof(uint32_t) * ubcore_addr->tp_cnt); if (ubcore_addr->tp_list == NULL) - goto free_jetty_group_list; + goto free_vtp_list; ubcore_addr->tpg_list = vmalloc(sizeof(uint32_t) * ubcore_addr->tpg_cnt); if (ubcore_addr->tpg_list == NULL) @@ -386,6 +483,10 @@ static int ubcore_fill_res_addr(struct ubcore_res_dev_val *ubcore_addr) vfree(ubcore_addr->tpg_list); free_tp_list: vfree(ubcore_addr->tp_list); +free_vtp_list: + vfree(ubcore_addr->vtp_list); +free_rc_list: + vfree(ubcore_addr->rc_list); free_jetty_group_list: vfree(ubcore_addr->jetty_group_list); free_jetty_list: @@ -407,57 +508,71 @@ static int ubcore_fill_user_res_dev(struct ubcore_res_dev_val *dev_val, int ret; dev_val->seg_cnt = ubcore_addr->seg_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->seg_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->seg_list, ubcore_addr->seg_list, dev_val->seg_cnt * sizeof(struct ubcore_seg_info)); if (ret != 0) return ret; dev_val->jfs_cnt = ubcore_addr->jfs_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jfs_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->jfs_list, ubcore_addr->jfs_list, dev_val->jfs_cnt * sizeof(uint32_t)); 
if (ret != 0) return ret; dev_val->jfr_cnt = ubcore_addr->jfr_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jfr_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->jfr_list, ubcore_addr->jfr_list, dev_val->jfr_cnt * sizeof(uint32_t)); if (ret != 0) return ret; dev_val->jfc_cnt = ubcore_addr->jfc_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jfc_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->jfc_list, ubcore_addr->jfc_list, dev_val->jfc_cnt * sizeof(uint32_t)); if (ret != 0) return ret; dev_val->jetty_cnt = ubcore_addr->jetty_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jetty_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->jetty_list, ubcore_addr->jetty_list, dev_val->jetty_cnt * sizeof(uint32_t)); if (ret != 0) return ret; dev_val->jetty_group_cnt = ubcore_addr->jetty_group_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jetty_group_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->jetty_group_list, ubcore_addr->jetty_group_list, dev_val->jetty_group_cnt * sizeof(uint32_t)); if (ret != 0) return ret; + dev_val->rc_cnt = ubcore_addr->rc_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->rc_list, + ubcore_addr->rc_list, + dev_val->rc_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->vtp_cnt = ubcore_addr->vtp_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->vtp_list, + ubcore_addr->vtp_list, + dev_val->vtp_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + dev_val->tp_cnt = ubcore_addr->tp_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->tp_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->tp_list, ubcore_addr->tp_list, dev_val->tp_cnt * sizeof(uint32_t)); if (ret != 0) return ret; dev_val->tpg_cnt = ubcore_addr->tpg_cnt; - ret = 
ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->tpg_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->tpg_list, ubcore_addr->tpg_list, dev_val->tpg_cnt * sizeof(uint32_t)); if (ret != 0) return ret; dev_val->utp_cnt = ubcore_addr->utp_cnt; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->utp_list, + ret = ubcore_copy_to_user((void __user *)(uintptr_t)(uint64_t)dev_val->utp_list, ubcore_addr->utp_list, dev_val->utp_cnt * sizeof(uint32_t)); if (ret != 0) return ret; @@ -465,23 +580,22 @@ static int ubcore_fill_user_res_dev(struct ubcore_res_dev_val *dev_val, return 0; } -static int ubcore_query_res_dev(const struct ubcore_device *dev, struct ubcore_res_key *key, +static int ubcore_query_res_dev(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_dev_val *dev_val) { struct ubcore_res_dev_val ubcore_addr = { 0 }; struct ubcore_res_val val = { 0 }; int ret = 0; - (void)memcpy(&ubcore_addr, dev_val, - sizeof(struct ubcore_res_dev_val)); // save + (void)memcpy(&ubcore_addr, dev_val, sizeof(struct ubcore_res_dev_val)); // save if (ubcore_fill_res_addr(&ubcore_addr) != 0) { ubcore_log_err("Failed to fill dev dev_val.\n"); return -ENOMEM; } - val.addr = (uintptr_t)&ubcore_addr; - val.len = sizeof(struct ubcore_res_dev_val); + val.addr = (uint64_t)&ubcore_addr; + val.len = (uint32_t)sizeof(struct ubcore_res_dev_val); ret = ubcore_query_resource(dev, key, &val); if (ret != 0) @@ -493,48 +607,131 @@ static int ubcore_query_res_dev(const struct ubcore_device *dev, struct ubcore_r return ret; } -static int ubcore_query_res_arg(const struct ubcore_device *dev, struct ubcore_cmd_query_res *arg, - uint32_t res_len) +static void ubcore_dealloc_res_tp_list(uint64_t user_tp_list, struct ubcore_res_val *val) { - struct ubcore_res_key key = { 0 }; - struct ubcore_res_val val = { 0 }; - void *addr; + struct ubcore_res_tpg_val *tpg = (struct ubcore_res_tpg_val *)val->addr; + + if (ubcore_copy_to_user((void __user 
*)(uintptr_t)user_tp_list, + tpg->tp_list, sizeof(uint32_t) * tpg->tp_cnt) != 0) + ubcore_log_err("ubcore_copy_to_user failed.\n"); + + kfree(tpg->tp_list); +} + +static int ubcore_alloc_res_tp_list(struct ubcore_res_val *val) +{ + struct ubcore_res_tpg_val *tpg; + + tpg = (struct ubcore_res_tpg_val *)val->addr; + tpg->tp_list = kzalloc(sizeof(uint32_t) * UBCORE_MAX_TP_CNT_IN_GRP, GFP_KERNEL); + if (tpg->tp_list == NULL) + return -ENOMEM; + + return 0; +} + +static void ubcore_dealloc_res_jetty_list(uint64_t user_jetty_list, struct ubcore_res_val *val) +{ + struct ubcore_res_jetty_group_val *tpg; + + tpg = (struct ubcore_res_jetty_group_val *)val->addr; + + if (ubcore_copy_to_user((void __user *)(uintptr_t)user_jetty_list, tpg->jetty_list, + sizeof(uint32_t) * tpg->jetty_cnt) != 0) + ubcore_log_err("ubcore_copy_to_user failed.\n"); + + kfree(tpg->jetty_list); +} + +static int ubcore_alloc_res_jetty_list(struct ubcore_device *dev, struct ubcore_res_val *val) +{ + struct ubcore_res_jetty_group_val *tpg; + + tpg = (struct ubcore_res_jetty_group_val *)val->addr; + tpg->jetty_list = kcalloc(1, sizeof(uint32_t) * dev->attr.dev_cap.max_jetty_in_jetty_grp, + GFP_KERNEL); + if (tpg->jetty_list == NULL) + return -ENOMEM; + + return 0; +} + +static int ubcore_query_res_arg(struct ubcore_device *dev, struct ubcore_cmd_query_res *arg, + uint32_t res_len) +{ + struct ubcore_res_key key = {0}; + struct ubcore_res_val val = {0}; + uint64_t user_tp_list_addr = 0; // to saves and restores user-mode addresses + uint64_t user_jetty_list_addr = 0; // to saves and restores user-mode addresses + void *k_res_val; int ret; - addr = kzalloc(res_len, GFP_KERNEL); - if (addr == NULL) + k_res_val = kzalloc(res_len, GFP_KERNEL); + if (k_res_val == NULL) return -1; - ret = ubcore_copy_from_user(addr, (void __user *)(uintptr_t)arg->out.addr, res_len); + ret = ubcore_copy_from_user(k_res_val, (void __user *)(uintptr_t)arg->out.addr, res_len); if (ret != 0) goto kfree_addr; + if (arg->in.type == 
(uint32_t)UBCORE_RES_KEY_VTP && dev->attr.virtualization == true) { + ubcore_log_warn("FE device do not support query VTP, dev: %s, type: %u.\n", + dev->dev_name, arg->in.type); + goto kfree_addr; + } key.type = (uint8_t)arg->in.type; key.key = arg->in.key; - val.addr = (uintptr_t)addr; + key.key_ext = arg->in.key_ext; + key.key_cnt = arg->in.key_cnt; + val.addr = (uint64_t)k_res_val; val.len = res_len; + if (arg->in.type == UBCORE_RES_KEY_TPG) { + user_tp_list_addr = + (uint64_t)(((struct ubcore_res_tpg_val *)k_res_val)->tp_list); // save + if (ubcore_alloc_res_tp_list(&val) != 0) + goto kfree_addr; + } + + if (arg->in.type == UBCORE_RES_KEY_JETTY_GROUP) { + // save + user_jetty_list_addr = + (uint64_t)(((struct ubcore_res_jetty_group_val *)k_res_val)->jetty_list); + if (ubcore_alloc_res_jetty_list(dev, &val) != 0) + goto kfree_addr; + } + if (arg->in.type == UBCORE_RES_KEY_URMA_DEV) - ret = ubcore_query_res_dev(dev, &key, (struct ubcore_res_dev_val *)addr); + ret = ubcore_query_res_dev(dev, &key, (struct ubcore_res_dev_val *)k_res_val); else ret = ubcore_query_resource(dev, &key, &val); + if (arg->in.type == UBCORE_RES_KEY_TPG) { + ubcore_dealloc_res_tp_list(user_tp_list_addr, &val); + /* recover after use */ + ((struct ubcore_res_tpg_val *)k_res_val)->tp_list = (uint32_t *)user_tp_list_addr; + } + + if (arg->in.type == UBCORE_RES_KEY_JETTY_GROUP) { + ubcore_dealloc_res_jetty_list(user_jetty_list_addr, &val); + ((struct ubcore_res_jetty_group_val *)k_res_val)->jetty_list = + (uint32_t *)user_jetty_list_addr; // recover after use + } + if (ret != 0) goto kfree_addr; - ret = ubcore_copy_to_user((void __user *)(uintptr_t)arg->out.addr, addr, res_len); + ret = ubcore_copy_to_user((void __user *)(uintptr_t)arg->out.addr, k_res_val, res_len); kfree_addr: - kfree(addr); + kfree(k_res_val); return ret; } static int ubcore_cmd_query_res(struct ubcore_cmd_hdr *hdr) { - enum ubcore_transport_type trans_type; - struct ubcore_cmd_query_res arg = { 0 }; + struct 
ubcore_cmd_query_res arg = {0}; struct ubcore_device *dev; - union ubcore_eid eid; uint32_t res_len; int ret; @@ -549,10 +746,7 @@ static int ubcore_cmd_query_res(struct ubcore_cmd_hdr *hdr) (uint32_t)arg.in.type, res_len, arg.out.len); return -1; } - (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); - trans_type = (enum ubcore_transport_type)arg.in.tp_type; - - dev = ubcore_find_device(&eid, trans_type); + dev = ubcore_find_device_with_name(arg.in.dev_name); if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", dev == NULL ? "NULL" : dev->dev_name, arg.in.dev_name); @@ -562,7 +756,7 @@ static int ubcore_cmd_query_res(struct ubcore_cmd_hdr *hdr) ret = ubcore_query_res_arg(dev, &arg, res_len); if (ret != 0) { ubcore_put_device(dev); - ubcore_log_err("Failed to query res by arg, tp_type: %d.\n", (int)trans_type); + ubcore_log_err("Failed to query res by arg\n"); return -1; } @@ -574,10 +768,6 @@ static int ubcore_cmd_query_res(struct ubcore_cmd_hdr *hdr) static int ubcore_cmd_parse(struct ubcore_cmd_hdr *hdr) { switch (hdr->command) { - case UBCORE_CMD_SET_UASID: - return ubcore_cmd_set_uasid(hdr); - case UBCORE_CMD_PUT_UASID: - return ubcore_cmd_put_uasid(hdr); case UBCORE_CMD_SET_UTP: return ubcore_cmd_set_utp(hdr); case UBCORE_CMD_SHOW_UTP: @@ -586,6 +776,12 @@ static int ubcore_cmd_parse(struct ubcore_cmd_hdr *hdr) return ubcore_cmd_query_stats(hdr); case UBCORE_CMD_QUERY_RES: return ubcore_cmd_query_res(hdr); + case UBCORE_CMD_ADD_EID: + return ubcore_cmd_add_ueid(hdr); + case UBCORE_CMD_DEL_EID: + return ubcore_cmd_del_ueid(hdr); + case UBCORE_CMD_SET_EID_MODE: + return ubcore_cmd_set_eid_mode(hdr); default: ubcore_log_err("bad ubcore command: %d.\n", (int)hdr->command); return -EINVAL; @@ -603,11 +799,21 @@ static long ubcore_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ubcore_log_err("length of ioctl input parameter is out of range.\n"); return -EINVAL; } - } else 
{ - ubcore_log_err("bad ioctl command.\n"); - return -ENOIOCTLCMD; + + return ubcore_cmd_parse(&hdr); + } + + if (cmd == UBCORE_UVS_CMD) { + ret = ubcore_copy_from_user(&hdr, (void *)arg, sizeof(struct ubcore_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBCORE_MAX_CMD_SIZE)) { + ubcore_log_err("length of ioctl input parameter is out of range.\n"); + return -EINVAL; + } + return ubcore_uvs_cmd_parse(&hdr); } - return ubcore_cmd_parse(&hdr); + + ubcore_log_err("bad ioctl command.\n"); + return -ENOIOCTLCMD; } static int ubcore_close(struct inode *i_node, struct file *filp) @@ -680,68 +886,161 @@ static void ubcore_unregister_sysfs(void) static void ubcore_ipv4_to_netaddr(struct ubcore_net_addr *netaddr, __be32 ipv4) { - netaddr->net_addr.in4.resv1 = 0; - netaddr->net_addr.in4.resv2 = htonl(UBCORE_IPV4_MAP_IPV6_PREFIX); + netaddr->net_addr.in4.reserved1 = 0; + netaddr->net_addr.in4.reserved2 = htonl(UBCORE_IPV4_MAP_IPV6_PREFIX); netaddr->net_addr.in4.addr = ipv4; } -static void ubcore_set_net_addr(struct ubcore_device *dev, const struct ubcore_net_addr *netaddr) +static void ubcore_sip_init(struct ubcore_sip_info *sip, struct ubcore_device *pf_dev, + const struct ubcore_net_addr *netaddr, uint32_t port_id, uint32_t prefix_len, uint32_t mtu) { - if (dev->ops->set_net_addr != NULL && dev->ops->set_net_addr(dev, netaddr) != 0) + (void)memcpy(sip->dev_name, pf_dev->dev_name, UBCORE_MAX_DEV_NAME); + (void)memcpy(&sip->addr, netaddr, sizeof(struct ubcore_net_addr)); + sip->port_id[0] = (uint8_t)port_id; + sip->port_cnt = 1; + sip->prefix_len = prefix_len; + sip->mtu = mtu; +} + +static void ubcore_add_net_addr(struct ubcore_device *tpf_dev, struct ubcore_device *pf_dev, + struct ubcore_net_addr *netaddr, struct net_device *netdev, uint32_t prefix_len) +{ + struct ubcore_sip_info sip = {0}; + uint32_t index; + uint32_t port_id; + int ret; + + /* get driver set nedev port */ + port_id = ubcore_find_port_netdev(pf_dev, netdev); + + ubcore_sip_init(&sip, pf_dev, + netaddr, 
port_id, prefix_len, (uint32_t)netdev->mtu); + + ret = ubcore_lookup_sip_idx(&sip, &index); + if (ret == 0) { + ubcore_log_err("sip already exists\n"); + return; + } + index = ubcore_sip_idx_alloc(0); + + if (tpf_dev->ops->add_net_addr != NULL && + tpf_dev->ops->add_net_addr(tpf_dev, netaddr, index) != 0) ubcore_log_err("Failed to set net addr"); + + /* add net_addr entry, record idx -> netaddr mapping */ + (void)ubcore_add_sip_entry(&sip, index); + + /* nodify uvs add sip info */ + if (ubcore_get_netlink_valid() == true) + (void)ubcore_notify_uvs_add_sip(tpf_dev, &sip, index); + + /* The ubcore sip table and up/down events are updated synchronously, and the uvs + * is abnormally disconnected. After waiting for the pull-up, + * the sip table in the kernel state is actively synchronized. + */ } -static void ubcore_unset_net_addr(struct ubcore_device *dev, const struct ubcore_net_addr *netaddr) +static void ubcore_delete_net_addr(struct ubcore_device *tpf_dev, struct ubcore_device *pf_dev, + struct ubcore_net_addr *netaddr, struct net_device *netdev, uint32_t prefix_len) { - if (dev->ops->unset_net_addr != NULL && dev->ops->unset_net_addr(dev, netaddr) != 0) - ubcore_log_err("Failed to unset net addr"); + struct ubcore_sip_info sip = {0}; + uint32_t port_id; + uint32_t index; + + port_id = ubcore_find_port_netdev(pf_dev, netdev); + ubcore_sip_init(&sip, pf_dev, + netaddr, port_id, prefix_len, (uint32_t)netdev->mtu); + if (ubcore_lookup_sip_idx(&sip, &index) != 0) + return; + + if (tpf_dev->ops->delete_net_addr != NULL && + tpf_dev->ops->delete_net_addr(tpf_dev, index) != 0) + ubcore_log_err("Failed to delete net addr"); + + (void)ubcore_del_sip_entry(index); + (void)ubcore_sip_idx_free(index); + /* nodify uvs delete sip info */ + if (ubcore_get_netlink_valid() == true) + (void)ubcore_notify_uvs_del_sip(tpf_dev, &sip, index); + + /* The ubcore sip table and up/down events are updated synchronously, + * and the uvs is abnormally disconnected. 
After waiting for the pull-up, + * the sip table in the kernel state is actively synchronized + */ } -static void ubcore_update_eid(struct ubcore_device *dev, struct ubcore_net_addr *netaddr) +static void ubcore_update_eid(struct ubcore_device *dev, + struct ubcore_net_addr *netaddr, bool is_add) { + union ubcore_eid *eid; + if (dev->transport_type <= UBCORE_TRANSPORT_INVALID || - dev->transport_type >= UBCORE_TRANSPORT_MAX) + dev->transport_type >= UBCORE_TRANSPORT_MAX) return; - if (ubcore_set_eid(dev, (union ubcore_eid *)(void *)&netaddr->net_addr) != 0) - ubcore_log_warn("Failed to update eid"); + if (!dev->dynamic_eid) { + ubcore_log_err("static mode does not allow modify of eid"); + return; + } + eid = (union ubcore_eid *)(void *)&netaddr->net_addr; + if (dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1) + ubcore_update_pattern1_eid(dev, eid, is_add); + else + ubcore_update_pattern3_eid(dev, eid, is_add); } static int ubcore_handle_inetaddr_event(struct net_device *netdev, unsigned long event, - struct ubcore_net_addr *netaddr) + struct ubcore_net_addr *netaddr, uint32_t prefix_len) { + struct net_device *real_netdev; + struct ubcore_net_addr real_netaddr; struct ubcore_device **devices; - struct ubcore_device *dev; uint32_t num_devices = 0; + struct ubcore_device *tpf_dev; + struct ubcore_device *dev; + uint32_t i; if (netdev == NULL || netdev->reg_state >= NETREG_UNREGISTERING) return NOTIFY_DONE; - devices = ubcore_get_devices_from_netdev(netdev, &num_devices); - if (devices == NULL) + if (is_vlan_dev(netdev)) { + real_netdev = vlan_dev_real_dev(netdev); + (void)ubcore_fill_netaddr_macvlan(&real_netaddr, real_netdev, netaddr->type); + } else { + real_netdev = netdev; + real_netaddr = *netaddr; + } + tpf_dev = ubcore_find_tpf_device(&real_netaddr, UBCORE_TRANSPORT_UB); + devices = ubcore_get_devices_from_netdev(real_netdev, &num_devices); + if (devices == NULL) { + ubcore_put_device(tpf_dev); return NOTIFY_DONE; - + } for (i = 0; i < num_devices; i++) { dev = 
devices[i]; switch (event) { case NETDEV_UP: - ubcore_set_net_addr(dev, netaddr); - ubcore_update_eid(dev, netaddr); + if (tpf_dev) + ubcore_add_net_addr(tpf_dev, dev, netaddr, netdev, prefix_len); + ubcore_update_eid(dev, netaddr, true); break; case NETDEV_DOWN: - ubcore_unset_net_addr(dev, netaddr); + if (tpf_dev) + ubcore_delete_net_addr(tpf_dev, dev, netaddr, netdev, prefix_len); + ubcore_update_eid(dev, netaddr, false); break; default: break; } } - + ubcore_put_device(tpf_dev); ubcore_put_devices(devices, num_devices); return NOTIFY_OK; } -static int ubcore_ipv6_notifier_call(struct notifier_block *nb, unsigned long event, void *arg) +static int ubcore_ipv6_notifier_call(struct notifier_block *nb, + unsigned long event, void *arg) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; struct ubcore_net_addr netaddr; @@ -751,11 +1050,18 @@ static int ubcore_ipv6_notifier_call(struct notifier_block *nb, unsigned long ev return NOTIFY_DONE; netdev = ifa->idev->dev; - ubcore_log_info("Get a ipv6 event %s from netdev %s%s ip %pI6c", netdev_cmd_to_name(event), - netdev_name(netdev), netdev_reg_state(netdev), &ifa->addr); + ubcore_log_info("Get a ipv6 event %s from netdev %s%s ip %pI6c prefixlen %u", + netdev_cmd_to_name(event), netdev_name(netdev), netdev_reg_state(netdev), + &ifa->addr, ifa->prefix_len); + memset(&netaddr, 0, sizeof(struct ubcore_net_addr)); (void)memcpy(&netaddr.net_addr, &ifa->addr, sizeof(struct in6_addr)); - return ubcore_handle_inetaddr_event(netdev, event, &netaddr); + (void)ubcore_fill_netaddr_macvlan(&netaddr, netdev, UBCORE_NET_ADDR_TYPE_IPV6); + + if (netaddr.net_addr.in6.subnet_prefix == cpu_to_be64(UBCORE_LOCAL_SHUNET)) + /* When mtu changes, intercept the ipv6 address up/down that triggers fe80 */ + return NOTIFY_DONE; + return ubcore_handle_inetaddr_event(netdev, event, &netaddr, ifa->prefix_len); } static int ubcore_ipv4_notifier_call(struct notifier_block *nb, unsigned long event, void *arg) @@ -768,25 +1074,33 @@ static int 
ubcore_ipv4_notifier_call(struct notifier_block *nb, unsigned long ev return NOTIFY_DONE; netdev = ifa->ifa_dev->dev; - ubcore_log_info("Get a ipv4 event %s netdev %s%s ip %pI4", netdev_cmd_to_name(event), - netdev_name(netdev), netdev_reg_state(netdev), &ifa->ifa_address); + ubcore_log_info("Get a ipv4 event %s netdev %s%s ip %pI4 prefixlen %hhu", + netdev_cmd_to_name(event), netdev_name(netdev), netdev_reg_state(netdev), + &ifa->ifa_address, ifa->ifa_prefixlen); memset(&netaddr, 0, sizeof(struct ubcore_net_addr)); ubcore_ipv4_to_netaddr(&netaddr, ifa->ifa_address); - return ubcore_handle_inetaddr_event(netdev, event, &netaddr); + (void)ubcore_fill_netaddr_macvlan(&netaddr, netdev, UBCORE_NET_ADDR_TYPE_IPV4); + return ubcore_handle_inetaddr_event(netdev, event, &netaddr, (uint32_t)ifa->ifa_prefixlen); } -static void ubcore_add_ipv4_entry(struct list_head *list, __be32 ipv4) +static void ubcore_add_ipv4_entry(struct list_head *list, __be32 ipv4, uint32_t prefix_len, + struct net_device *netdev) { struct ubcore_net_addr_node *na_entry; na_entry = kzalloc(sizeof(struct ubcore_net_addr_node), GFP_ATOMIC); + if (na_entry == NULL) + return; ubcore_ipv4_to_netaddr(&na_entry->addr, ipv4); + (void)ubcore_fill_netaddr_macvlan(&na_entry->addr, netdev, UBCORE_NET_ADDR_TYPE_IPV4); + na_entry->prefix_len = prefix_len; list_add_tail(&na_entry->node, list); } -static void ubcore_add_ipv6_entry(struct list_head *list, const struct in6_addr *ipv6) +static void ubcore_add_ipv6_entry(struct list_head *list, struct in6_addr *ipv6, + uint32_t prefix_len, struct net_device *netdev) { struct ubcore_net_addr_node *na_entry; @@ -795,12 +1109,14 @@ static void ubcore_add_ipv6_entry(struct list_head *list, const struct in6_addr return; (void)memcpy(&na_entry->addr.net_addr, ipv6, sizeof(struct in6_addr)); + (void)ubcore_fill_netaddr_macvlan(&na_entry->addr, netdev, UBCORE_NET_ADDR_TYPE_IPV6); + na_entry->prefix_len = prefix_len; list_add_tail(&na_entry->node, list); } static void 
ubcore_netdev_get_ipv4(struct net_device *netdev, struct list_head *list) { - const struct in_ifaddr *ifa; + struct in_ifaddr *ifa; struct in_device *in_dev; rcu_read_lock(); @@ -811,16 +1127,14 @@ static void ubcore_netdev_get_ipv4(struct net_device *netdev, struct list_head * } in_dev_for_each_ifa_rcu(ifa, in_dev) { - if (ifa->ifa_flags & IFA_F_SECONDARY) - continue; - ubcore_add_ipv4_entry(list, ifa->ifa_address); + ubcore_add_ipv4_entry(list, ifa->ifa_address, ifa->ifa_prefixlen, netdev); } rcu_read_unlock(); } static void ubcore_netdev_get_ipv6(struct net_device *netdev, struct list_head *list) { - const struct inet6_ifaddr *ifa; + struct inet6_ifaddr *ifa; struct inet6_dev *in_dev; in_dev = in6_dev_get(netdev); @@ -829,35 +1143,46 @@ static void ubcore_netdev_get_ipv6(struct net_device *netdev, struct list_head * read_lock_bh(&in_dev->lock); list_for_each_entry(ifa, &in_dev->addr_list, if_list) { - if (ifa->flags & IFA_F_SECONDARY) - continue; - ubcore_add_ipv6_entry(list, (const struct in6_addr *)&ifa->addr); + ubcore_add_ipv6_entry(list, (struct in6_addr *)&ifa->addr, ifa->prefix_len, netdev); } read_unlock_bh(&in_dev->lock); in6_dev_put(in_dev); } -void ubcore_set_default_eid(struct ubcore_device *dev) +void ubcore_update_default_eid(struct ubcore_device *dev, bool is_add) { struct net_device *netdev = dev->netdev; struct ubcore_net_addr_node *na_entry; + struct ubcore_device *tpf_dev = NULL; struct ubcore_net_addr_node *next; LIST_HEAD(na_list); /* Do not modify eid if the driver already set default eid other than 0 */ - if (netdev == NULL || - !(dev->attr.eid.in6.interface_id == 0 && dev->attr.eid.in6.subnet_prefix == 0)) + if (netdev == NULL || !(dev->eid_table.eid_entries[0].eid.in6.interface_id == 0 && + dev->eid_table.eid_entries[0].eid.in6.subnet_prefix == 0)) return; ubcore_netdev_get_ipv4(netdev, &na_list); + ubcore_netdev_get_ipv6(netdev, &na_list); list_for_each_entry_safe(na_entry, next, &na_list, node) { - ubcore_update_eid(dev, 
&na_entry->addr); + if (na_entry->addr.net_addr.in6.subnet_prefix == cpu_to_be64(UBCORE_LOCAL_SHUNET)) + continue; + tpf_dev = ubcore_find_tpf_device(&na_entry->addr, UBCORE_TRANSPORT_UB); + if (tpf_dev) + is_add == true ? + ubcore_add_net_addr(tpf_dev, dev, &na_entry->addr, + netdev, na_entry->prefix_len) : + ubcore_delete_net_addr(tpf_dev, dev, &na_entry->addr, + netdev, na_entry->prefix_len); + if (tpf_dev) + ubcore_put_device(tpf_dev); + ubcore_update_eid(dev, &na_entry->addr, is_add); list_del(&na_entry->node); kfree(na_entry); } } -static void ubcore_update_netaddr(struct ubcore_device *dev, struct net_device *netdev, bool add) +void ubcore_update_netaddr(struct ubcore_device *dev, struct net_device *netdev, bool add) { struct ubcore_net_addr_node *na_entry; struct ubcore_net_addr_node *next; @@ -868,11 +1193,15 @@ static void ubcore_update_netaddr(struct ubcore_device *dev, struct net_device * ubcore_netdev_get_ipv6(netdev, &na_list); list_for_each_entry_safe(na_entry, next, &na_list, node) { - if (add) - ubcore_set_net_addr(dev, &na_entry->addr); - else - ubcore_unset_net_addr(dev, &na_entry->addr); - + if (add) { + if (dev->ops->add_net_addr != NULL && + dev->ops->add_net_addr(dev, &na_entry->addr, 0) != 0) + ubcore_log_err("Failed to add net addr"); + } else { + if (dev->ops->delete_net_addr != NULL && + dev->ops->delete_net_addr(dev, 0) != 0) + ubcore_log_err("Failed to delete net addr"); + } list_del(&na_entry->node); kfree(na_entry); } @@ -896,10 +1225,45 @@ static int ubcore_remove_netaddr(struct ubcore_device *dev, struct net_device *n return NOTIFY_OK; } +static void ubcore_chang_mtu(struct ubcore_device *dev, struct net_device *netdev) +{ + struct ubcore_device *tpf_dev; + struct ubcore_sip_info *old_sip; + struct ubcore_sip_info new_sip; + uint32_t max_cnt; + uint32_t i; + + if (ubcore_get_netlink_valid() == false) + return; + + tpf_dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB); + max_cnt = ubcore_get_sip_max_cnt(); + + for (i = 0; i < 
max_cnt; i++) { + if (tpf_dev) { + old_sip = ubcore_lookup_sip_info(i); + if (old_sip == NULL) + continue; + new_sip = *old_sip; + new_sip.mtu = netdev->mtu; + if (memcmp(old_sip->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME) == 0) { + (void)ubcore_notify_uvs_del_sip(tpf_dev, old_sip, i); + (void)ubcore_notify_uvs_add_sip(tpf_dev, &new_sip, i); + ubcore_log_info("dev_name: %s, mtu: %u change mtu: %u\n", + dev->dev_name, old_sip->mtu, new_sip.mtu); + old_sip->mtu = netdev->mtu; + } + } + } + if (tpf_dev) + ubcore_put_device(tpf_dev); +} + static int ubcore_net_notifier_call(struct notifier_block *nb, unsigned long event, void *arg) { struct net_device *netdev = netdev_notifier_info_to_dev(arg); struct ubcore_device **devices; + struct net_device *real_netdev; struct ubcore_device *dev; uint32_t num_devices = 0; uint32_t i; @@ -907,7 +1271,12 @@ static int ubcore_net_notifier_call(struct notifier_block *nb, unsigned long eve if (netdev == NULL) return NOTIFY_DONE; - devices = ubcore_get_devices_from_netdev(netdev, &num_devices); + if (is_vlan_dev(netdev)) + real_netdev = vlan_dev_real_dev(netdev); + else + real_netdev = netdev; + + devices = ubcore_get_devices_from_netdev(real_netdev, &num_devices); if (devices == NULL) return NOTIFY_DONE; @@ -919,11 +1288,23 @@ static int ubcore_net_notifier_call(struct notifier_block *nb, unsigned long eve switch (event) { case NETDEV_REGISTER: case NETDEV_UP: - ubcore_add_netaddr(dev, netdev); + if (dev->transport_type != UBCORE_TRANSPORT_UB) + ubcore_add_netaddr(dev, netdev); break; case NETDEV_UNREGISTER: case NETDEV_DOWN: - ubcore_remove_netaddr(dev, netdev); + if (dev->transport_type != UBCORE_TRANSPORT_UB) + ubcore_remove_netaddr(dev, netdev); + break; + case NETDEV_CHANGEADDR: + if (dev->transport_type != UBCORE_TRANSPORT_UB) { + ubcore_remove_netaddr(dev, netdev); + ubcore_add_netaddr(dev, netdev); + } + break; + case NETDEV_CHANGEMTU: + if (dev->transport_type == UBCORE_TRANSPORT_UB) + ubcore_chang_mtu(dev, netdev); break; 
default: break; @@ -941,7 +1322,9 @@ static struct notifier_block ubcore_ipv4_notifier = { .notifier_call = ubcore_ipv4_notifier_call, }; -static struct notifier_block ubcore_net_notifier = { .notifier_call = ubcore_net_notifier_call }; +static struct notifier_block ubcore_net_notifier = { + .notifier_call = ubcore_net_notifier_call, +}; static int ubcore_register_notifiers(void) { @@ -983,13 +1366,10 @@ static int __init ubcore_init(void) if (ret != 0) return ret; - /* uasid is assigned from 1, and 0 means random value. - * so 0 consumed here first. - */ - bitmap_zero(g_uasid_bitmap, UBCORE_MAX_UASID); - set_bit(0, g_uasid_bitmap); + (void)ubcore_sip_table_init(); if (ubcore_netlink_init() != 0) { + ubcore_sip_table_uninit(); ubcore_unregister_sysfs(); return -1; } @@ -997,10 +1377,12 @@ static int __init ubcore_init(void) ret = ubcore_register_notifiers(); if (ret != 0) { pr_err("Failed to register notifiers\n"); - ubcore_unregister_sysfs(); ubcore_netlink_exit(); + ubcore_sip_table_uninit(); + ubcore_unregister_sysfs(); return -1; } + ubcore_log_info("ubcore module init success.\n"); return 0; } @@ -1008,6 +1390,7 @@ static int __init ubcore_init(void) static void __exit ubcore_exit(void) { ubcore_unregister_notifiers(); + ubcore_sip_table_uninit(); ubcore_netlink_exit(); ubcore_unregister_sysfs(); ubcore_log_info("ubcore module exits.\n"); diff --git a/drivers/ub/urma/ubcore/ubcore_msg.c b/drivers/ub/urma/ubcore/ubcore_msg.c new file mode 100644 index 000000000000..0ced81857628 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_msg.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore message table implementation + * Author: Yang Yijian + * Create: 2023-07-05 + * Note: + * History: 2023-07-05: Create file + */ + +#include +#include +#include "ubcore_log.h" +#include +#include "ubcore_netlink.h" +#include "ubcore_vtp.h" +#include +#include "ubcore_priv.h" +#include "ubcore_msg.h" + +#define UBCORE_MSG_TIMEOUT 10000 /* 10s */ + +static LIST_HEAD(g_msg_session_list); +static DEFINE_SPINLOCK(g_msg_session_lock); +static atomic_t g_msg_seq = ATOMIC_INIT(0); + +static uint32_t ubcore_get_msg_seq(void) +{ + return (uint32_t)atomic_inc_return(&g_msg_seq); +} + +static void ubcore_free_msg_session(struct kref *kref) +{ + struct ubcore_msg_session *s = container_of(kref, struct ubcore_msg_session, kref); + unsigned long flags; + + spin_lock_irqsave(&g_msg_session_lock, flags); + list_del(&s->node); + spin_unlock_irqrestore(&g_msg_session_lock, flags); + kfree(s); +} + +static struct ubcore_msg_session *ubcore_find_msg_session(uint32_t seq) +{ + struct ubcore_msg_session *tmp, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_msg_session_lock, flags); + list_for_each_entry(tmp, &g_msg_session_list, node) { + if (tmp->req->hdr.msg_id == seq) { + target = tmp; + kref_get(&target->kref); + break; + } + } + spin_unlock_irqrestore(&g_msg_session_lock, flags); + return target; +} + +static void ubcore_destroy_msg_session(struct ubcore_msg_session *s) +{ + (void)kref_put(&s->kref, ubcore_free_msg_session); +} + +static struct ubcore_msg_session *ubcore_create_msg_session(struct ubcore_msg *msg) +{ + struct ubcore_msg_session *s; + unsigned long flags; + + s = kzalloc(sizeof(struct ubcore_msg_session), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->req = msg; + 
spin_lock_irqsave(&g_msg_session_lock, flags); + list_add_tail(&s->node, &g_msg_session_list); + spin_unlock_irqrestore(&g_msg_session_lock, flags); + kref_init(&s->kref); + init_completion(&s->comp); + return s; +} + +static struct ubcore_nlmsg *ubcore_get_fe2uvs_nlmsg(struct ubcore_device *dev, + struct ubcore_msg *msg) +{ + uint32_t payload_len = (uint32_t)sizeof(*msg) + msg->hdr.len; + struct ubcore_nlmsg *nlmsg; + + nlmsg = ubcore_alloc_nlmsg(payload_len, NULL, NULL); + if (nlmsg == NULL) + return NULL; + + nlmsg->transport_type = dev->transport_type; + nlmsg->msg_type = UBCORE_NL_FE2TPF_REQ; + (void)memcpy(nlmsg->payload, msg, payload_len); + return nlmsg; +} + +/* called when recv nl response from uvs */ +static int ubcore_forward_uvs2fe_msg(struct ubcore_nlmsg *msg, void *user_arg) +{ + struct ubcore_device *dev = (struct ubcore_device *)user_arg; + int ret; + + ret = ubcore_send_msg(dev, (struct ubcore_msg *)msg->payload); + return ret; +} + +static int ubcore_forward_fe2uvs_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + struct ubcore_nl_resp_cb cb; + struct ubcore_nlmsg *nlmsg; + int ret; + + nlmsg = ubcore_get_fe2uvs_nlmsg(dev, msg); + if (nlmsg == NULL) + return -ENOMEM; + + cb.callback = ubcore_forward_uvs2fe_msg; + cb.user_arg = dev; + ret = ubcore_nl_send_nowait(nlmsg, &cb); + if (ret) { + kfree(nlmsg); + return -EIO; + } + + return 0; +} + +/* msg is a copy of received msg from driver */ +static int ubcore_fe2tpf_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + int ret; + + ret = ubcore_forward_fe2uvs_msg(dev, msg); + kfree(msg); + return ret; +} + +/* msg is a copy of received msg from driver */ +static int ubcore_tpf2fe_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + struct ubcore_msg_session *s; + + s = ubcore_find_msg_session(msg->hdr.msg_id); + if (s == NULL) { + ubcore_log_err("Failed to find msg session with seq %u", msg->hdr.msg_id); + kfree(msg); + return -ENXIO; + } + s->resp = msg; + kref_put(&s->kref, 
ubcore_free_msg_session); + + if (s->cb.callback == NULL) { + complete(&s->comp); + } else { + s->cb.callback(dev, msg, s->cb.user_arg); + kfree(msg); + kfree(s->req); + ubcore_destroy_msg_session(s); + } + return 0; +} + +static int ubcore_mpf2tpf_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + return 0; +} + +static int ubcore_tpf2mpf_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + return 0; +} + +static ubcore_req_handler g_ubcore_msg_type_handlers[] = { + [UBCORE_MSG_TYPE_FE2TPF] = ubcore_fe2tpf_msg, + [UBCORE_MSG_TYPE_MPF2TPF] = ubcore_mpf2tpf_msg, + [UBCORE_MSG_TYPE_TPF2FE] = ubcore_tpf2fe_msg, + [UBCORE_MSG_TYPE_TPF2MPF] = ubcore_tpf2mpf_msg, +}; + +static void ubcore_fill_tpf_dev_name(struct ubcore_msg *msg) +{ + struct ubcore_create_vtp_req *create; + struct ubcore_create_vtp_req *destroy; + struct ubcore_msg_discover_eid_req *eid_req; + struct ubcore_msg_config_device_req *config_dev; + struct ubcore_device *tpf_dev; + + tpf_dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB); + if (tpf_dev == NULL) + return; + + if (msg->hdr.type != UBCORE_MSG_TYPE_FE2TPF) { + ubcore_put_device(tpf_dev); + return; + } + + switch (msg->hdr.opcode) { + case UBCORE_MSG_CREATE_VTP: + create = (struct ubcore_create_vtp_req *)msg->data; + (void)memcpy(create->tpfdev_name, tpf_dev->dev_name, + UBCORE_MAX_DEV_NAME); + break; + case UBCORE_MSG_DESTROY_VTP: + destroy = (struct ubcore_create_vtp_req *)msg->data; + (void)memcpy(destroy->tpfdev_name, tpf_dev->dev_name, + UBCORE_MAX_DEV_NAME); + break; + case UBCORE_MSG_ALLOC_EID: + case UBCORE_MSG_DEALLOC_EID: + eid_req = (struct ubcore_msg_discover_eid_req *)msg->data; + (void)memcpy(eid_req->tpfdev_name, tpf_dev->dev_name, + UBCORE_MAX_DEV_NAME); + break; + case UBCORE_MSG_CONFIG_DEVICE: + config_dev = (struct ubcore_msg_config_device_req *)msg->data; + (void)memcpy(config_dev->tpfdev_name, tpf_dev->dev_name, + UBCORE_MAX_DEV_NAME); + break; + case UBCORE_MSG_STOP_PROC_VTP_MSG: + case 
UBCORE_MSG_QUERY_VTP_MIG_STATUS: + case UBCORE_MSG_FLOW_STOPPED: + case UBCORE_MSG_MIG_ROLLBACK: + case UBCORE_MSG_MIG_VM_START: + ubcore_log_err("Wrong type when try to full tpf dev name\n"); + break; + default: + ubcore_log_err("Unrecognized type of opcode %d\n", (int)msg->hdr.opcode); + } + ubcore_put_device(tpf_dev); +} + +static struct ubcore_msg *ubcore_copy_msg(struct ubcore_msg *msg) +{ + uint32_t len = (uint32_t)sizeof(struct ubcore_msg) + msg->hdr.len; + struct ubcore_msg *resp; + + resp = kzalloc(len, GFP_KERNEL); + if (resp == NULL) + return NULL; + + (void)memcpy(resp, msg, len); + return resp; +} + +static struct ubcore_msg *ubcore_migrate_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + uint32_t len; + struct ubcore_nl_function_mig_req *mig_resp; + struct ubcore_function_mig_req *mig_msg; + struct ubcore_msg *resp; + + len = (uint32_t)sizeof(struct ubcore_msg) + + (uint32_t)sizeof(struct ubcore_nl_function_mig_req); + mig_msg = (struct ubcore_function_mig_req *)msg->data; + resp = kzalloc(len, GFP_KERNEL); + if (resp == NULL) { + ubcore_log_err("Failed to kzalloc msg resp!\n"); + return NULL; + } + + resp->hdr.type = msg->hdr.type; + resp->hdr.ep = msg->hdr.ep; + resp->hdr.opcode = msg->hdr.opcode; + resp->hdr.msg_id = msg->hdr.msg_id; + resp->hdr.len = sizeof(struct ubcore_nl_function_mig_req); + + mig_resp = (struct ubcore_nl_function_mig_req *)resp->data; + mig_resp->mig_fe_idx = mig_msg->mig_fe_idx; + (void)strcpy(mig_resp->dev_name, dev->dev_name); + + return resp; +} + +int ubcore_recv_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + struct ubcore_msg *handle_msg; + int ret; + + if (dev == NULL || msg == NULL) { + ubcore_log_err("Invalid parameter in recv msg\n!"); + return -EINVAL; + } + + if (msg->hdr.opcode >= UBCORE_MSG_STOP_PROC_VTP_MSG) { + handle_msg = ubcore_migrate_msg(dev, msg); + if (handle_msg == NULL) { + ubcore_log_err("null msg when handle migrate\n!"); + return -EINVAL; + } + } else { + handle_msg = 
ubcore_copy_msg(msg); + if (handle_msg == NULL) { + ubcore_log_err("Failed to create handle msg req!\n"); + return -ENOMEM; + } + /* fill tpf_dev name */ + ubcore_fill_tpf_dev_name(handle_msg); + } + + ret = g_ubcore_msg_type_handlers[handle_msg->hdr.type](dev, handle_msg); + + /* do not free copy here */ + return ret; +} +EXPORT_SYMBOL(ubcore_recv_msg); + +int ubcore_send_msg(struct ubcore_device *dev, struct ubcore_msg *msg) +{ + int ret; + + if (dev == NULL || dev->ops->send_msg == NULL) { + ubcore_log_err("Invalid parameter!\n"); + return -EINVAL; + } + + ret = dev->ops->send_msg(dev, msg); + if (ret != 0) { + ubcore_log_err("Failed to send message! msg_id = %u!\n", msg->hdr.msg_id); + return -EIO; + } + return 0; +} + +int ubcore_send_fe2tpf_msg(struct ubcore_device *dev, struct ubcore_msg *req, + bool wait, struct ubcore_resp_cb *cb) +{ + unsigned long leavetime; + struct ubcore_msg_session *s; + int ret; + + req->hdr.msg_id = ubcore_get_msg_seq(); + s = ubcore_create_msg_session(req); + if (s == NULL) { + ubcore_log_err("Failed to create req session!\n"); + return -ENOMEM; + } + + ret = ubcore_send_msg(dev, req); + if (ret != 0) { + ubcore_log_err("Failed to send req, msg_id = %u, opcode = %hu.\n", + req->hdr.msg_id, (uint16_t)req->hdr.opcode); + ubcore_destroy_msg_session(s); + return -EIO; + } + + if (!wait) { + s->cb = *cb; + return 0; + } + + leavetime = wait_for_completion_timeout(&s->comp, msecs_to_jiffies(UBCORE_MSG_TIMEOUT)); + if (leavetime == 0) { + ubcore_log_err("Failed to wait req reply, msg_id = %u, opcode = %hu, leavetime = %lu.\n", + req->hdr.msg_id, (uint16_t)req->hdr.opcode, leavetime); + ubcore_destroy_msg_session(s); + return -EIO; + } + + ubcore_log_info("Success to wait req reply, msg_id = %u, opcode = %hu, leavetime = %lu.\n", + req->hdr.msg_id, (uint16_t)req->hdr.opcode, leavetime); + + ret = cb->callback(dev, s->resp, cb->user_arg); + kfree(s->resp); + kfree(s->req); + ubcore_destroy_msg_session(s); + return ret; +} + +static int 
ubcore_msg_discover_eid_cb(struct ubcore_device *dev, + struct ubcore_msg *msg, void *msg_ctx) +{ + struct ubcore_msg_discover_eid_resp *data; + struct ubcore_ueid_cfg cfg; + union ubcore_eid eid = {0}; + int ret; + + if (dev == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + data = (struct ubcore_msg_discover_eid_resp *)(void *)msg->data; + if (msg == NULL || msg->hdr.type != UBCORE_MSG_TYPE_TPF2FE || + data == NULL || data->ret != 0 || + (msg->hdr.opcode != UBCORE_MSG_ALLOC_EID && + msg->hdr.opcode != UBCORE_MSG_DEALLOC_EID)) { + ubcore_log_err("Failed to query data from the UVS. Use the default value.\n"); + return -EINVAL; + } + + if (msg->hdr.opcode == UBCORE_MSG_ALLOC_EID) { + ret = ubcore_update_eidtbl_by_idx(dev, &data->eid, data->eid_index, true); + } else { + eid = data->eid; + (void)memset(&data->eid, 0, sizeof(union ubcore_eid)); + ret = ubcore_update_eidtbl_by_idx(dev, &data->eid, data->eid_index, false); + } + if (ret != 0) + return ret; + + /* pf need update ueid */ + if (!dev->attr.virtualization) { + if (msg->hdr.opcode == UBCORE_MSG_ALLOC_EID) + cfg.eid = data->eid; + else + cfg.eid = eid; + + cfg.eid_index = data->eid_index; + cfg.upi = data->upi; + if (msg->hdr.opcode == UBCORE_MSG_ALLOC_EID) + ret = ubcore_add_ueid(dev, UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + else + ret = ubcore_delete_ueid(dev, UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + } + return ret; +} + +int ubcore_msg_discover_eid(struct ubcore_device *dev, uint32_t eid_index, + enum ubcore_msg_opcode op) +{ + struct ubcore_msg_discover_eid_req *data; + struct ubcore_msg *req_msg; + uint32_t data_len; + struct ubcore_resp_cb cb = { + .callback = ubcore_msg_discover_eid_cb, + .user_arg = NULL + }; + int ret; + + data_len = sizeof(struct ubcore_msg_discover_eid_req); + req_msg = kcalloc(1, sizeof(struct ubcore_msg) + data_len, GFP_KERNEL); + if (req_msg == NULL) { + ubcore_log_err("alloc req_msg failed.\n"); + return -ENOMEM; + } + req_msg->hdr.type = 
UBCORE_MSG_TYPE_FE2TPF; + req_msg->hdr.len = data_len; + req_msg->hdr.opcode = op; + data = (struct ubcore_msg_discover_eid_req *)req_msg->data; + data->eid_index = eid_index; + data->eid_type = dev->cfg.pattern; + data->virtualization = dev->attr.virtualization; + (void)memcpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + + ret = ubcore_send_fe2tpf_msg(dev, req_msg, true, &cb); + if (ret != 0) + ubcore_log_err("send fe2tpf failed.\n"); + + return ret; +} diff --git a/drivers/ub/urma/ubcore/ubcore_msg.h b/drivers/ub/urma/ubcore/ubcore_msg.h new file mode 100644 index 000000000000..16fe4dbaac67 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_msg.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore msg table header + * Author: Yang Yijian + * Create: 2023-07-05 + * Note: + * History: 2023-07-05: Create file + */ + +#ifndef UBCORE_MSG_H +#define UBCORE_MSG_H + +#include + +enum ubcore_msg_resp_status { + UBCORE_MSG_RESP_RC_JETTY_ALREADY_BIND = -3, + UBCORE_MSG_RESP_IN_PROGRESS = -2, + UBCORE_MSG_RESP_FAIL = -1, + UBCORE_MSG_RESP_SUCCESS = 0 +}; + +typedef int (*ubcore_req_handler)(struct ubcore_device *dev, struct ubcore_msg *msg); +typedef int (*ubcore_resp_handler)(struct ubcore_device *dev, + struct ubcore_msg *msg, void *msg_ctx); + +struct ubcore_resp_cb { + void *user_arg; + ubcore_resp_handler callback; +}; + +struct ubcore_msg_session { + struct ubcore_msg *req; + struct ubcore_msg *resp; + struct list_head node; + struct kref kref; + struct completion comp; /* Synchronization event of timeout sleep and thread wakeup */ + struct ubcore_resp_cb cb; +}; + +struct ubcore_msg_config_device_req { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t max_rc_cnt; + uint32_t max_rc_depth; + uint32_t min_slice; /* TA slice size byte */ + uint32_t max_slice; /* TA slice size byte */ + bool is_tpf_dev; + bool virtualization; + char tpfdev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_msg_config_device_resp { + enum ubcore_msg_resp_status ret; + uint32_t rc_cnt; + uint32_t rc_depth; + uint32_t slice; /* TA slice size byte */ + bool is_tpf_dev; + uint32_t suspend_period; + uint32_t suspend_cnt; +}; + +struct ubcore_msg_discover_eid_req { + uint32_t eid_index; + char dev_name[UBCORE_MAX_DEV_NAME]; + enum ubcore_pattern eid_type; + bool virtualization; + char tpfdev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_msg_discover_eid_resp { + uint32_t ret; + union ubcore_eid eid; + uint32_t eid_index; + uint32_t upi; +}; + +struct ubcore_function_mig_req { + uint16_t mig_fe_idx; +}; + +int ubcore_send_msg(struct ubcore_device *dev, struct ubcore_msg *msg); +int ubcore_send_fe2tpf_msg(struct ubcore_device *dev, struct ubcore_msg *req, + bool wait, 
struct ubcore_resp_cb *cb); +int ubcore_msg_discover_eid(struct ubcore_device *dev, uint32_t eid_index, + enum ubcore_msg_opcode op); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_netdev.c b/drivers/ub/urma/ubcore/ubcore_netdev.c new file mode 100644 index 000000000000..1d6486e444c4 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netdev.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netdev module + * Author: Chen Wen + * Create: 2023-08-27 + * Note: + * History: 2023-08-27: create file + */ +#include +#include + +#include +#include "ubcore_log.h" +#include +#include "ubcore_netlink.h" + +#define UBCORE_MAX_SIP (1 << 24) /* 2^24 */ +#define UBCORE_SIP_TABLE_SIZE (10240) + +static DECLARE_BITMAP(g_sip_bitmap, UBCORE_MAX_SIP); +static DEFINE_SPINLOCK(g_sip_spinlock); +static DECLARE_RWSEM(g_port_list_lock); + +struct ubcore_ndev_port { + struct net_device *ndev; + unsigned int port_id; + struct list_head node; + char dev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_sip_table { + struct ubcore_sip_info *entry[UBCORE_SIP_TABLE_SIZE]; + struct mutex lock; +}; + +static struct ubcore_sip_table g_sip_table; + +int ubcore_check_port_state(struct ubcore_device *dev, uint8_t port_idx) +{ + struct ubcore_device_status status; + + if (dev == NULL || port_idx >= UBCORE_MAX_PORT_CNT) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ubcore_query_device_status(dev, &status) != 
0) { + ubcore_log_err("query device status for state failed with dev name %s\n", + dev->dev_name); + return -EPERM; + } + + if (status.port_status[port_idx].state != UBCORE_PORT_ACTIVE) { + ubcore_log_err("port state is not active with dev name: %s and port_idx: %hhu\n", + dev->dev_name, port_idx); + return -EPERM; + } + ubcore_log_info("Success to query dev %s port state and it's active.\n", dev->dev_name); + return 0; +} + +int ubcore_find_port_netdev(struct ubcore_device *dev, struct net_device *ndev) +{ + struct ubcore_ndev_port *port_info; + + down_write(&g_port_list_lock); + list_for_each_entry(port_info, &dev->port_list, node) { + if (port_info->ndev == ndev) { + up_write(&g_port_list_lock); + return (int)port_info->port_id; + } + } + up_write(&g_port_list_lock); + ubcore_log_warn("ndev:%s no available port found.\n", netdev_name(ndev)); + /* Currently assigned port0 by default */ + return 0; +} + +int ubcore_find_port_with_dev_name(struct ubcore_device *dev, char *dev_name) +{ + struct ubcore_ndev_port *port_info; + + if (dev == NULL || dev_name == NULL) { + ubcore_log_err("Invalid input parameter\n"); + return 0; + } + + down_write(&g_port_list_lock); + list_for_each_entry(port_info, &dev->port_list, node) { + if (strcmp(port_info->dev_name, dev_name) == 0) { + up_write(&g_port_list_lock); + ubcore_log_info("ndev:%s dev name:%s with port id %u found.\n", + netdev_name(port_info->ndev), dev_name, port_info->port_id); + return (int)port_info->port_id; + } + } + up_write(&g_port_list_lock); + ubcore_log_warn("dev name:%s no available port found.\n", dev_name); + return 0; +} + +int ubcore_set_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + unsigned int port_id) +{ + struct ubcore_ndev_port *port_info, *new_node; + + if (dev == NULL || ndev == NULL) { + ubcore_log_err("invalid input parameter.\n"); + return -1; + } + down_write(&g_port_list_lock); + list_for_each_entry(port_info, &dev->port_list, node) { + if (port_info->ndev == ndev) { + 
up_write(&g_port_list_lock); + ubcore_log_warn("ndev:%s is already bound port: %d\n", + netdev_name(ndev), port_info->port_id); + return 0; + } + } + up_write(&g_port_list_lock); + + new_node = kzalloc(sizeof(struct ubcore_ndev_port), GFP_ATOMIC); + if (new_node == NULL) + return -ENOMEM; + + new_node->ndev = ndev; + new_node->port_id = port_id; + (void)strcpy(new_node->dev_name, dev->dev_name); + down_write(&g_port_list_lock); + list_add_tail(&new_node->node, &dev->port_list); + up_write(&g_port_list_lock); + ubcore_log_info("ndev:%s dev_name: %s bound port: %d\n", netdev_name(ndev), + dev->dev_name, new_node->port_id); + return 0; +} +EXPORT_SYMBOL(ubcore_set_port_netdev); + +void ubcore_put_port_netdev(struct ubcore_device *dev) +{ + struct ubcore_ndev_port *port_info, *next; + + down_write(&g_port_list_lock); + list_for_each_entry_safe(port_info, next, &dev->port_list, node) { + if (port_info != NULL) { + list_del(&port_info->node); + kfree(port_info); + } + } + up_write(&g_port_list_lock); +} +EXPORT_SYMBOL(ubcore_put_port_netdev); + +static void ubcore_sip_bitmap_init(void) +{ + bitmap_zero(g_sip_bitmap, UBCORE_MAX_SIP); +} + +uint32_t ubcore_sip_idx_alloc(uint32_t idx) +{ + uint32_t ret_idx = idx; + int ret; + + spin_lock(&g_sip_spinlock); + if (ret_idx > 0) { + ret = test_bit(ret_idx, g_sip_bitmap); + if (ret == 0) { + set_bit(ret_idx, g_sip_bitmap); + spin_unlock(&g_sip_spinlock); + return ret_idx; + } + spin_unlock(&g_sip_spinlock); + ubcore_log_err("ret_idx allocation failed.\n"); + return 0; + } + ret_idx = (uint32_t)find_first_zero_bit(g_sip_bitmap, UBCORE_MAX_SIP); + if (ret_idx >= UBCORE_MAX_SIP) { + ubcore_log_err("ret_idx allocation failed.\n"); + spin_unlock(&g_sip_spinlock); + return 0; + } + set_bit(ret_idx, g_sip_bitmap); + spin_unlock(&g_sip_spinlock); + return ret_idx; +} + +int ubcore_sip_idx_free(uint32_t idx) +{ + spin_lock(&g_sip_spinlock); + if (test_bit(idx, g_sip_bitmap) == false) { + spin_unlock(&g_sip_spinlock); + ubcore_log_err("idx 
is used.\n"); + return -EINVAL; + } + clear_bit(idx, g_sip_bitmap); + spin_unlock(&g_sip_spinlock); + return 0; +} + +void ubcore_sip_table_init(void) +{ + ubcore_sip_bitmap_init(); + mutex_init(&g_sip_table.lock); +} + +void ubcore_sip_table_uninit(void) +{ + uint32_t i; + + mutex_lock(&g_sip_table.lock); + for (i = 0; i < UBCORE_SIP_TABLE_SIZE; i++) { + if (g_sip_table.entry[i] != NULL) { + kfree(g_sip_table.entry[i]); + g_sip_table.entry[i] = NULL; + } + } + mutex_unlock(&g_sip_table.lock); +} + +int ubcore_add_sip_entry(const struct ubcore_sip_info *sip, uint32_t idx) +{ + struct ubcore_sip_info *new_sip; + + if (idx >= UBCORE_SIP_TABLE_SIZE || g_sip_table.entry[idx] != NULL) { + ubcore_log_err("add sip failed.\n"); + return -1; + } + new_sip = kzalloc(sizeof(struct ubcore_sip_info), GFP_ATOMIC); + if (new_sip == NULL) + return -ENOMEM; + + (void)memcpy(new_sip, sip, sizeof(struct ubcore_sip_info)); + mutex_lock(&g_sip_table.lock); + g_sip_table.entry[idx] = new_sip; + mutex_unlock(&g_sip_table.lock); + ubcore_log_info("sip table add netdev: %s, entry idx: %d.\n", new_sip->dev_name, idx); + return 0; +} + +int ubcore_del_sip_entry(uint32_t idx) +{ + if (idx >= UBCORE_SIP_TABLE_SIZE || g_sip_table.entry[idx] == NULL) { + ubcore_log_err("delete sip failed.\n"); + return -1; + } + mutex_lock(&g_sip_table.lock); + kfree(g_sip_table.entry[idx]); + g_sip_table.entry[idx] = NULL; + mutex_unlock(&g_sip_table.lock); + ubcore_log_info("del sip entry idx: %d.\n", idx); + return 0; +} + +int ubcore_lookup_sip_idx(struct ubcore_sip_info *sip, uint32_t *idx) +{ + uint32_t i; + + mutex_lock(&g_sip_table.lock); + for (i = 0; i < UBCORE_SIP_TABLE_SIZE; i++) { + if (g_sip_table.entry[i] != NULL && memcmp(g_sip_table.entry[i], sip, + sizeof(struct ubcore_sip_info)) == 0) { + *idx = i; + break; + } + } + mutex_unlock(&g_sip_table.lock); + + if (i == UBCORE_SIP_TABLE_SIZE) { + ubcore_log_warn("no available idx found.\n"); + return -1; + } + return 0; +} + +uint32_t 
ubcore_get_sip_max_cnt(void) +{ + return UBCORE_SIP_TABLE_SIZE; +} + +struct ubcore_sip_info *ubcore_lookup_sip_info(uint32_t idx) +{ + struct ubcore_sip_info *sip = NULL; + + mutex_lock(&g_sip_table.lock); + if (idx < UBCORE_SIP_TABLE_SIZE && g_sip_table.entry[idx] != NULL) + sip = g_sip_table.entry[idx]; + mutex_unlock(&g_sip_table.lock); + return sip; +} + +int ubcore_notify_uvs_del_sip(struct ubcore_device *dev, + const struct ubcore_sip_info *sip_info, uint32_t index) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_del_sip_req *sip_req; + struct ubcore_del_sip_resp *resp; + + req_msg = kzalloc(sizeof(struct ubcore_nlmsg) + + sizeof(struct ubcore_del_sip_req), GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + /* fill msg head */ + req_msg->msg_type = UBCORE_NL_DEL_SIP_REQ; + req_msg->transport_type = dev->transport_type; + (void)memcpy(req_msg->dst_eid.raw, sip_info->addr.net_addr.raw, UBCORE_EID_SIZE); + (void)memcpy(req_msg->src_eid.raw, sip_info->addr.net_addr.raw, UBCORE_EID_SIZE); + req_msg->payload_len = (uint32_t)sizeof(struct ubcore_del_sip_req); + + /* fill msg payload */ + sip_req = (struct ubcore_del_sip_req *)(void *)req_msg->payload; + sip_req->index = index; + resp_msg = ubcore_nl_send_wait(req_msg); + if (resp_msg == NULL) { + ubcore_log_err("Failed to wait query response"); + kfree(req_msg); + return -1; + } + resp = (struct ubcore_del_sip_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_NL_DEL_SIP_RESP || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err("del sip request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 
1 : resp->ret)); + kfree(resp_msg); + kfree(req_msg); + return -1; + } + kfree(resp_msg); + kfree(req_msg); + return 0; +} + +int ubcore_notify_uvs_add_sip(struct ubcore_device *dev, + const struct ubcore_sip_info *sip_info, uint32_t index) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_add_sip_req *sip_req; + struct ubcore_add_sip_resp *resp; + + req_msg = kzalloc(sizeof(struct ubcore_nlmsg) + + sizeof(struct ubcore_add_sip_req), GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + /* fill msg head */ + req_msg->msg_type = UBCORE_NL_ADD_SIP_REQ; + req_msg->transport_type = dev->transport_type; + (void)memcpy(req_msg->dst_eid.raw, sip_info->addr.net_addr.raw, UBCORE_EID_SIZE); + (void)memcpy(req_msg->src_eid.raw, sip_info->addr.net_addr.raw, UBCORE_EID_SIZE); + req_msg->payload_len = (uint32_t)sizeof(struct ubcore_add_sip_req); + + /* fill msg payload */ + sip_req = (struct ubcore_add_sip_req *)(void *)req_msg->payload; + memcpy(sip_req->dev_name, sip_info->dev_name, UBCORE_MAX_DEV_NAME); + memcpy(&sip_req->netaddr, &sip_info->addr, sizeof(struct ubcore_net_addr)); + sip_req->index = index; + sip_req->port_cnt = sip_info->port_cnt; + (void)memcpy(sip_req->port_id, sip_info->port_id, UBCORE_MAX_PORT_CNT); + sip_req->prefix_len = sip_info->prefix_len; + sip_req->mtu = sip_info->mtu; + + resp_msg = ubcore_nl_send_wait(req_msg); + if (resp_msg == NULL) { + ubcore_log_err("Failed to wait query response"); + kfree(req_msg); + return -1; + } + + resp = (struct ubcore_add_sip_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_NL_ADD_SIP_RESP || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err("add sip request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 
1 : resp->ret)); + kfree(resp_msg); + kfree(req_msg); + return -1; + } + kfree(resp_msg); + kfree(req_msg); + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcore_netdev.h b/drivers/ub/urma/ubcore/ubcore_netdev.h new file mode 100644 index 000000000000..6f2cd7cada06 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netdev.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netdev head file + * Author: Chen Wen + * Create: 2023-07-14 + * Note: + * History: 2023-07-14: Create file + */ + +#ifndef UBCORE_NETDEV_H +#define UBCORE_NETDEV_H + +#include + +int ubcore_check_port_state(struct ubcore_device *dev, uint8_t port_idx); +int ubcore_find_port_netdev(struct ubcore_device *dev, struct net_device *ndev); +int ubcore_find_port_with_dev_name(struct ubcore_device *dev, char *dev_name); + +int ubcore_sip_table_init(void); +void ubcore_sip_table_uninit(void); +uint32_t ubcore_sip_idx_alloc(uint32_t idx); +int ubcore_sip_idx_free(uint32_t idx); + +int ubcore_add_sip_entry(struct ubcore_sip_info *sip, uint32_t idx); +int ubcore_del_sip_entry(uint32_t idx); +int ubcore_lookup_sip_idx(struct ubcore_sip_info *sip, uint32_t *idx); + +int ubcore_notify_uvs_add_sip(struct ubcore_device *dev, + const struct ubcore_sip_info *sip, uint32_t index); +int ubcore_notify_uvs_del_sip(struct ubcore_device *dev, + const struct ubcore_sip_info *sip, uint32_t index); + +uint32_t ubcore_get_sip_max_cnt(void); +struct 
ubcore_sip_info *ubcore_lookup_sip_info(uint32_t idx); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.c b/drivers/ub/urma/ubcore/ubcore_netlink.c index 985424f8bba6..489322d1d182 100644 --- a/drivers/ub/urma/ubcore/ubcore_netlink.c +++ b/drivers/ub/urma/ubcore/ubcore_netlink.c @@ -24,25 +24,46 @@ #include #include "ubcore_log.h" #include "ubcore_tp.h" +#include "ubcore_vtp.h" +#include "ubcore_priv.h" #include "ubcore_netlink.h" #define UBCORE_NL_TYPE 24 /* same with agent netlink type */ #define UBCORE_NL_TIMEOUT 10000 /* 10s */ #define UBCORE_NL_INVALID_PORT 0 -struct sock *nl_sock; +static struct sock *nl_sock; static LIST_HEAD(g_nl_session_list); static DEFINE_SPINLOCK(g_nl_session_lock); atomic_t g_nlmsg_seq; static uint32_t g_agent_port = UBCORE_NL_INVALID_PORT; /* get agent pid */ -static int ubcore_nl_send(struct ubcore_nlmsg *pbuf, uint16_t len); +static int ubcore_nl_unicast(struct ubcore_nlmsg *pbuf, uint32_t len); static uint32_t ubcore_get_nlmsg_seq(void) { return atomic_inc_return(&g_nlmsg_seq); } +struct ubcore_nlmsg *ubcore_alloc_nlmsg(size_t payload_len, + const union ubcore_eid *src_eid, const union ubcore_eid *dst_eid) +{ + struct ubcore_nlmsg *msg; + + msg = kzalloc(sizeof(struct ubcore_nlmsg) + payload_len, GFP_KERNEL); + if (msg == NULL) + return NULL; + + if (src_eid != NULL) + msg->src_eid = *src_eid; + + if (dst_eid != NULL) + msg->dst_eid = *dst_eid; + + msg->payload_len = payload_len; + return msg; +} + static struct ubcore_nl_session *ubcore_create_nl_session(struct ubcore_nlmsg *req) { struct ubcore_nl_session *s; @@ -149,13 +170,126 @@ static void ubcore_nl_handle_tp_req(struct nlmsghdr *nlh) kfree(req); return; } - if (ubcore_nl_send(resp, ubcore_nlmsg_len(resp)) != 0) + if (ubcore_nl_unicast(resp, ubcore_nlmsg_len(resp)) != 0) ubcore_log_err("Failed to send response"); kfree(req); kfree(resp); } +static void ubcore_nl_handle_tpf2fe_resp(struct nlmsghdr *nlh) +{ + struct ubcore_nl_session *s; + struct ubcore_nlmsg *resp; + 
+ resp = ubcore_get_nlmsg_data(nlh); + if (resp == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return; + } + s = ubcore_find_nl_session(resp->nlmsg_seq); + if (s == NULL) { + ubcore_log_err("Failed to find nl session with seq %u", resp->nlmsg_seq); + kfree(resp); + return; + } + s->resp = resp; + kref_put(&s->kref, ubcore_free_nl_session); + + s->cb.callback(resp, s->cb.user_arg); + kfree(s->req); + kfree(s->resp); + ubcore_destroy_nl_session(s); +} + +static void ubcore_nl_handle_update_tpf_dev_info_resp(struct nlmsghdr *nlh) +{ + struct ubcore_nl_session *s; + struct ubcore_nlmsg *resp; + + resp = ubcore_get_nlmsg_data(nlh); + if (resp == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return; + } + s = ubcore_find_nl_session(resp->nlmsg_seq); + if (s == NULL) { + ubcore_log_err("Failed to find nl session with seq %u", resp->nlmsg_seq); + kfree(resp); + return; + } + s->resp = resp; + kref_put(&s->kref, ubcore_free_nl_session); + complete(&s->comp); +} + +static void ubcore_nl_sync_table(void) +{ + ubcore_sync_sip_table(); +} + +static void ubcore_nl_update_tpf_dev_info(void) +{ + if (ubcore_query_all_device_tpf_dev_info() != 0) + ubcore_log_warn("Failed update tpf dev info after tpsa ready"); +} + +static struct ubcore_nlmsg *ubcore_get_migrate_vtp_req(struct ubcore_vtp *vtp, + enum ubcore_event_type event_type, char *dev_name) +{ + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_migrate_vtp_req); + struct ubcore_migrate_vtp_req *mig_req; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &vtp->cfg.local_eid, &vtp->cfg.peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = UBCORE_TRANSPORT_UB; + if (event_type == UBCORE_EVENT_MIGRATE_VTP_SWITCH) { + req->msg_type = UBCORE_NL_MIGRATE_VTP_SWITCH; + } else if (event_type == UBCORE_EVENT_MIGRATE_VTP_ROLLBACK) { + req->msg_type = UBCORE_NL_MIGRATE_VTP_ROLLBACK; + } else { + ubcore_log_err("wrong event msg type"); + return NULL; + } + 
mig_req = (struct ubcore_migrate_vtp_req *)(void *)req->payload; + (void)strcpy(mig_req->dev_name, dev_name); + + mig_req->vtp_cfg.fe_idx = vtp->cfg.fe_idx; + mig_req->vtp_cfg.vtpn = vtp->cfg.vtpn; + mig_req->vtp_cfg.local_jetty = vtp->cfg.local_jetty; + mig_req->vtp_cfg.local_eid = vtp->cfg.local_eid; + mig_req->vtp_cfg.peer_eid = vtp->cfg.peer_eid; + mig_req->vtp_cfg.peer_jetty = vtp->cfg.peer_jetty; + mig_req->vtp_cfg.flag = vtp->cfg.flag; + mig_req->vtp_cfg.trans_mode = vtp->cfg.trans_mode; + + return req; +} + +void ubcore_report_migrate_vtp(struct ubcore_device *dev, struct ubcore_vtp *vtp, + enum ubcore_event_type event_type) +{ + struct ubcore_nlmsg *req_msg; + int ret; + + req_msg = ubcore_get_migrate_vtp_req(vtp, event_type, dev->dev_name); + if (req_msg == NULL) { + ubcore_log_err("Failed to get migrate vtp switch req"); + return; + } + + ret = ubcore_nl_send_nowait_without_cb(req_msg); + if (ret) + ubcore_log_err("Failed to nowait send migrate vtp request"); + else + ubcore_log_info("Success to nowait send migrate vtp request"); + + kfree(req_msg); +} + static void ubcore_nl_cb_func(struct sk_buff *skb) { struct nlmsghdr *nlh; @@ -176,11 +310,24 @@ static void ubcore_nl_cb_func(struct sk_buff *skb) case UBCORE_NL_DESTROY_TP_RESP: case UBCORE_NL_QUERY_TP_RESP: case UBCORE_NL_RESTORE_TP_RESP: + case UBCORE_NL_ADD_SIP_RESP: + case UBCORE_NL_DEL_SIP_RESP: ubcore_nl_handle_tp_resp(nlh); break; case UBCORE_NL_SET_AGENT_PID: g_agent_port = nlh->nlmsg_pid; + ubcore_nl_sync_table(); + break; + case UBCORE_NL_QUERY_TPF_DEV_INFO: + ubcore_nl_update_tpf_dev_info(); + break; + case UBCORE_NL_UPDATE_TPF_DEV_INFO_RESP: + ubcore_nl_handle_update_tpf_dev_info_resp(nlh); break; + case UBCORE_NL_TPF2FE_RESP: + ubcore_nl_handle_tpf2fe_resp(nlh); + break; + case UBCORE_NL_FE2TPF_REQ: case UBCORE_NL_QUERY_TP_REQ: default: ubcore_log_err("Unexpected nl msg type: %d received\n", nlh->nlmsg_type); @@ -188,7 +335,7 @@ static void ubcore_nl_cb_func(struct sk_buff *skb) } } 
-static int ubcore_nl_send(struct ubcore_nlmsg *pbuf, uint16_t len) +static int ubcore_nl_unicast(struct ubcore_nlmsg *pbuf, uint32_t len) { struct sk_buff *nl_skb; struct nlmsghdr *nlh; @@ -232,7 +379,7 @@ struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_nlmsg *req) return NULL; } - ret = ubcore_nl_send(req, ubcore_nlmsg_len(req)); + ret = ubcore_nl_unicast(req, ubcore_nlmsg_len(req)); if (ret != 0) { ubcore_log_err("Failed to send nl msg %d", ret); ubcore_destroy_nl_session(s); @@ -250,6 +397,69 @@ struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_nlmsg *req) return resp; } +int ubcore_nl_send_nowait(struct ubcore_nlmsg *req, struct ubcore_nl_resp_cb *cb) +{ + struct ubcore_nl_session *s; + int ret; + + req->nlmsg_seq = ubcore_get_nlmsg_seq(); + s = ubcore_create_nl_session(req); + if (s == NULL) { + ubcore_log_err("Failed to create nl session"); + return -ENOMEM; + } + s->cb = *cb; + + ret = ubcore_nl_unicast(req, ubcore_nlmsg_len(req)); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg %d", ret); + ubcore_destroy_nl_session(s); + return -EIO; + } + + return 0; +} + +int ubcore_nl_send_nowait_without_cb(struct ubcore_nlmsg *req) +{ + int ret; + + req->nlmsg_seq = ubcore_get_nlmsg_seq(); + + ret = ubcore_nl_unicast(req, ubcore_nlmsg_len(req)); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg %d", ret); + return -EIO; + } + + return 0; +} + +bool ubcore_get_netlink_valid(void) +{ + if (g_agent_port == UBCORE_NL_INVALID_PORT) + ubcore_log_warn("The nelink service is not established well"); + return g_agent_port != UBCORE_NL_INVALID_PORT; +} + + +static int ubcore_netlink_notify(struct notifier_block *nb, unsigned long event, void *_notify) +{ + struct netlink_notify *notify = _notify; + + if (event != NETLINK_URELEASE || notify->protocol != UBCORE_NL_TYPE) + return NOTIFY_DONE; + + ubcore_log_info("netlink release.\n"); + g_agent_port = UBCORE_NL_INVALID_PORT; + + return NOTIFY_OK; +} + +static struct notifier_block 
ubcore_netlink_notifier = { + .notifier_call = ubcore_netlink_notify, +}; + int ubcore_netlink_init(void) { /* create netlink socket */ @@ -260,11 +470,18 @@ int ubcore_netlink_init(void) ubcore_log_err("Netlink_kernel_create error.\n"); return -1; } + if (netlink_register_notifier(&ubcore_netlink_notifier) != 0) { + ubcore_log_err("netlink_register_notifier error.\n"); + netlink_kernel_release(nl_sock); + nl_sock = NULL; + return -1; + } return 0; } void ubcore_netlink_exit(void) { + (void)netlink_unregister_notifier(&ubcore_netlink_notifier); if (nl_sock != NULL) { netlink_kernel_release(nl_sock); nl_sock = NULL; diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.h b/drivers/ub/urma/ubcore/ubcore_netlink.h index 1bdda997f96a..56c642e92262 100644 --- a/drivers/ub/urma/ubcore/ubcore_netlink.h +++ b/drivers/ub/urma/ubcore/ubcore_netlink.h @@ -24,7 +24,11 @@ #include #include -enum ubcore_nl_resp_status { UBCORE_NL_RESP_FAIL = -1, UBCORE_NL_RESP_SUCCESS = 0 }; +enum ubcore_nl_resp_status { + UBCORE_NL_RESP_IN_PROGRESS = -2, + UBCORE_NL_RESP_FAIL = -1, + UBCORE_NL_RESP_SUCCESS = 0 +}; enum ubcore_nlmsg_type { UBCORE_NL_CREATE_TP_REQ = NLMSG_MIN_TYPE, /* 0x10 */ @@ -35,23 +39,38 @@ enum ubcore_nlmsg_type { UBCORE_NL_QUERY_TP_RESP, UBCORE_NL_RESTORE_TP_REQ, UBCORE_NL_RESTORE_TP_RESP, - UBCORE_NL_SET_AGENT_PID + UBCORE_NL_SET_AGENT_PID, + UBCORE_NL_FE2TPF_REQ, + UBCORE_NL_TPF2FE_RESP, + UBCORE_NL_ADD_SIP_REQ, + UBCORE_NL_ADD_SIP_RESP, + UBCORE_NL_DEL_SIP_REQ, + UBCORE_NL_DEL_SIP_RESP, + UBCORE_NL_TP_ERROR_REQ, + UBCORE_NL_TP_SUSPEND_REQ, + UBCORE_NL_MIGRATE_VTP_SWITCH, + UBCORE_NL_MIGRATE_VTP_ROLLBACK, + UBCORE_NL_QUERY_TPF_DEV_INFO, + UBCORE_NL_UPDATE_TPF_DEV_INFO_REQ, + UBCORE_NL_UPDATE_TPF_DEV_INFO_RESP, }; struct ubcore_nlmsg { uint32_t nlmsg_seq; enum ubcore_nlmsg_type msg_type; enum ubcore_transport_type transport_type; - union ubcore_eid src_eid; - union ubcore_eid dst_eid; + union ubcore_eid src_eid; /* todo: delete */ + union ubcore_eid dst_eid; /* todo: delete 
*/ uint32_t payload_len; - uint8_t payload[0]; + uint8_t payload[0]; // limited by tpsa_nl_msg_t's payload len } __packed; struct ubcore_ta_data { - enum ubcore_ta_type type; + enum ubcore_transport_type trans_type; + enum ubcore_ta_type ta_type; struct ubcore_jetty_id jetty_id; /* local jetty id */ struct ubcore_jetty_id tjetty_id; /* peer jetty id */ + bool is_target; }; struct ubcore_multipath_tp_cfg { @@ -75,7 +94,7 @@ struct ubcore_nl_create_tp_req { struct ubcore_ta_data ta; uint32_t ext_len; uint32_t udrv_in_len; - uint8_t ext_udrv[0]; /* struct ubcore_tp_ext->len + struct ubcore_udrv_priv->in_len */ + uint8_t ext_udrv[0]; /* struct ubcore_udrv_ext->len + struct ubcore_udrv_priv->in_len */ }; struct ubcore_nl_create_tp_resp { @@ -126,22 +145,98 @@ struct ubcore_nl_restore_tp_resp { uint32_t peer_rx_psn; }; +struct ubcore_nl_resp_cb { + void *user_arg; + int (*callback)(struct ubcore_nlmsg *resp, void *user_arg); +}; + struct ubcore_nl_session { struct ubcore_nlmsg *req; struct ubcore_nlmsg *resp; struct list_head node; struct kref kref; + struct ubcore_nl_resp_cb cb; struct completion comp; /* Synchronization event of timeout sleep and thread wakeup */ }; +struct ubcore_add_sip_req { + struct ubcore_net_addr netaddr; + uint32_t prefix_len; + char dev_name[UBCORE_MAX_DEV_NAME]; + uint8_t port_cnt; + uint8_t port_id[UBCORE_MAX_PORT_CNT]; + uint32_t index; + uint32_t mtu; +}; + +struct ubcore_add_sip_resp { + enum ubcore_nl_resp_status ret; +}; + +struct ubcore_del_sip_req { + uint32_t index; +}; + +struct ubcore_del_sip_resp { + enum ubcore_nl_resp_status ret; +}; + +struct ubcore_tp_suspend_req { + uint32_t tpgn; + uint32_t tpn; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint32_t sip_idx; +}; + +struct ubcore_tp_error_req { + uint32_t tpgn; + uint32_t tpn; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint32_t tx_psn; + uint32_t peer_tpn; + enum ubcore_transport_mode trans_mode; + uint32_t sip_idx; + union ubcore_eid local_eid; + uint32_t 
local_jetty_id; + union ubcore_eid peer_eid; + uint32_t peer_jetty_id; +}; + +struct ubcore_nl_function_mig_req { + uint16_t mig_fe_idx; + char dev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_update_tpf_dev_info_req { + char dev_name[UBCORE_MAX_DEV_NAME]; + union ubcore_device_feat dev_fea; + uint32_t cc_entry_cnt; + uint8_t data[0]; +}; // same as tpsa_nl_update_tpf_dev_info_req + +struct ubcore_update_tpf_dev_info_resp { + enum ubcore_nl_resp_status ret; +}; // same as tpsa_nl_update_tpf_dev_info_resp + static inline uint32_t ubcore_nlmsg_len(struct ubcore_nlmsg *msg) { return sizeof(struct ubcore_nlmsg) + msg->payload_len; } +bool ubcore_get_netlink_valid(void); int ubcore_netlink_init(void); void ubcore_netlink_exit(void); /* return response msg pointer, caller must release it */ struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_nlmsg *req); + +int ubcore_nl_send_nowait(struct ubcore_nlmsg *req, struct ubcore_nl_resp_cb *cb); +int ubcore_nl_send_nowait_without_cb(struct ubcore_nlmsg *req); +struct ubcore_nlmsg *ubcore_alloc_nlmsg(size_t payload_len, + const union ubcore_eid *src_eid, const union ubcore_eid *dst_eid); + +void ubcore_report_migrate_vtp(struct ubcore_device *dev, struct ubcore_vtp *vtp, + enum ubcore_event_type event_type); #endif diff --git a/drivers/ub/urma/ubcore/ubcore_priv.h b/drivers/ub/urma/ubcore/ubcore_priv.h index 73a3060e2d78..f4bfb0804676 100644 --- a/drivers/ub/urma/ubcore/ubcore_priv.h +++ b/drivers/ub/urma/ubcore/ubcore_priv.h @@ -23,6 +23,57 @@ #include #include +#include "ubcore_tp.h" + +/* + * Pure UB device, netdev type is Unified Bus (UB). 
+ * On the Internet Assigned Numbers Authority, add Hardware Types: Unified Bus (UB) + */ +#define UBCORE_NETDEV_UB_TYPE (38) /* Unified Bus(UB) */ +#define UBCORE_NON_VIRTUALIZATION_FE_IDX 0xffff +#define UCBORE_INVALID_UPI 0xffffffff + +union ubcore_set_global_cfg_mask { + struct { + uint32_t suspend_period : 1; + uint32_t suspend_cnt : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubcore_set_global_cfg { + union ubcore_set_global_cfg_mask mask; + uint32_t suspend_period; + uint32_t suspend_cnt; +}; + +union ubcore_set_vport_cfg_mask { + struct { + uint32_t pattern : 1; + uint32_t virtualization : 1; + uint32_t min_jetty_cnt : 1; + uint32_t max_jetty_cnt : 1; + uint32_t min_jfr_cnt : 1; + uint32_t max_jfr_cnt : 1; + uint32_t slice : 1; + uint32_t reserved : 25; + } bs; + uint32_t value; +}; + +struct ubcore_set_vport_cfg { + union ubcore_set_vport_cfg_mask mask; + char dev_name[UBCORE_MAX_DEV_NAME]; + uint16_t fe_idx; + uint32_t pattern; + uint32_t virtualization; + uint32_t min_jetty_cnt; + uint32_t max_jetty_cnt; + uint32_t min_jfr_cnt; + uint32_t max_jfr_cnt; + uint32_t slice; +}; static inline struct ubcore_ucontext *ubcore_get_uctx(struct ubcore_udata *udata) { @@ -34,63 +85,109 @@ static inline bool ubcore_check_dev_name_invalid(struct ubcore_device *dev, char return (strcmp(dev->dev_name, dev_name) != 0); } +static inline bool ubcore_check_trans_mode_valid(enum ubcore_transport_mode trans_mode) +{ + return trans_mode == UBCORE_TP_RM || + trans_mode == UBCORE_TP_RC || trans_mode == UBCORE_TP_UM; +} + /* Caller must put device */ struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, enum ubcore_transport_type type); +struct ubcore_device *ubcore_find_device_with_eid_index(union ubcore_eid *eid, + enum ubcore_transport_type type, uint32_t eid_index); +struct ubcore_device *ubcore_find_device_with_name(const char *dev_name); void ubcore_get_device(struct ubcore_device *dev); void ubcore_put_device(struct ubcore_device *dev); 
+struct ubcore_device *ubcore_find_tpf_device(struct ubcore_net_addr *netaddr, + enum ubcore_transport_type type); +int ubcore_tpf_device_set_global_cfg(struct ubcore_set_global_cfg *cfg); +int ubcore_update_eidtbl_by_idx(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t eid_idx, bool is_alloc_eid); +int ubcore_update_eidtbl_by_eid(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t *eid_idx, bool is_alloc_eid); + +struct ubcore_device *ubcore_find_upi_with_dev_name(const char *dev_name, uint32_t *upi); +int ubcore_add_upi_list(struct ubcore_device *dev, uint32_t upi); /* Must call ubcore_put_devices to put and release the returned devices */ struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, uint32_t *cnt); void ubcore_put_devices(struct ubcore_device **devices, uint32_t cnt); -void ubcore_set_default_eid(struct ubcore_device *dev); +void ubcore_update_default_eid(struct ubcore_device *dev, bool is_add); +void ubcore_update_netaddr(struct ubcore_device *dev, struct net_device *netdev, bool add); +int ubcore_fill_netaddr_macvlan(struct ubcore_net_addr *netaddr, struct net_device *netdev, + enum ubcore_net_addr_type type); + +void ubcore_sync_sip_table(void); +int ubcore_query_all_device_tpf_dev_info(void); + +int ubcore_config_utp(struct ubcore_device *dev, uint8_t utp_id, struct ubcore_utp_attr *attr, + union ubcore_utp_attr_mask mask); -int ubcore_config_utp(struct ubcore_device *dev, const union ubcore_eid *eid, - const struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask mask); -int ubcore_show_utp(struct ubcore_device *dev, const union ubcore_eid *eid); +void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, struct ubcore_tp_cfg *cfg); +struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata); +void ubcore_abort_tp(struct ubcore_tp *tp, struct ubcore_tp_meta *meta); +int ubcore_modify_tp(struct ubcore_device *dev, struct ubcore_tp_node *tp_node, + 
struct ubcore_tp_attr *arg_tp_attr, struct ubcore_udata udata); -static inline uint32_t ubcore_get_jetty_hash(const struct ubcore_jetty_id *jetty_id) +static inline uint32_t ubcore_get_jetty_hash(struct ubcore_jetty_id *jetty_id) { return jhash(jetty_id, sizeof(struct ubcore_jetty_id), 0); } -static inline uint32_t ubcore_get_tseg_hash(const struct ubcore_ubva *ubva) +static inline uint32_t ubcore_get_tseg_hash(struct ubcore_ubva *ubva) { return jhash(ubva, sizeof(struct ubcore_ubva), 0); } -static inline uint32_t ubcore_get_eid_hash(const union ubcore_eid *eid) +static inline uint32_t ubcore_get_eid_hash(union ubcore_eid *eid) { return jhash(eid, sizeof(union ubcore_eid), 0); } -static inline bool ubcore_jfs_need_advise(const struct ubcore_jfs *jfs) +static inline uint32_t ubcore_get_vtp_hash(union ubcore_eid *local_eid) +{ + return jhash(local_eid, sizeof(union ubcore_eid) + sizeof(union ubcore_eid), 0); +} + +static inline uint32_t ubcore_get_rc_vtp_hash(union ubcore_eid *peer_eid) +{ + return jhash(peer_eid, sizeof(union ubcore_eid) + sizeof(uint32_t), 0); +} + +static inline uint32_t ubcore_get_vtpn_hash(union ubcore_eid *key_addr) +{ + return jhash(key_addr, sizeof(union ubcore_eid) + sizeof(union ubcore_eid), 0); +} + +static inline bool ubcore_jfs_need_advise(struct ubcore_jfs *jfs) { return jfs->ub_dev->transport_type == UBCORE_TRANSPORT_IB && jfs->jfs_cfg.trans_mode == UBCORE_TP_RM; } -static inline bool ubcore_jfs_tjfr_need_advise(const struct ubcore_jfs *jfs, - const struct ubcore_tjetty *tjfr) +static inline bool ubcore_jfs_tjfr_need_advise(struct ubcore_jfs *jfs, + struct ubcore_tjetty *tjfr) { return jfs->ub_dev->transport_type == UBCORE_TRANSPORT_IB && jfs->jfs_cfg.trans_mode == UBCORE_TP_RM && tjfr->cfg.trans_mode == UBCORE_TP_RM; } -static inline bool ubcore_jetty_need_advise(const struct ubcore_jetty *jetty) +static inline bool ubcore_jetty_need_advise(struct ubcore_jetty *jetty) { return jetty->ub_dev->transport_type == UBCORE_TRANSPORT_IB && 
jetty->jetty_cfg.trans_mode == UBCORE_TP_RM; } -static inline bool ubcore_jetty_tjetty_need_advise(const struct ubcore_jetty *jetty, - const struct ubcore_tjetty *tjetty) +static inline bool ubcore_jetty_tjetty_need_advise(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty) { return jetty->ub_dev->transport_type == UBCORE_TRANSPORT_IB && jetty->jetty_cfg.trans_mode == UBCORE_TP_RM && tjetty->cfg.trans_mode == UBCORE_TP_RM; } -static inline bool ubcore_jfr_need_advise(const struct ubcore_jfr *jfr) +static inline bool ubcore_jfr_need_advise(struct ubcore_jfr *jfr) { return jfr->ub_dev->transport_type == UBCORE_TRANSPORT_IB && jfr->jfr_cfg.trans_mode == UBCORE_TP_RM; diff --git a/drivers/ub/urma/ubcore/ubcore_segment.c b/drivers/ub/urma/ubcore/ubcore_segment.c index be37b2d893d4..0efe6a1761e8 100644 --- a/drivers/ub/urma/ubcore/ubcore_segment.c +++ b/drivers/ub/urma/ubcore/ubcore_segment.c @@ -25,48 +25,53 @@ #include "ubcore_tp.h" #include "ubcore_tp_table.h" -struct ubcore_key_id *ubcore_alloc_key_id(struct ubcore_device *dev, struct ubcore_udata *udata) +struct ubcore_token_id *ubcore_alloc_token_id(struct ubcore_device *dev, + struct ubcore_udata *udata) { - struct ubcore_key_id *key; + struct ubcore_token_id *token_id; - if (dev == NULL || dev->ops->alloc_key_id == NULL || dev->ops->free_key_id == NULL) { + if (dev == NULL || dev->ops->alloc_token_id == NULL || dev->ops->free_token_id == NULL) { ubcore_log_err("invalid parameter.\n"); return NULL; } - key = dev->ops->alloc_key_id(dev, udata); - if (key == NULL) { - ubcore_log_err("failed to alloc key id.\n"); + token_id = dev->ops->alloc_token_id(dev, udata); + if (token_id == NULL) { + ubcore_log_err("failed to alloc token_id id.\n"); return NULL; } - key->ub_dev = dev; - key->uctx = ubcore_get_uctx(udata); - atomic_set(&key->use_cnt, 0); - return key; + token_id->ub_dev = dev; + token_id->uctx = ubcore_get_uctx(udata); + atomic_set(&token_id->use_cnt, 0); + return token_id; } 
-EXPORT_SYMBOL(ubcore_alloc_key_id); +EXPORT_SYMBOL(ubcore_alloc_token_id); -int ubcore_free_key_id(struct ubcore_key_id *key) +int ubcore_free_token_id(struct ubcore_token_id *token_id) { struct ubcore_device *dev; - if (key == NULL || key->ub_dev == NULL || key->ub_dev->ops->free_key_id == NULL) { + if (token_id == NULL || token_id->ub_dev == NULL || + token_id->ub_dev->ops->free_token_id == NULL) { ubcore_log_err("invalid parameter.\n"); return -1; } - dev = key->ub_dev; + dev = token_id->ub_dev; - if (WARN_ON_ONCE(atomic_read(&key->use_cnt))) + if (atomic_read(&token_id->use_cnt)) { + ubcore_log_err("The token_id is still being used"); return -EBUSY; - - return dev->ops->free_key_id(key); + } + return dev->ops->free_token_id(token_id); } -EXPORT_SYMBOL(ubcore_free_key_id); +EXPORT_SYMBOL(ubcore_free_token_id); struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, - const struct ubcore_seg_cfg *cfg, + struct ubcore_seg_cfg *cfg, struct ubcore_udata *udata) { + bool alloc_token_id = false; + struct ubcore_seg_cfg tmp_cfg; struct ubcore_target_seg *tseg; if (dev == NULL || cfg == NULL || dev->ops->register_seg == NULL || @@ -75,6 +80,14 @@ struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, return NULL; } + if (dev->transport_type == UBCORE_TRANSPORT_UB && + ((cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_VALID && cfg->token_id == NULL) || + (cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_INVALID && + cfg->token_id != NULL))) { + ubcore_log_err("invalid parameter of token_id.\n"); + return NULL; + } + if ((cfg->flag.bs.access & (UBCORE_ACCESS_REMOTE_WRITE | UBCORE_ACCESS_REMOTE_ATOMIC)) && !(cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_WRITE)) { ubcore_log_err( @@ -82,23 +95,40 @@ struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, return NULL; } - tseg = dev->ops->register_seg(dev, cfg, udata); + if (udata == NULL && cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_INVALID && + dev->transport_type == 
UBCORE_TRANSPORT_UB) + alloc_token_id = true; + + tmp_cfg = *cfg; + if (alloc_token_id == true) { + tmp_cfg.token_id = ubcore_alloc_token_id(dev, NULL); + if (tmp_cfg.token_id == NULL) { + ubcore_log_err("alloc token id failed.\n"); + return NULL; + } + } + + tseg = dev->ops->register_seg(dev, &tmp_cfg, udata); if (tseg == NULL) { - ubcore_log_err("UBEP failed to register segment with va:%llu\n", cfg->va); + ubcore_log_err("UBEP failed to register segment with va:%llu\n", tmp_cfg.va); + if (alloc_token_id == true) + (void)ubcore_free_token_id(tmp_cfg.token_id); return NULL; } tseg->ub_dev = dev; tseg->uctx = ubcore_get_uctx(udata); - tseg->seg.len = cfg->len; - tseg->seg.ubva.va = cfg->va; - tseg->keyid = cfg->keyid; + tseg->seg.len = tmp_cfg.len; + tseg->seg.ubva.va = tmp_cfg.va; + tseg->token_id = tmp_cfg.token_id; - (void)memcpy(tseg->seg.ubva.eid.raw, dev->attr.eid.raw, UBCORE_EID_SIZE); + (void)memcpy(tseg->seg.ubva.eid.raw, dev->eid_table.eid_entries[cfg->eid_index].eid.raw, + UBCORE_EID_SIZE); (void)memcpy(&tseg->seg.attr, &cfg->flag, sizeof(union ubcore_reg_seg_flag)); + tseg->seg.attr.bs.user_token_id = tmp_cfg.flag.bs.token_id_valid; atomic_set(&tseg->use_cnt, 0); - if (tseg->keyid != NULL) - atomic_inc(&tseg->keyid->use_cnt); + if (tseg->token_id != NULL) + atomic_inc(&tseg->token_id->use_cnt); return tseg; } @@ -106,6 +136,8 @@ EXPORT_SYMBOL(ubcore_register_seg); int ubcore_unregister_seg(struct ubcore_target_seg *tseg) { + struct ubcore_token_id *token_id = NULL; + bool free_token_id = false; struct ubcore_device *dev; int ret; @@ -115,16 +147,27 @@ int ubcore_unregister_seg(struct ubcore_target_seg *tseg) } dev = tseg->ub_dev; - if (tseg->keyid != NULL) - atomic_dec(&tseg->keyid->use_cnt); + if (tseg->token_id != NULL) + atomic_dec(&tseg->token_id->use_cnt); + + if (tseg->seg.attr.bs.user_token_id == UBCORE_TOKEN_ID_INVALID && + dev->transport_type == UBCORE_TRANSPORT_UB && + tseg->token_id != NULL && tseg->uctx == NULL) { + free_token_id = true; + 
token_id = tseg->token_id; + } ret = dev->ops->unregister_seg(tseg); + + if (free_token_id == true && token_id != NULL) + (void)ubcore_free_token_id(token_id); + return ret; } EXPORT_SYMBOL(ubcore_unregister_seg); struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, - const struct ubcore_target_seg_cfg *cfg, + struct ubcore_target_seg_cfg *cfg, struct ubcore_udata *udata) { struct ubcore_target_seg *tseg; diff --git a/drivers/ub/urma/ubcore/ubcore_tp.c b/drivers/ub/urma/ubcore/ubcore_tp.c index 34104b8f050e..ff58899dbc53 100644 --- a/drivers/ub/urma/ubcore/ubcore_tp.c +++ b/drivers/ub/urma/ubcore/ubcore_tp.c @@ -22,21 +22,35 @@ #include #include #include +#include +#include #include "ubcore_log.h" #include "ubcore_netlink.h" #include "ubcore_priv.h" #include #include "ubcore_tp_table.h" #include "ubcore_tp.h" +#include "ubcore_msg.h" +#include "ubcore_vtp.h" #define UB_PROTOCOL_HEAD_BYTES 313 #define UB_MTU_BITS_BASE_SHIFT 7 -static inline uint32_t get_udrv_in_len(const struct ubcore_udata *udata) +struct ubcore_resp_args { + struct ubcore_udata *udata; + enum ubcore_transport_mode trans_mode; +}; + +static inline uint32_t get_udrv_in_len(struct ubcore_udata *udata) { return ((udata == NULL || udata->udrv_data == NULL) ? 0 : udata->udrv_data->in_len); } +static inline uint32_t get_udrv_out_len(const struct ubcore_udata *udata) +{ + return ((udata == NULL || udata->udrv_data == NULL) ? 
0 : udata->udrv_data->out_len); +} + static inline int get_udrv_in_data(uint8_t *dst, uint32_t dst_len, struct ubcore_udata *udata) { if (udata == NULL || udata->udrv_data == NULL || udata->udrv_data->in_len == 0) @@ -53,13 +67,6 @@ static inline int get_udrv_in_data(uint8_t *dst, uint32_t dst_len, struct ubcore } } -static inline void ubcore_set_net_addr_with_eid(struct ubcore_net_addr *net_addr, - const union ubcore_eid *eid) -{ - memset(net_addr, 0, sizeof(struct ubcore_net_addr)); - (void)memcpy(net_addr, eid, UBCORE_EID_SIZE); -} - static inline int ubcore_mtu_enum_to_int(enum ubcore_mtu mtu) { return 1 << ((int)mtu + UB_MTU_BITS_BASE_SHIFT); @@ -67,62 +74,27 @@ static inline int ubcore_mtu_enum_to_int(enum ubcore_mtu mtu) enum ubcore_mtu ubcore_get_mtu(int mtu) { - mtu = mtu - UB_PROTOCOL_HEAD_BYTES; + int tmp_mtu = mtu - UB_PROTOCOL_HEAD_BYTES; - if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_8192)) + if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_8192)) return UBCORE_MTU_8192; - if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_4096)) + if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_4096)) return UBCORE_MTU_4096; - else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_2048)) + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_2048)) return UBCORE_MTU_2048; - else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_1024)) + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_1024)) return UBCORE_MTU_1024; - else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_512)) + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_512)) return UBCORE_MTU_512; - else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_256)) + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_256)) return UBCORE_MTU_256; else return 0; } EXPORT_SYMBOL(ubcore_get_mtu); -static int ubcore_get_active_mtu(const struct ubcore_device *dev, uint8_t port_num, - enum ubcore_mtu *mtu) -{ - struct ubcore_device_status st = { 0 }; - - if (port_num >= dev->attr.port_cnt || dev->ops->query_device_status == NULL) { 
- ubcore_log_err("Invalid parameter"); - return -1; - } - if (dev->ops->query_device_status(dev, &st) != 0) { - ubcore_log_err("Failed to query query_device_status for port %d", port_num); - return -1; - } - if (st.port_status[port_num].state != UBCORE_PORT_ACTIVE) { - ubcore_log_err("Port %d is not active", port_num); - return -1; - } - *mtu = st.port_status[port_num].active_mtu; - return 0; -} - -static struct ubcore_nlmsg *ubcore_alloc_nlmsg(size_t payload_len, const union ubcore_eid *src_eid, - const union ubcore_eid *dst_eid) -{ - struct ubcore_nlmsg *msg = kzalloc(sizeof(struct ubcore_nlmsg) + payload_len, GFP_KERNEL); - - if (msg == NULL) - return NULL; - - msg->src_eid = *src_eid; - msg->dst_eid = *dst_eid; - msg->payload_len = payload_len; - return msg; -} - static struct ubcore_nlmsg *ubcore_get_destroy_tp_req(struct ubcore_tp *tp, - const struct ubcore_ta_data *ta) + const struct ubcore_ta_data *ta) { struct ubcore_nl_destroy_tp_req *destroy; struct ubcore_nlmsg *req; @@ -141,71 +113,18 @@ static struct ubcore_nlmsg *ubcore_get_destroy_tp_req(struct ubcore_tp *tp, if (ta != NULL) destroy->ta = *ta; else - destroy->ta.type = UBCORE_TA_NONE; + destroy->ta.ta_type = UBCORE_TA_NONE; return req; } -static int ubcore_init_create_tp_req(struct ubcore_nl_create_tp_req *create, struct ubcore_tp *tp, - const struct ubcore_ta_data *ta, struct ubcore_udata *udata) -{ - create->tpn = tp->tpn; - create->local_net_addr = tp->local_net_addr; - create->peer_net_addr = tp->peer_net_addr; - create->trans_mode = tp->trans_mode; - create->mtu = tp->mtu; - create->rx_psn = tp->rx_psn; - create->cfg.flag = tp->flag; - create->cfg.congestion_alg = tp->ub_dev->attr.dev_cap.congestion_ctrl_alg; - - if (ta != NULL) - create->ta = *ta; - else - create->ta.type = UBCORE_TA_NONE; - - create->ext_len = tp->tp_ext.len; - create->udrv_in_len = get_udrv_in_len(udata); - if (tp->tp_ext.len > 0) - (void)memcpy(create->ext_udrv, (void *)tp->tp_ext.addr, tp->tp_ext.len); - - if 
(get_udrv_in_data(create->ext_udrv + tp->tp_ext.len, create->udrv_in_len, udata) != 0) { - ubcore_log_err("Failed to get udrv data"); - return -1; - } - - return 0; -} - -static struct ubcore_nlmsg *ubcore_get_create_tp_req(struct ubcore_tp *tp, - struct ubcore_ta_data *ta, - struct ubcore_udata *udata) -{ - uint32_t payload_len = - sizeof(struct ubcore_nl_create_tp_req) + tp->tp_ext.len + get_udrv_in_len(udata); - struct ubcore_nlmsg *req; - - req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); - if (req == NULL) - return NULL; - - req->transport_type = tp->ub_dev->transport_type; - req->msg_type = UBCORE_NL_CREATE_TP_REQ; - - if (ubcore_init_create_tp_req((struct ubcore_nl_create_tp_req *)req->payload, tp, ta, - udata) != 0) { - kfree(req); - ubcore_log_err("Failed to init create tp req"); - return NULL; - } - return req; -} - -static int ubcore_set_tp_peer_ext(struct ubcore_tp_attr *attr, const uint8_t *ext_addr, - const uint32_t ext_len) +static int ubcore_set_tp_peer_ext(struct ubcore_tp_attr *attr, uint64_t ext_addr, + uint32_t ext_len) { void *peer_ext = NULL; + int ret; - if (ext_len == 0 || ext_addr == NULL) + if (ext_len == 0 || ext_addr == 0) return 0; /* copy resp ext from req or response */ @@ -213,11 +132,10 @@ static int ubcore_set_tp_peer_ext(struct ubcore_tp_attr *attr, const uint8_t *ex if (peer_ext == NULL) return -ENOMEM; - (void)memcpy(peer_ext, ext_addr, ext_len); - - attr->peer_ext.addr = (uintptr_t)peer_ext; + ret = (int)copy_from_user(peer_ext, (void __user *)(uintptr_t)ext_addr, ext_len); + attr->peer_ext.addr = (uint64_t)peer_ext; attr->peer_ext.len = ext_len; - return 0; + return ret; } static inline void ubcore_unset_tp_peer_ext(struct ubcore_tp_attr *attr) @@ -226,122 +144,25 @@ static inline void ubcore_unset_tp_peer_ext(struct ubcore_tp_attr *attr) kfree((void *)attr->peer_ext.addr); } -static int ubcore_negotiate_optimal_cc_alg(uint16_t local_congestion_alg, - uint16_t peer_local_congestion_alg) -{ - int i; - - /* 
TODO Configure congestion control priority based on UVS */ - for (i = 0; i <= UBCORE_TP_CC_DIP; i++) { - if ((0x1 << (uint32_t)i) & local_congestion_alg & peer_local_congestion_alg) - return i; - } - return -1; -} - -static int ubcore_set_initiator_peer(const struct ubcore_tp *tp, struct ubcore_tp_attr *attr, - union ubcore_tp_attr_mask *mask, - const struct ubcore_nl_create_tp_resp *resp) -{ - mask->value = 0; - mask->bs.flag = 1; - mask->bs.peer_tpn = 1; - mask->bs.mtu = 1; - mask->bs.tx_psn = 1; - mask->bs.state = 1; - - memset(attr, 0, sizeof(*attr)); - attr->flag.bs.oor_en = tp->flag.bs.oor_en & resp->flag.bs.oor_en; - attr->flag.bs.sr_en = tp->flag.bs.sr_en & resp->flag.bs.sr_en; - attr->flag.bs.spray_en = tp->flag.bs.spray_en & resp->flag.bs.spray_en; - attr->flag.bs.cc_en = tp->flag.bs.cc_en & resp->flag.bs.cc_en; - attr->flag.bs.cc_alg = resp->flag.bs.cc_alg; /* negotiated with the remote */ - attr->peer_tpn = resp->peer_tpn; - attr->mtu = min(tp->mtu, resp->peer_mtu); - attr->tx_psn = resp->peer_rx_psn; - attr->state = UBCORE_TP_STATE_RTS; - - if (tp->peer_ext.addr != 0) - return 0; - - mask->bs.peer_ext = 1; - return ubcore_set_tp_peer_ext(attr, resp->peer_ext, resp->peer_ext_len); -} - -static struct ubcore_nlmsg *ubcore_get_query_tp_req(struct ubcore_device *dev, - const union ubcore_eid *remote_eid, - enum ubcore_transport_mode trans_mode) -{ - uint32_t payload_len = sizeof(struct ubcore_nl_query_tp_req); - struct ubcore_nl_query_tp_req *query; - struct ubcore_nlmsg *req; - - req = ubcore_alloc_nlmsg(payload_len, &dev->attr.eid, remote_eid); - if (req == NULL) - return NULL; - - req->transport_type = dev->transport_type; - req->msg_type = UBCORE_NL_QUERY_TP_REQ; - query = (struct ubcore_nl_query_tp_req *)req->payload; - query->trans_mode = trans_mode; - return req; -} - -static int ubcore_query_tp(struct ubcore_device *dev, const union ubcore_eid *remote_eid, - enum ubcore_transport_mode trans_mode, - struct ubcore_nl_query_tp_resp *query_tp_resp) -{ 
- struct ubcore_nlmsg *req_msg, *resp_msg; - struct ubcore_nl_query_tp_resp *resp; - int ret = 0; - - req_msg = ubcore_get_query_tp_req(dev, remote_eid, trans_mode); - if (req_msg == NULL) { - ubcore_log_err("Failed to get query tp req"); - return -1; - } - - resp_msg = ubcore_nl_send_wait(req_msg); - if (resp_msg == NULL) { - ubcore_log_err("Failed to wait query response"); - kfree(req_msg); - return -1; - } - - resp = (struct ubcore_nl_query_tp_resp *)(void *)resp_msg->payload; - if (resp_msg->msg_type != UBCORE_NL_QUERY_TP_RESP || resp == NULL || - resp->ret != UBCORE_NL_RESP_SUCCESS) { - ret = -1; - ubcore_log_err("Query tp request is rejected with type %d ret %d", - resp_msg->msg_type, (resp == NULL ? 1 : resp->ret)); - } else { - (void)memcpy(query_tp_resp, resp, sizeof(struct ubcore_nl_query_tp_resp)); - } - kfree(resp_msg); - kfree(req_msg); - return ret; -} - -static void ubcore_get_ta_data_from_ta(const struct ubcore_ta *ta, struct ubcore_ta_data *ta_data) +static void ubcore_get_ta_data_from_ta(const struct ubcore_ta *ta, + enum ubcore_transport_type trans_type, struct ubcore_ta_data *ta_data) { struct ubcore_jetty *jetty; struct ubcore_jfs *jfs; - ta_data->type = ta->type; + ta_data->ta_type = ta->type; switch (ta->type) { case UBCORE_TA_JFS_TJFR: jfs = ta->jfs; - ta_data->jetty_id.eid = jfs->ub_dev->attr.eid; - if (jfs->uctx != NULL) - ta_data->jetty_id.uasid = jfs->uctx->uasid; + ta_data->jetty_id.eid = + jfs->ub_dev->eid_table.eid_entries[jfs->jfs_cfg.eid_index].eid; ta_data->jetty_id.id = jfs->id; ta_data->tjetty_id = ta->tjetty_id; break; case UBCORE_TA_JETTY_TJETTY: jetty = ta->jetty; - ta_data->jetty_id.eid = jetty->ub_dev->attr.eid; - if (jetty->uctx != NULL) - ta_data->jetty_id.uasid = jetty->uctx->uasid; + ta_data->jetty_id.eid = + jetty->ub_dev->eid_table.eid_entries[jetty->jetty_cfg.eid_index].eid; ta_data->jetty_id.id = jetty->id; ta_data->tjetty_id = ta->tjetty_id; break; @@ -350,43 +171,45 @@ static void ubcore_get_ta_data_from_ta(const 
struct ubcore_ta *ta, struct ubcore default: return; } + ta_data->trans_type = trans_type; } -static struct ubcore_nlmsg *ubcore_exchange_tp(struct ubcore_tp *tp, struct ubcore_ta *ta, - struct ubcore_udata *udata) +static int ubcore_nl_handle_create_tp_resp_cb(struct ubcore_device *dev, struct ubcore_msg *msg, + void *user_arg) { - struct ubcore_nlmsg *req_msg, *resp_msg; - - struct ubcore_nl_create_tp_resp *resp; - struct ubcore_ta_data ta_data = { 0 }; - - if (ta != NULL) - ubcore_get_ta_data_from_ta(ta, &ta_data); + struct ubcore_create_vtp_resp *resp; + struct ubcore_resp_args *input; + struct ubcore_udata *udata; + int ret = -1; - req_msg = ubcore_get_create_tp_req(tp, &ta_data, udata); - if (req_msg == NULL) { - ubcore_log_err("Failed to get create tp req"); - return NULL; + resp = (struct ubcore_create_vtp_resp *)msg->data; + if (resp->ret == UBCORE_MSG_RESP_FAIL) { + ubcore_log_err("failed to create vtp: response error"); + return -1; + } else if (resp->ret == UBCORE_MSG_RESP_IN_PROGRESS) { + ubcore_log_err("failed: try to create vtp which is being created. Try again later"); + return -1; + } else if (resp->ret == UBCORE_MSG_RESP_RC_JETTY_ALREADY_BIND) { + ubcore_log_err("failed: rc jetty already bind by other jetty"); + return -1; } - resp_msg = ubcore_nl_send_wait(req_msg); - if (resp_msg == NULL) { - ubcore_log_err("Failed to wait create_tp response %pI6c", &tp->peer_eid); - kfree(req_msg); - return NULL; - } + input = (struct ubcore_resp_args *)user_arg; + if (input->trans_mode == UBCORE_TP_RM) + /* There is no need to copy udrv out data, otherwise it will be overwritten. */ + return 0; - resp = (struct ubcore_nl_create_tp_resp *)(void *)resp_msg->payload; - if (resp_msg->msg_type != req_msg->msg_type + 1 || resp == NULL || - resp->ret != UBCORE_NL_RESP_SUCCESS) { - ubcore_log_err("Create tp request is rejected with type %d ret %d", - resp_msg->msg_type, (resp == NULL ? 
1 : resp->ret)); - kfree(resp_msg); - resp_msg = NULL; + udata = (struct ubcore_udata *)input->udata; + if (udata == NULL || udata->udrv_data->out_len < resp->udrv_out_len) { + ubcore_log_err( + "udata user mode len length is too small,udata->udrv_data->out_len: %d, resp_len: %d", + udata->udrv_data->out_len, resp->udrv_out_len); + return -1; } + ret = (int)copy_to_user((void __user *)(uintptr_t)udata->udrv_data->out_addr, + (char *)resp->udrv_out_data, resp->udrv_out_len); - kfree(req_msg); - return resp_msg; + return ret; } int ubcore_destroy_tp(struct ubcore_tp *tp) @@ -403,37 +226,35 @@ int ubcore_destroy_tp(struct ubcore_tp *tp) } EXPORT_SYMBOL(ubcore_destroy_tp); -static void ubcore_set_tp_flag(union ubcore_tp_flag *flag, const struct ubcore_tp_cfg *cfg, - const struct ubcore_device *dev) +static void ubcore_set_tp_flag(union ubcore_tp_flag *flag, struct ubcore_tp_cfg *cfg, + struct ubcore_device *dev) { flag->bs.target = cfg->flag.bs.target; - flag->bs.sr_en = cfg->flag.bs.sr_en; - flag->bs.spray_en = cfg->flag.bs.spray_en; - flag->bs.oor_en = cfg->flag.bs.oor_en; - flag->bs.cc_en = cfg->flag.bs.cc_en; } -static void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, const struct ubcore_tp_cfg *cfg) +void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, struct ubcore_tp_cfg *cfg) { ubcore_set_tp_flag(&tp->flag, cfg, tp->ub_dev); - tp->local_net_addr = cfg->local_net_addr; - tp->peer_net_addr = cfg->peer_net_addr; - tp->local_eid = cfg->local_eid; - tp->peer_eid = cfg->peer_eid; + if (tp->ub_dev->transport_type == UBCORE_TRANSPORT_IB || + (tp->ub_dev->transport_type == UBCORE_TRANSPORT_UB && + tp->trans_mode == UBCORE_TP_RC)) { + tp->local_jetty = cfg->local_jetty; + tp->peer_jetty = cfg->peer_jetty; + } else { + tp->local_eid = cfg->local_eid; + tp->peer_eid = cfg->peer_eid; + } + tp->trans_mode = cfg->trans_mode; - tp->rx_psn = cfg->rx_psn; tp->tx_psn = 0; - tp->mtu = cfg->mtu; - tp->data_udp_start = cfg->data_udp_start; - tp->ack_udp_start = cfg->ack_udp_start; - 
tp->udp_range = cfg->udp_range; tp->retry_num = cfg->retry_num; tp->ack_timeout = cfg->ack_timeout; - tp->tc = cfg->tc; + tp->dscp = cfg->dscp; + tp->oor_cnt = cfg->oor_cnt; } -static struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, - const struct ubcore_tp_cfg *cfg, +struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, + struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata) { struct ubcore_tp *tp = NULL; @@ -452,7 +273,7 @@ static struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, ubcore_set_tp_init_cfg(tp, cfg); tp->state = UBCORE_TP_STATE_RESET; tp->priv = NULL; - atomic_set(&tp->use_cnt, 1); + atomic_set(&tp->use_cnt, 0); return tp; } @@ -470,7 +291,7 @@ static int ubcore_destroy_peer_tp(struct ubcore_tp *tp, struct ubcore_ta *ta) } if (ta != NULL) - ubcore_get_ta_data_from_ta(ta, &ta_data); + ubcore_get_ta_data_from_ta(ta, tp->ub_dev->transport_type, &ta_data); req_msg = ubcore_get_destroy_tp_req(tp, &ta_data); if (req_msg == NULL) { @@ -515,7 +336,7 @@ static int ubcore_destroy_local_peer_tp(struct ubcore_tp *tp, struct ubcore_ta * return ubcore_destroy_tp(tp); } -static void ubcore_abort_tp(struct ubcore_tp *tp, struct ubcore_tp_meta *meta) +void ubcore_abort_tp(struct ubcore_tp *tp, struct ubcore_tp_meta *meta) { struct ubcore_tp *target; @@ -527,12 +348,11 @@ static void ubcore_abort_tp(struct ubcore_tp *tp, struct ubcore_tp_meta *meta) ubcore_log_warn("TP is not found, already removed or under use\n"); return; } - (void)ubcore_destroy_tp(tp); } -/* destroy initiator and peer tp created by ubcore_connect_vtp, called by ubcore_destroy_vtp */ -static int ubcore_disconnect_vtp(struct ubcore_tp *tp) +/* destroy initiator and peer tp created by ubcore_connect_fe_tp, called by ubcore_destroy_vtp */ +static int ubcore_disconnect_fe_tp(struct ubcore_tp *tp) { struct ubcore_tp_node *tp_node = tp->priv; struct ubcore_device *dev = tp->ub_dev; @@ -548,78 +368,25 @@ static int ubcore_disconnect_vtp(struct ubcore_tp *tp) return 0; } 
-static void ubcore_set_multipath_tp_cfg(struct ubcore_tp_cfg *cfg, - enum ubcore_transport_mode trans_mode, - struct ubcore_nl_query_tp_resp *query_tp_resp) +int ubcore_fill_netaddr_macvlan(struct ubcore_net_addr *netaddr, struct net_device *netdev, + enum ubcore_net_addr_type type) { - cfg->flag.bs.sr_en = query_tp_resp->cfg.flag.bs.sr_en; - cfg->flag.bs.spray_en = query_tp_resp->cfg.flag.bs.spray_en; - cfg->flag.bs.oor_en = query_tp_resp->cfg.flag.bs.oor_en; - cfg->flag.bs.cc_en = query_tp_resp->cfg.flag.bs.cc_en; - cfg->udp_range = query_tp_resp->cfg.tp_range; - if (trans_mode == UBCORE_TP_RC) { - cfg->data_udp_start = query_tp_resp->cfg.data_rctp_start; - cfg->ack_udp_start = query_tp_resp->cfg.ack_rctp_start; - } else if (trans_mode == UBCORE_TP_RM) { - cfg->data_udp_start = query_tp_resp->cfg.data_rmtp_start; - cfg->ack_udp_start = query_tp_resp->cfg.ack_rmtp_start; - } -} + netaddr->type = type; -static int ubcore_set_initiator_tp_cfg(struct ubcore_tp_cfg *cfg, struct ubcore_device *dev, - enum ubcore_transport_mode trans_mode, - const union ubcore_eid *remote_eid, - struct ubcore_nl_query_tp_resp *query_tp_resp) -{ - cfg->flag.value = 0; - cfg->flag.bs.target = 0; - cfg->trans_mode = trans_mode; - cfg->local_eid = dev->attr.eid; - - if (dev->attr.virtualization) { - cfg->peer_eid = *remote_eid; - ubcore_set_net_addr_with_eid(&cfg->local_net_addr, &dev->attr.eid); - ubcore_set_net_addr_with_eid(&cfg->peer_net_addr, remote_eid); - } else { - if (dev->netdev == NULL) - ubcore_log_warn("Could not find netdev.\n"); - - cfg->peer_eid = query_tp_resp->dst_eid; /* set eid to be the remote underlay eid */ - cfg->local_net_addr = query_tp_resp->src_addr; - if (dev->netdev != NULL && dev->netdev->dev_addr != NULL) - (void)memcpy(cfg->local_net_addr.mac, dev->netdev->dev_addr, - dev->netdev->addr_len); - if (dev->netdev != NULL) - cfg->local_net_addr.vlan = (uint64_t)dev->netdev->vlan_features; - cfg->peer_net_addr = query_tp_resp->dst_addr; - 
ubcore_set_multipath_tp_cfg(cfg, trans_mode, query_tp_resp); - } - - /* set mtu to active mtu temperately */ - if (ubcore_get_active_mtu(dev, 0, &cfg->mtu) != 0) { - ubcore_log_err("Failed to get active mtu"); + /* UB does not have a mac address + * to prevent the duplication of the mac address from hanging + */ + if (netdev->type == UBCORE_NETDEV_UB_TYPE) { + ubcore_log_err("Pure ub does not support uboe mac\n"); return -1; } - /* set psn to 0 temperately */ - cfg->rx_psn = 0; - return 0; -} - -static int ubcore_query_initiator_tp_cfg(struct ubcore_tp_cfg *cfg, struct ubcore_device *dev, - const union ubcore_eid *remote_eid, - enum ubcore_transport_mode trans_mode) -{ - struct ubcore_nl_query_tp_resp query_tp_resp; - - /* Do not query tp as TPS is not running on VM */ - if (dev->attr.virtualization) - return ubcore_set_initiator_tp_cfg(cfg, dev, trans_mode, remote_eid, NULL); + (void)memcpy(netaddr->mac, netdev->dev_addr, netdev->addr_len); + if (is_vlan_dev(netdev)) + netaddr->vlan = vlan_dev_vlan_id(netdev); + else + netaddr->vlan = 0; - if (ubcore_query_tp(dev, remote_eid, trans_mode, &query_tp_resp) != 0) { - ubcore_log_err("Failed to query tp"); - return -1; - } - return ubcore_set_initiator_tp_cfg(cfg, dev, trans_mode, NULL, &query_tp_resp); + return 0; } static int ubcore_modify_tp_to_rts(const struct ubcore_device *dev, struct ubcore_tp *tp) @@ -630,6 +397,7 @@ static int ubcore_modify_tp_to_rts(const struct ubcore_device *dev, struct ubcor mask.value = 0; mask.bs.state = 1; attr.state = UBCORE_TP_STATE_RTS; + attr.tx_psn = 0; if (dev->ops->modify_tp(tp, &attr, mask) != 0) { /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ @@ -643,12 +411,16 @@ static int ubcore_modify_tp_to_rts(const struct ubcore_device *dev, struct ubcor #define ubcore_mod_tp_attr_with_mask(tp, attr, field, mask) \ (tp->field = mask.bs.field ? 
attr->field : tp->field) -static void ubcore_modify_tp_attr(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr, - union ubcore_tp_attr_mask mask) +void ubcore_modify_tp_attr(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask) { - /* flag and mod flag must have the same layout */ - if (mask.bs.flag) - tp->flag.value = tp->flag.bs.target | (attr->flag.value << 1); + if (mask.bs.flag) { + tp->flag.bs.oor_en = attr->flag.bs.oor_en; + tp->flag.bs.sr_en = attr->flag.bs.sr_en; + tp->flag.bs.cc_en = attr->flag.bs.cc_en; + tp->flag.bs.cc_alg = attr->flag.bs.cc_alg; + tp->flag.bs.spray_en = attr->flag.bs.spray_en; + } ubcore_mod_tp_attr_with_mask(tp, attr, peer_tpn, mask); ubcore_mod_tp_attr_with_mask(tp, attr, state, mask); @@ -657,150 +429,51 @@ static void ubcore_modify_tp_attr(struct ubcore_tp *tp, const struct ubcore_tp_a ubcore_mod_tp_attr_with_mask(tp, attr, mtu, mask); ubcore_mod_tp_attr_with_mask(tp, attr, cc_pattern_idx, mask); ubcore_mod_tp_attr_with_mask(tp, attr, peer_ext, mask); -} - -static int ubcore_enable_tp(const struct ubcore_device *dev, struct ubcore_tp_node *tp_node, - struct ubcore_ta *ta, struct ubcore_udata *udata) -{ - struct ubcore_tp *tp = tp_node->tp; - struct ubcore_nlmsg *resp_msg; - union ubcore_tp_attr_mask mask; - struct ubcore_tp_attr attr; - int ret; - - /* Do not exchange tp with remote in the VM */ - if (dev->attr.virtualization) - return 0; - - mutex_lock(&tp_node->lock); - if (tp->state == UBCORE_TP_STATE_RTR) { - ret = ubcore_modify_tp_to_rts(dev, tp); - mutex_unlock(&tp_node->lock); - return ret; - } - mutex_unlock(&tp_node->lock); - - /* send request to connection agent and set peer cfg and peer ext from response */ - resp_msg = ubcore_exchange_tp(tp, ta, udata); - if (resp_msg == NULL) { - ubcore_log_err("Failed to exchange tp info"); - return -1; - } - - mutex_lock(&tp_node->lock); - if (tp->state == UBCORE_TP_STATE_RTS) { - mutex_unlock(&tp_node->lock); - kfree(resp_msg); - 
ubcore_log_info("TP %u is already at RTS", tp->tpn); - return 0; - } - - ret = ubcore_set_initiator_peer( - tp, &attr, &mask, - (const struct ubcore_nl_create_tp_resp *)(void *)resp_msg->payload); - - /* Here we can free resp msg after use */ - kfree(resp_msg); - - if (ret != 0) { - mutex_unlock(&tp_node->lock); - (void)ubcore_destroy_peer_tp(tp, ta); - ubcore_unset_tp_peer_ext(&attr); - ubcore_log_err("Failed to set initiator peer"); - return -1; - } - - ret = dev->ops->modify_tp(tp, &attr, mask); - if (ret != 0) { - mutex_unlock(&tp_node->lock); - (void)ubcore_destroy_peer_tp(tp, ta); - ubcore_unset_tp_peer_ext(&attr); - ubcore_log_err("Failed to modify tp"); - return -1; - } - ubcore_modify_tp_attr(tp, &attr, mask); - mutex_unlock(&tp_node->lock); - return 0; + ubcore_mod_tp_attr_with_mask(tp, attr, local_net_addr_idx, mask); } /* create vtp and connect to a remote vtp peer, called by ubcore_create_vtp */ -static struct ubcore_tp *ubcore_connect_vtp(struct ubcore_device *dev, - const union ubcore_eid *remote_eid, - enum ubcore_transport_mode trans_mode, - struct ubcore_udata *udata) +static struct ubcore_tp *ubcore_connect_fe_tp(struct ubcore_device *dev, + union ubcore_eid *remote_eid, enum ubcore_transport_mode trans_mode, + struct ubcore_udata *udata) { - struct ubcore_tp_cfg cfg = { 0 }; - struct ubcore_tp_node *tp_node; struct ubcore_tp *tp = NULL; - struct ubcore_ta ta; - - if (ubcore_query_initiator_tp_cfg(&cfg, dev, remote_eid, trans_mode) != 0) { - ubcore_log_err("Failed to init tp cfg"); - return NULL; - } - - tp = ubcore_create_tp(dev, &cfg, udata); - if (tp == NULL) { - ubcore_log_err("Failed to create tp"); - return NULL; - } - - tp_node = ubcore_add_tp_with_tpn(dev, tp); - if (tp_node == NULL) { - (void)ubcore_destroy_tp(tp); - ubcore_log_err("Failed to add vtp"); - return NULL; - } - ta.type = UBCORE_TA_VIRT; - /* send request to connection agent and set peer cfg and peer ext from response */ - if (ubcore_enable_tp(dev, tp_node, &ta, udata) != 0) 
{ - ubcore_remove_tp_node(&dev->ht[UBCORE_HT_TP], tp_node); - (void)ubcore_destroy_tp(tp); - ubcore_log_err("Failed to enable tp"); - return NULL; - } return tp; } -static int ubcore_set_target_peer(const struct ubcore_tp *tp, struct ubcore_tp_attr *attr, - union ubcore_tp_attr_mask *mask, - const struct ubcore_nl_create_tp_req *create) +static int ubcore_set_target_peer(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, struct ubcore_tp_attr *tp_attr, struct ubcore_udata udata) { - int ret; - mask->value = 0; mask->bs.peer_tpn = 1; mask->bs.mtu = 1; mask->bs.tx_psn = 1; mask->bs.state = 1; mask->bs.flag = 1; + mask->bs.data_udp_start = 1; + mask->bs.ack_udp_start = 1; + mask->bs.udp_range = 1; memset(attr, 0, sizeof(*attr)); - attr->peer_tpn = create->tpn; - attr->mtu = min(tp->mtu, create->mtu); - attr->tx_psn = create->rx_psn; - attr->state = UBCORE_TP_STATE_RTR; - - /* Negotiate local and remote optimal algorithms */ - ret = ubcore_negotiate_optimal_cc_alg(tp->ub_dev->attr.dev_cap.congestion_ctrl_alg, - create->cfg.congestion_alg); - if (ret == -1) { - ubcore_log_err("No congestion control algorithm available"); - return -1; - } - attr->flag.value = tp->flag.value >> 1; - attr->flag.bs.cc_alg = (enum ubcore_tp_cc_alg)ret; + attr->peer_tpn = tp_attr->peer_tpn; + attr->mtu = tp_attr->mtu; + attr->tx_psn = tp_attr->rx_psn; + attr->state = tp_attr->state; + attr->flag.value = tp_attr->flag.value; + attr->data_udp_start = tp_attr->data_udp_start; + attr->ack_udp_start = tp_attr->ack_udp_start; + attr->udp_range = tp_attr->udp_range; if (tp->peer_ext.addr != 0) return 0; mask->bs.peer_ext = 1; - return ubcore_set_tp_peer_ext(attr, create->ext_udrv, create->ext_len); + return ubcore_set_tp_peer_ext(attr, udata.udrv_data->in_addr, udata.udrv_data->in_len); } static struct ubcore_nlmsg *ubcore_get_destroy_tp_response(enum ubcore_nl_resp_status ret, - struct ubcore_nlmsg *req) + struct ubcore_nlmsg *req) { struct 
ubcore_nl_destroy_tp_resp *destroy_resp; struct ubcore_nlmsg *resp = NULL; @@ -824,8 +497,8 @@ static struct ubcore_nlmsg *ubcore_get_destroy_tp_response(enum ubcore_nl_resp_s static struct ubcore_nlmsg *ubcore_get_create_tp_response(struct ubcore_tp *tp, struct ubcore_nlmsg *req) { - uint32_t payload_len = - sizeof(struct ubcore_nl_create_tp_resp) + (tp == NULL ? 0 : tp->tp_ext.len); + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_nl_create_tp_resp) + + (tp == NULL ? 0 : tp->tp_ext.len); struct ubcore_nl_create_tp_resp *create_resp; struct ubcore_nlmsg *resp = NULL; @@ -851,63 +524,45 @@ static struct ubcore_nlmsg *ubcore_get_create_tp_response(struct ubcore_tp *tp, create_resp->peer_rx_psn = tp->rx_psn; create_resp->peer_ext_len = tp->tp_ext.len; if (tp->tp_ext.len > 0) - (void)memcpy(create_resp->peer_ext, (void *)tp->tp_ext.addr, tp->tp_ext.len); + (void)memcpy(create_resp->peer_ext, (void *)tp->tp_ext.addr, + tp->tp_ext.len); return resp; } -static void ubcore_set_multipath_target_tp_cfg(struct ubcore_tp_cfg *cfg, - enum ubcore_transport_mode trans_mode, - const struct ubcore_multipath_tp_cfg *tp_cfg) +static void ubcore_set_jetty_for_tp_param(struct ubcore_ta *ta, + enum ubcore_transport_mode trans_mode, struct ubcore_vtp_param *vtp_param) { - cfg->flag.bs.sr_en = tp_cfg->flag.bs.sr_en; - cfg->flag.bs.oor_en = tp_cfg->flag.bs.oor_en; - cfg->flag.bs.spray_en = tp_cfg->flag.bs.spray_en; - cfg->flag.bs.cc_en = tp_cfg->flag.bs.cc_en; - cfg->udp_range = tp_cfg->tp_range; - if (trans_mode == UBCORE_TP_RC) { - cfg->data_udp_start = tp_cfg->data_rctp_start; - cfg->ack_udp_start = tp_cfg->ack_rctp_start; - } else if (trans_mode == UBCORE_TP_RM) { - cfg->data_udp_start = tp_cfg->data_rmtp_start; - cfg->ack_udp_start = tp_cfg->ack_rmtp_start; - } -} + struct ubcore_jetty *jetty; + struct ubcore_jfs *jfs; -static int ubcore_set_target_tp_cfg(struct ubcore_tp_cfg *cfg, const struct ubcore_device *dev, - struct ubcore_nlmsg *req, struct ubcore_ta *ta) -{ - struct 
ubcore_nl_create_tp_req *create = - (struct ubcore_nl_create_tp_req *)(void *)req->payload; + if (ta == NULL) + return; - /* set ubcore_ta */ - cfg->ta = ta; - ubcore_set_multipath_target_tp_cfg(cfg, create->trans_mode, &create->cfg); - cfg->flag.bs.target = !create->cfg.flag.bs.target; - cfg->trans_mode = create->trans_mode; - cfg->local_eid = dev->attr.eid; /* or req->dst_eid */ - cfg->peer_eid = req->src_eid; - - if (dev->netdev == NULL) - ubcore_log_warn("Could not find netdev.\n"); - - cfg->local_net_addr = create->peer_net_addr; - if (dev->netdev != NULL && dev->netdev->dev_addr != NULL) - (void)memcpy(cfg->local_net_addr.mac, dev->netdev->dev_addr, dev->netdev->addr_len); - if (dev->netdev != NULL) - cfg->local_net_addr.vlan = (uint64_t)dev->netdev->vlan_features; - cfg->peer_net_addr = create->local_net_addr; - - /* set mtu to active mtu temperately */ - if (ubcore_get_active_mtu(dev, 0, &cfg->mtu) != 0) { - ubcore_log_err("Failed to get active mtu"); - return -1; + switch (ta->type) { + case UBCORE_TA_JFS_TJFR: + jfs = ta->jfs; + vtp_param->local_eid = + jfs->ub_dev->eid_table.eid_entries[jfs->jfs_cfg.eid_index].eid; + vtp_param->local_jetty = jfs->id; + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ta->jetty; + vtp_param->local_eid = + jetty->ub_dev->eid_table.eid_entries[jetty->jetty_cfg.eid_index].eid; + vtp_param->local_jetty = jetty->id; + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return; } - cfg->mtu = min(cfg->mtu, create->mtu); - /* set psn to 0 temperately */ - cfg->rx_psn = 0; - /* todonext: set cc */ - return 0; + vtp_param->trans_mode = trans_mode; + vtp_param->peer_eid = ta->tjetty_id.eid; + vtp_param->peer_jetty = ta->tjetty_id.id; + vtp_param->eid_index = 0; + vtp_param->wait = true; + vtp_param->ta = *ta; } static struct ubcore_tp *ubcore_create_target_tp(struct ubcore_device *dev, @@ -916,20 +571,19 @@ static struct ubcore_tp *ubcore_create_target_tp(struct ubcore_device *dev, struct ubcore_nl_create_tp_req 
*create = (struct ubcore_nl_create_tp_req *)(void *)req->payload; /* create tp parameters */ - struct ubcore_udrv_priv udrv_data = { .in_addr = (uintptr_t)(create->ext_udrv + - create->ext_len), - .in_len = create->udrv_in_len, - .out_addr = 0, - .out_len = 0 }; - struct ubcore_udata udata = { .uctx = NULL, .udrv_data = &udrv_data }; - struct ubcore_tp_cfg cfg = { 0 }; + struct ubcore_udrv_priv udrv_data = { + .in_addr = (uint64_t)(create->ext_udrv + create->ext_len), + .in_len = create->udrv_in_len, + .out_addr = 0, + .out_len = 0 + }; + struct ubcore_udata udata = { + .uctx = NULL, + .udrv_data = &udrv_data + }; + struct ubcore_tp_cfg cfg = {0}; struct ubcore_tp *tp = NULL; - if (ubcore_set_target_tp_cfg(&cfg, dev, req, ta) != 0) { - ubcore_log_err("Failed to init tp cfg in create target tp.\n"); - return NULL; - } - tp = ubcore_create_tp(dev, &cfg, &udata); if (tp == NULL) { ubcore_log_err("Failed to create tp in create target tp.\n"); @@ -939,8 +593,8 @@ static struct ubcore_tp *ubcore_create_target_tp(struct ubcore_device *dev, return tp; } -static int ubcore_modify_target_tp(const struct ubcore_device *dev, struct ubcore_tp_node *tp_node, - const struct ubcore_nl_create_tp_req *create) +int ubcore_modify_tp(struct ubcore_device *dev, struct ubcore_tp_node *tp_node, + struct ubcore_tp_attr *tp_attr, struct ubcore_udata udata) { struct ubcore_tp *tp = tp_node->tp; union ubcore_tp_attr_mask mask; @@ -951,11 +605,12 @@ static int ubcore_modify_target_tp(const struct ubcore_device *dev, struct ubcor switch (tp->state) { case UBCORE_TP_STATE_RTS: - ubcore_log_info("Reuse existing tp with tpn %u", tp->tpn); + ubcore_log_info("Reuse tp state:(RTS) with tpn %u, peer_tpn %u", + tp->tpn, tp->peer_tpn); break; case UBCORE_TP_STATE_RESET: /* Modify target tp to RTR */ - if (ubcore_set_target_peer(tp, &attr, &mask, create) != 0) { + if (ubcore_set_target_peer(tp, &attr, &mask, tp_attr, udata) != 0) { ubcore_log_err("Failed to set target peer"); ret = -1; break; @@ -967,20 
+622,17 @@ static int ubcore_modify_target_tp(const struct ubcore_device *dev, struct ubcor break; } ubcore_modify_tp_attr(tp, &attr, mask); - fallthrough; + ubcore_log_info( + "tp state:(RESET to RTR) with tpn %u, peer_tpn %u", tp->tpn, tp->peer_tpn); + break; case UBCORE_TP_STATE_RTR: - /* For RC target TP: modify to RTR only, to RTS when call bind_jetty; - * For IB RM target TP: modify to RTR only, to RTS when call advise_jetty - */ - if (tp->trans_mode == UBCORE_TP_RC || (dev->transport_type == UBCORE_TRANSPORT_IB)) - break; - - /* TRANSPORT_UB: modify target tp to RTS when receive ACK from intiator, - * currently, modify target tp to RTS immediately after target tp is modified to RTR - */ ret = ubcore_modify_tp_to_rts(dev, tp); + ubcore_log_info( + "tp state:(RTR to RTS) with tpn %u, peer_tpn %u", tp->tpn, tp->peer_tpn); break; - case UBCORE_TP_STATE_ERROR: + case UBCORE_TP_STATE_ERR: + ubcore_log_info("tp state: TP_STATE_ERR\n"); + fallthrough; default: ret = -1; break; @@ -994,8 +646,6 @@ static struct ubcore_tp *ubcore_accept_target_tp(struct ubcore_device *dev, struct ubcore_nlmsg *req, struct ubcore_tp_advice *advice) { - struct ubcore_nl_create_tp_req *create = - (struct ubcore_nl_create_tp_req *)(void *)req->payload; struct ubcore_tp_meta *meta = &advice->meta; struct ubcore_tp *new_tp = NULL; /* new created target tp */ struct ubcore_tp_node *tp_node; @@ -1021,12 +671,6 @@ static struct ubcore_tp *ubcore_accept_target_tp(struct ubcore_device *dev, new_tp = NULL; } } - - if (ubcore_modify_target_tp(dev, tp_node, create) != 0) { - ubcore_abort_tp(new_tp, meta); - ubcore_log_err("Failed to modify tp"); - return NULL; - } return tp_node->tp; } @@ -1035,18 +679,18 @@ static int ubcore_parse_ta(struct ubcore_device *dev, struct ubcore_ta_data *ta_ { struct ubcore_tp_meta *meta; struct ubcore_jetty *jetty; - struct ubcore_jfr *jfr; + struct ubcore_jfs *jfs; (void)memset(advice, 0, sizeof(struct ubcore_tp_advice)); meta = &advice->meta; - advice->ta.type = 
ta_data->type; + advice->ta.type = ta_data->ta_type; - switch (ta_data->type) { + switch (ta_data->ta_type) { case UBCORE_TA_JFS_TJFR: - jfr = ubcore_find_jfr(dev, ta_data->tjetty_id.id); - if (jfr != NULL) { - meta->ht = ubcore_get_tptable(jfr->tptable); - advice->ta.jfr = jfr; + jfs = ubcore_find_jfs(dev, ta_data->tjetty_id.id); + if (jfs != NULL) { + meta->ht = ubcore_get_tptable(jfs->tptable); + advice->ta.jfs = jfs; advice->ta.tjetty_id = ta_data->jetty_id; } break; @@ -1090,7 +734,7 @@ static struct ubcore_tp *ubcore_advise_target_tp(struct ubcore_device *dev, meta = &advice.meta; if (ubcore_parse_ta(dev, &create->ta, &advice) != 0) { - ubcore_log_err("Failed to parse ta with type %u", create->ta.type); + ubcore_log_err("Failed to parse ta with type %u", create->ta.ta_type); return NULL; } else if (meta->ht == NULL) { ubcore_log_err("tp table is already released"); @@ -1106,8 +750,6 @@ static struct ubcore_tp *ubcore_advise_target_tp(struct ubcore_device *dev, static struct ubcore_tp *ubcore_accept_target_vtp(struct ubcore_device *dev, struct ubcore_nlmsg *req) { - struct ubcore_nl_create_tp_req *create = - (struct ubcore_nl_create_tp_req *)(void *)req->payload; struct ubcore_tp_node *tp_node; struct ubcore_tp *tp = NULL; @@ -1122,22 +764,15 @@ static struct ubcore_tp *ubcore_accept_target_vtp(struct ubcore_device *dev, ubcore_log_err("Failed to add tp to the tp table in the device"); goto destroy_tp; } - - if (ubcore_modify_target_tp(dev, tp_node, create) != 0) { - ubcore_log_err("Failed to modify tp"); - goto remove_tp_node; - } - return tp; -remove_tp_node: - ubcore_remove_tp_node(&dev->ht[UBCORE_HT_TP], tp_node); destroy_tp: (void)ubcore_destroy_tp(tp); return NULL; } -static struct ubcore_tp *ubcore_bind_target_tp(struct ubcore_device *dev, struct ubcore_nlmsg *req) +static struct ubcore_tp *ubcore_bind_target_tp(struct ubcore_device *dev, + struct ubcore_nlmsg *req) { return ubcore_advise_target_tp(dev, req); } @@ -1162,7 +797,7 @@ struct ubcore_nlmsg 
*ubcore_handle_create_tp_req(struct ubcore_nlmsg *req) return ubcore_get_create_tp_response(NULL, req); } - if (create->ta.type == UBCORE_TA_VIRT) { + if (create->ta.ta_type == UBCORE_TA_VIRT) { tp = ubcore_accept_target_vtp(dev, req); } else if (create->trans_mode == UBCORE_TP_RC) { tp = ubcore_bind_target_tp(dev, req); @@ -1203,7 +838,7 @@ static int ubcore_unadvise_target_tp(struct ubcore_device *dev, meta = &advice.meta; if (ubcore_parse_ta(dev, &destroy->ta, &advice) != 0) { - ubcore_log_err("Failed to parse ta with type %u", destroy->ta.type); + ubcore_log_err("Failed to parse ta with type %u", destroy->ta.ta_type); return -1; } else if (meta->ht == NULL) { ubcore_log_warn("tp table is already released"); @@ -1249,7 +884,7 @@ struct ubcore_nlmsg *ubcore_handle_destroy_tp_req(struct ubcore_nlmsg *req) return ubcore_get_destroy_tp_response(UBCORE_NL_RESP_FAIL, req); } - if (destroy->ta.type == UBCORE_TA_VIRT) { + if (destroy->ta.ta_type == UBCORE_TA_VIRT) { ret = ubcore_unaccept_target_vtp(dev, destroy); } else if (destroy->trans_mode == UBCORE_TP_RC) { ret = ubcore_unbind_target_tp(dev, destroy); @@ -1262,7 +897,7 @@ struct ubcore_nlmsg *ubcore_handle_destroy_tp_req(struct ubcore_nlmsg *req) } EXPORT_SYMBOL(ubcore_handle_destroy_tp_req); -struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, const union ubcore_eid *remote_eid, +struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, union ubcore_eid *remote_eid, enum ubcore_transport_mode trans_mode, struct ubcore_udata *udata) { @@ -1274,8 +909,8 @@ struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, const union ubcor switch (dev->transport_type) { case UBCORE_TRANSPORT_IB: /* alpha */ - if (trans_mode == UBCORE_TP_RM || trans_mode == UBCORE_TP_RC) - return ubcore_connect_vtp(dev, remote_eid, trans_mode, udata); + if (trans_mode == UBCORE_TP_RM || trans_mode == UBCORE_TP_RC) + return ubcore_connect_fe_tp(dev, remote_eid, trans_mode, udata); break; case UBCORE_TRANSPORT_UB: /* beta 
*/ case UBCORE_TRANSPORT_IP: @@ -1303,7 +938,7 @@ int ubcore_destroy_vtp(struct ubcore_tp *vtp) switch (dev->transport_type) { case UBCORE_TRANSPORT_IB: /* alpha */ if (trans_mode == UBCORE_TP_RM || trans_mode == UBCORE_TP_RC) - return ubcore_disconnect_vtp(vtp); + return ubcore_disconnect_fe_tp(vtp); break; case UBCORE_TRANSPORT_UB: /* beta */ case UBCORE_TRANSPORT_IP: @@ -1316,28 +951,61 @@ int ubcore_destroy_vtp(struct ubcore_tp *vtp) } EXPORT_SYMBOL(ubcore_destroy_vtp); -static inline void ubcore_set_ta_for_tp_cfg(struct ubcore_device *dev, struct ubcore_ta *ta, - struct ubcore_tp_cfg *cfg) +static int ubcore_send_create_tp_req(struct ubcore_device *dev, struct ubcore_vtp_param *tp_param, + struct ubcore_udata *udata) { - if (dev->transport_type == UBCORE_TRANSPORT_IB) - cfg->ta = ta; - else - cfg->ta = NULL; + struct ubcore_create_vtp_req *data; + struct ubcore_resp_args user_arg; + struct ubcore_msg *req_msg; + struct ubcore_resp_cb cb; + uint32_t payload_len; + int ret; + + payload_len = (uint32_t)sizeof(struct ubcore_create_vtp_req) + get_udrv_in_len(udata); + + req_msg = kcalloc(1, sizeof(struct ubcore_msg) + payload_len, GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->hdr.type = UBCORE_MSG_TYPE_FE2TPF; + req_msg->hdr.opcode = UBCORE_MSG_CREATE_VTP; + req_msg->hdr.len = payload_len; + data = (struct ubcore_create_vtp_req *)req_msg->data; + data->trans_mode = tp_param->trans_mode; + data->local_eid = tp_param->local_eid; + data->peer_eid = tp_param->peer_eid; + data->eid_index = tp_param->eid_index; + data->local_jetty = tp_param->local_jetty; + data->peer_jetty = tp_param->peer_jetty; + (void)strcpy(data->dev_name, dev->dev_name); + data->virtualization = dev->attr.virtualization; + /* for alpha start */ + ubcore_get_ta_data_from_ta(&tp_param->ta, dev->transport_type, &data->ta_data); + data->udrv_in_len = get_udrv_in_len(udata); + data->udrv_out_len = get_udrv_out_len(udata); + if (get_udrv_in_data(data->udrv_data, 
get_udrv_in_len(udata), udata) != 0) { + ubcore_log_err("Failed to get udrv data"); + kfree(req_msg); + return -1; + } + /* for alpha end */ + user_arg.udata = udata; + user_arg.trans_mode = tp_param->trans_mode; + cb.callback = ubcore_nl_handle_create_tp_resp_cb; + cb.user_arg = &user_arg; + ret = ubcore_send_fe2tpf_msg(dev, req_msg, true, &cb); + if (ret != 0) + ubcore_log_err("send fe2tpf failed.\n"); + + return ret; } -int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, - struct ubcore_tp_advice *advice, struct ubcore_udata *udata) +int ubcore_bind_tp(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, struct ubcore_tp_advice *advice, struct ubcore_udata *udata) { struct ubcore_device *dev = jetty->ub_dev; - struct ubcore_tp_cfg cfg = { 0 }; + struct ubcore_vtp_param tp_param = { 0 }; struct ubcore_tp_node *tp_node; - struct ubcore_tp *new_tp = NULL; - - if (ubcore_query_initiator_tp_cfg(&cfg, dev, (union ubcore_eid *)&tjetty->cfg.id.eid, - tjetty->cfg.trans_mode) != 0) { - ubcore_log_err("Failed to init tp cfg.\n"); - return -1; - } mutex_lock(&tjetty->lock); if (tjetty->tp != NULL) { @@ -1345,40 +1013,22 @@ int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, ubcore_log_err("The same tjetty, different jetty, prevent duplicate bind.\n"); return -1; } + mutex_unlock(&tjetty->lock); - ubcore_set_ta_for_tp_cfg(dev, &advice->ta, &cfg); - - /* driver gurantee to return the same tp if we have created it as a target */ - new_tp = ubcore_create_tp(dev, &cfg, udata); - if (new_tp == NULL) { - ubcore_log_err("Failed to create tp.\n"); - mutex_unlock(&tjetty->lock); + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RC, &tp_param); + if (ubcore_send_create_tp_req(dev, &tp_param, udata) != 0) { + ubcore_log_err("Failed to send tp req"); return -1; } - - tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, &advice->meta.key, new_tp, - &advice->ta); + mutex_lock(&tjetty->lock); + tp_node = (struct 
ubcore_tp_node *)ubcore_hash_table_lookup(advice->meta.ht, + advice->meta.hash, &advice->meta.key); if (tp_node == NULL) { - (void)ubcore_destroy_tp(new_tp); mutex_unlock(&tjetty->lock); - ubcore_log_err("Failed to find and add tp\n"); return -1; - } else if (tp_node != NULL && tp_node->tp != new_tp) { - (void)ubcore_destroy_tp(new_tp); - new_tp = NULL; } tjetty->tp = tp_node->tp; mutex_unlock(&tjetty->lock); - - /* send request to connection agent and set peer cfg and peer ext from response */ - if (ubcore_enable_tp(dev, tp_node, &advice->ta, udata) != 0) { - mutex_lock(&tjetty->lock); - tjetty->tp = NULL; - mutex_unlock(&tjetty->lock); - ubcore_abort_tp(new_tp, &advice->meta); - ubcore_log_err("Failed to enable tp.\n"); - return -1; - } return 0; } EXPORT_SYMBOL(ubcore_bind_tp); @@ -1401,36 +1051,35 @@ int ubcore_unbind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, } EXPORT_SYMBOL(ubcore_unbind_tp); -int ubcore_advise_tp(struct ubcore_device *dev, const union ubcore_eid *remote_eid, - struct ubcore_tp_advice *advice, struct ubcore_udata *udata) +int ubcore_advise_tp(struct ubcore_device *dev, union ubcore_eid *remote_eid, + struct ubcore_tp_advice *advice, struct ubcore_udata *udata) { + struct ubcore_vtp_param tp_param = {0}; struct ubcore_tp_node *tp_node; - struct ubcore_tp_cfg cfg = { 0 }; struct ubcore_tp *new_tp; + struct ubcore_tp_cfg tp_cfg; /* Must call driver->create_tp with udata if we are advising jetty */ tp_node = ubcore_hash_table_lookup(advice->meta.ht, advice->meta.hash, &advice->meta.key); - if (tp_node != NULL && !tp_node->tp->flag.bs.target) { - atomic_inc(&tp_node->tp->use_cnt); + if (tp_node != NULL && !tp_node->tp->flag.bs.target) return 0; - } - - if (ubcore_query_initiator_tp_cfg(&cfg, dev, remote_eid, UBCORE_TP_RM) != 0) { - ubcore_log_err("Failed to init tp cfg"); - return -1; - } - ubcore_set_ta_for_tp_cfg(dev, &advice->ta, &cfg); + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RM, &tp_param); + 
tp_cfg.flag.bs.target = 0; + tp_cfg.local_eid = tp_param.local_eid; + tp_cfg.peer_eid = tp_param.peer_eid; + tp_cfg.trans_mode = tp_param.trans_mode; - /* driver gurantee to return the same tp if we have created it as a target */ - new_tp = ubcore_create_tp(dev, &cfg, udata); + /* advise tp requires the user to pass in the pin memory operation + * and cannot be used in the uvs context ioctl to create tp + */ + new_tp = ubcore_create_tp(dev, &tp_cfg, udata); if (new_tp == NULL) { ubcore_log_err("Failed to create tp"); return -1; } - tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, &advice->meta.key, new_tp, - &advice->ta); + &advice->ta); if (tp_node == NULL) { (void)ubcore_destroy_tp(new_tp); ubcore_log_err("Failed to find and add tp\n"); @@ -1440,29 +1089,73 @@ int ubcore_advise_tp(struct ubcore_device *dev, const union ubcore_eid *remote_e new_tp = NULL; } - if (ubcore_enable_tp(dev, tp_node, &advice->ta, udata) != 0) { - ubcore_abort_tp(new_tp, &advice->meta); - ubcore_log_err("Failed to enable tp"); + if (ubcore_send_create_tp_req(dev, &tp_param, udata) != 0) { + ubcore_abort_tp(tp_node->tp, &advice->meta); + ubcore_log_err("Failed to send tp req"); return -1; } - if (new_tp == NULL) - atomic_inc(&tp_node->tp->use_cnt); - return 0; } EXPORT_SYMBOL(ubcore_advise_tp); -int ubcore_unadvise_tp(struct ubcore_device *dev, struct ubcore_tp_advice *advice) +static int ubcore_handle_del_tp_resp(struct ubcore_device *dev, struct ubcore_msg *msg, + void *user_arg) { - struct ubcore_tp *tp = - ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, &advice->meta.key); - if (tp == NULL) { - ubcore_log_warn("TP is not found, already removed or under use\n"); - return 0; + struct ubcore_destroy_vtp_resp *resp = (struct ubcore_destroy_vtp_resp *)msg->data; + + if (resp->ret == UBCORE_MSG_RESP_FAIL) { + ubcore_log_err("failed to destroy vtp: response error"); + return -1; + } else if (resp->ret == UBCORE_MSG_RESP_IN_PROGRESS) { + ubcore_log_err("failed: try to 
del vtp which is being created. Try again later"); + return -1; } + return 0; +} + +static int ubcore_send_del_tp_req(struct ubcore_device *dev, struct ubcore_vtp_param *tp_param) +{ + struct ubcore_create_vtp_req *data; + struct ubcore_msg *req_msg; + struct ubcore_resp_cb cb; + int ret; - return ubcore_destroy_local_peer_tp(tp, &advice->ta); + req_msg = kcalloc(1, sizeof(struct ubcore_msg) + + sizeof(struct ubcore_create_vtp_req), GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->hdr.type = UBCORE_MSG_TYPE_FE2TPF; + req_msg->hdr.opcode = UBCORE_MSG_DESTROY_VTP; + req_msg->hdr.len = sizeof(struct ubcore_create_vtp_req); + data = (struct ubcore_create_vtp_req *)req_msg->data; + data->trans_mode = tp_param->trans_mode; + data->local_eid = tp_param->local_eid; + data->peer_eid = tp_param->peer_eid; + data->eid_index = tp_param->eid_index; + data->local_jetty = tp_param->local_jetty; + data->peer_jetty = tp_param->peer_jetty; + (void)strcpy(data->dev_name, dev->dev_name); + data->virtualization = dev->attr.virtualization; + /* for alpha start */ + ubcore_get_ta_data_from_ta(&tp_param->ta, dev->transport_type, &data->ta_data); + /* for alpha end */ + cb.callback = ubcore_handle_del_tp_resp; + cb.user_arg = NULL; + ret = ubcore_send_fe2tpf_msg(dev, req_msg, true, &cb); + if (ret != 0) + ubcore_log_err("send fe2tpf failed.\n"); + + return ret; +} + +int ubcore_unadvise_tp(struct ubcore_device *dev, struct ubcore_tp_advice *advice) +{ + struct ubcore_vtp_param tp_param; + + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RM, &tp_param); + return ubcore_send_del_tp_req(dev, &tp_param); } EXPORT_SYMBOL(ubcore_unadvise_tp); @@ -1486,7 +1179,7 @@ static void ubcore_get_ta_from_tp(struct ubcore_ta *ta, struct ubcore_tp *tp) static struct ubcore_nlmsg *ubcore_get_restore_tp_req(struct ubcore_tp *tp) { - uint32_t payload_len = sizeof(struct ubcore_nl_restore_tp_req); + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_nl_restore_tp_req); struct 
ubcore_nl_restore_tp_req *restore; struct ubcore_ta ta; struct ubcore_nlmsg *req; @@ -1504,7 +1197,7 @@ static struct ubcore_nlmsg *ubcore_get_restore_tp_req(struct ubcore_tp *tp) restore->rx_psn = get_random_u32(); ubcore_get_ta_from_tp(&ta, tp); - ubcore_get_ta_data_from_ta(&ta, &restore->ta); + ubcore_get_ta_data_from_ta(&ta, tp->ub_dev->transport_type, &restore->ta); return req; } @@ -1536,8 +1229,28 @@ static struct ubcore_nlmsg *ubcore_get_restore_tp_response(struct ubcore_nlmsg * return resp; } -static int ubcore_restore_tp_to_rts(const struct ubcore_device *dev, struct ubcore_tp *tp, - uint32_t rx_psn, uint32_t tx_psn) +static int ubcore_restore_tp_to_reset(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + attr.state = UBCORE_TP_STATE_RESET; + ubcore_log_info("restore tp to reset(mask): state: %u", mask.bs.state); + ubcore_log_info("restore tp to reset(attr): state: %u", (uint32_t)attr.state); + + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ + ubcore_log_err("Failed to modify tp"); + return -1; + } + tp->state = UBCORE_TP_STATE_RESET; + return 0; +} + +static int ubcore_restore_tp_to_rts(struct ubcore_device *dev, struct ubcore_tp *tp, + uint32_t rx_psn, uint32_t tx_psn) { union ubcore_tp_attr_mask mask; struct ubcore_tp_attr attr; @@ -1550,6 +1263,10 @@ static int ubcore_restore_tp_to_rts(const struct ubcore_device *dev, struct ubco attr.state = UBCORE_TP_STATE_RTS; attr.rx_psn = rx_psn; attr.tx_psn = tx_psn; + ubcore_log_info("restore tp to rts(mask): state: %u, rx_psn: %u, tx_psn: %u", + mask.bs.state, mask.bs.rx_psn, mask.bs.tx_psn); + ubcore_log_info("restore tp to rts(attr): state: %u, rx_psn: %u, tx_psn: %u", + (uint32_t)attr.state, attr.rx_psn, attr.tx_psn); if (dev->ops->modify_tp(tp, &attr, mask) != 0) { /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp 
*/ @@ -1564,6 +1281,93 @@ static int ubcore_restore_tp_to_rts(const struct ubcore_device *dev, struct ubco return 0; } +int ubcore_restore_tp_error_to_rtr(struct ubcore_device *dev, struct ubcore_tp *tp, + uint32_t rx_psn, uint32_t tx_psn, uint16_t data_udp_start, uint16_t ack_udp_start) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + mask.bs.rx_psn = 1; + mask.bs.tx_psn = 1; + mask.bs.data_udp_start = 1; + mask.bs.ack_udp_start = 1; + + attr.state = UBCORE_TP_STATE_RTR; + attr.rx_psn = rx_psn; + attr.tx_psn = tx_psn; + attr.data_udp_start = data_udp_start; + attr.ack_udp_start = ack_udp_start; + ubcore_log_info( + "restore tp to rtr(mask): state: %u, rx_psn: %u, tx_psn: %u, data_udp: %u, ack_udp: %u", + mask.bs.state, mask.bs.rx_psn, mask.bs.tx_psn, + mask.bs.data_udp_start, mask.bs.ack_udp_start); + ubcore_log_info( + "restore tp to rtr(attr): state: %u, rx_psn: %u, tx_psn: %u, data_udp: %hu, ack_udp: %hu", + (uint32_t)attr.state, attr.rx_psn, attr.tx_psn, + attr.data_udp_start, attr.ack_udp_start); + + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ + ubcore_log_err("Failed to modify tp"); + return -1; + } + + tp->state = UBCORE_TP_STATE_RTR; + tp->rx_psn = rx_psn; + tp->tx_psn = tx_psn; + tp->data_udp_start = data_udp_start; + tp->ack_udp_start = ack_udp_start; + + return 0; +} + +int ubcore_restore_tp_error_to_rts(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + + attr.state = UBCORE_TP_STATE_RTS; + + ubcore_log_info("restore tp to rts, state mask: %u state: %u", + mask.bs.state, (uint32_t)attr.state); + + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ + ubcore_log_err("Failed to modify tp"); + return -1; + } + + tp->state = UBCORE_TP_STATE_RTS; + + 
return 0; +} + +int ubcore_change_tp_to_err(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + + attr.state = UBCORE_TP_STATE_ERR; + + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ + ubcore_log_err("Failed to modify tp"); + return -1; + } + + tp->state = UBCORE_TP_STATE_ERR; + + return 0; +} + void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp) { struct ubcore_nlmsg *req_msg, *resp_msg; @@ -1574,9 +1378,9 @@ void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp) * Do not send retore tp req from target to inititor, * Do not restore UM TP, as it is only visable by the driver */ - if (dev->transport_type != UBCORE_TRANSPORT_IB || tp->flag.bs.target || tp->priv == NULL || - tp->trans_mode == UBCORE_TP_UM || tp->state != UBCORE_TP_STATE_ERROR || - !ubcore_have_tp_ops(dev)) + if (dev->transport_type != UBCORE_TRANSPORT_IB || tp->flag.bs.target || + tp->priv == NULL || tp->trans_mode == UBCORE_TP_UM || + tp->state != UBCORE_TP_STATE_ERR || !ubcore_have_tp_ops(dev)) return; req_msg = ubcore_get_restore_tp_req(tp); @@ -1612,6 +1416,110 @@ void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp) } EXPORT_SYMBOL(ubcore_restore_tp); +static struct ubcore_nlmsg *ubcore_get_tp_error_req(struct ubcore_tp *tp) +{ + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_tp_error_req); + struct ubcore_tp_error_req *error_req; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = tp->ub_dev->transport_type; + req->msg_type = UBCORE_NL_TP_ERROR_REQ; + error_req = (struct ubcore_tp_error_req *)(void *)req->payload; + error_req->tpgn = tp->tpg->tpgn; + error_req->tpn = tp->tpn; + error_req->data_udp_start = tp->data_udp_start; + error_req->ack_udp_start 
= tp->ack_udp_start; + error_req->tx_psn = tp->tx_psn; + error_req->peer_tpn = tp->peer_tpn; + error_req->trans_mode = tp->trans_mode; + error_req->sip_idx = tp->local_net_addr_idx; + error_req->local_eid = tp->local_eid; + error_req->peer_eid = tp->peer_eid; + ubcore_log_info("report tp error: tx_psn: %u, data_udp: %hu, ack_udp: %hu", + tp->tx_psn, tp->data_udp_start, tp->ack_udp_start); + if (tp->trans_mode == UBCORE_TP_RC) { + error_req->local_jetty_id = tp->local_jetty.id; + error_req->peer_jetty_id = tp->peer_jetty.id; + } + + return req; +} + +void ubcore_report_tp_error(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + struct ubcore_nlmsg *req_msg; + int ret; + + if (ubcore_restore_tp_to_reset(dev, tp) != 0) { + ubcore_log_err("Failed to restore tp to reset"); + return; + } + + req_msg = ubcore_get_tp_error_req(tp); + if (req_msg == NULL) { + ubcore_log_err("Failed to get tp error req"); + return; + } + + ret = ubcore_nl_send_nowait_without_cb(req_msg); + if (ret) + ubcore_log_err("Failed to nowait send tp error request"); + else + ubcore_log_info("Success to nowait send tp error request"); + + kfree(req_msg); +} +EXPORT_SYMBOL(ubcore_report_tp_error); + +static struct ubcore_nlmsg *ubcore_get_tp_suspend_req(struct ubcore_tp *tp) +{ + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_tp_suspend_req); + struct ubcore_tp_suspend_req *suspend_req; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = tp->ub_dev->transport_type; + req->msg_type = UBCORE_NL_TP_SUSPEND_REQ; + suspend_req = (struct ubcore_tp_suspend_req *)(void *)req->payload; + suspend_req->tpgn = tp->tpg->tpgn; + suspend_req->tpn = tp->tpn; + suspend_req->data_udp_start = tp->data_udp_start; + suspend_req->ack_udp_start = tp->ack_udp_start; + suspend_req->sip_idx = tp->local_net_addr_idx; + ubcore_log_info("report tp suspend: data_udp_start: %u, ack_udp_start: %u", + 
tp->data_udp_start, tp->ack_udp_start); + + return req; +} + +void ubcore_report_tp_suspend(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + struct ubcore_nlmsg *req_msg; + int ret; + + req_msg = ubcore_get_tp_suspend_req(tp); + if (req_msg == NULL) { + ubcore_log_err("Failed to get tp suspend req"); + return; + } + + ret = ubcore_nl_send_nowait_without_cb(req_msg); + if (ret) + ubcore_log_err("Failed to nowait send tp suspend request"); + else + ubcore_log_info("Success to nowait send tp suspend request"); + + kfree(req_msg); +} +EXPORT_SYMBOL(ubcore_report_tp_suspend); + /* restore target RM tp created by ubcore_advise_target_tp */ static struct ubcore_tp *ubcore_restore_advised_target_tp(struct ubcore_device *dev, struct ubcore_nl_restore_tp_req *restore) @@ -1623,7 +1531,7 @@ static struct ubcore_tp *ubcore_restore_advised_target_tp(struct ubcore_device * meta = &advice.meta; if (ubcore_parse_ta(dev, &restore->ta, &advice) != 0) { - ubcore_log_err("Failed to parse ta with type %u", restore->ta.type); + ubcore_log_err("Failed to parse ta with type %u", restore->ta.ta_type); return NULL; } else if (meta->ht == NULL) { ubcore_log_err("tp table is already released"); @@ -1656,7 +1564,7 @@ static struct ubcore_tp *ubcore_handle_restore_tp(struct ubcore_device *dev, struct ubcore_nl_restore_tp_req *restore) { if (dev->transport_type != UBCORE_TRANSPORT_IB || restore->trans_mode == UBCORE_TP_UM || - restore->ta.type == UBCORE_TA_NONE || restore->ta.type >= UBCORE_TA_VIRT) + restore->ta.ta_type == UBCORE_TA_NONE || restore->ta.ta_type >= UBCORE_TA_VIRT) return NULL; if (restore->trans_mode == UBCORE_TP_RM) @@ -1695,16 +1603,11 @@ struct ubcore_nlmsg *ubcore_handle_restore_tp_req(struct ubcore_nlmsg *req) } EXPORT_SYMBOL(ubcore_handle_restore_tp_req); -int ubcore_config_utp(struct ubcore_device *dev, const union ubcore_eid *eid, - const struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask mask) +int ubcore_config_utp(struct ubcore_device *dev, uint8_t utp_id, 
struct ubcore_utp_attr *attr, + union ubcore_utp_attr_mask mask) { - struct ubcore_res_dev_val dev_val = { 0 }; - struct ubcore_res_key key_val; - struct ubcore_res_val val; - uint32_t i; - - if (dev == NULL || eid == NULL || attr == NULL || dev->ops == NULL || - dev->ops->query_res == NULL || dev->ops->config_utp == NULL) { + if (dev == NULL || attr == NULL || dev->ops == NULL || + dev->ops->config_utp == NULL) { ubcore_log_err("dev ops has a null pointer.\n"); return -1; } @@ -1713,63 +1616,6 @@ int ubcore_config_utp(struct ubcore_device *dev, const union ubcore_eid *eid, "The configuration modification of this version of utp is not supported.\n"); return -1; } - // Query the utp_list under the device - val.addr = (uintptr_t)&dev_val; - val.len = sizeof(struct ubcore_res_dev_val); - key_val.type = UBCORE_RES_KEY_URMA_DEV; - key_val.key = eid->in4.addr; - if (dev->ops->query_res(dev, &key_val, &val) != 0) { - ubcore_log_err("failed to query res.\n"); - return -1; - } - for (i = 0; dev_val.utp_list != NULL && i < dev_val.utp_cnt; i++) { - if (dev->ops->config_utp(dev, dev_val.utp_list[i], attr, mask) != 0) { - ubcore_log_err("failed to config utp.\n"); - return -1; - } - } - return 0; + return dev->ops->config_utp(dev, utp_id, attr, mask); } EXPORT_SYMBOL(ubcore_config_utp); - -int ubcore_show_utp(struct ubcore_device *dev, const union ubcore_eid *eid) -{ - struct ubcore_res_dev_val dev_val = { 0 }; - struct ubcore_res_utp_val utp_val = { 0 }; - struct ubcore_res_key key_val; - struct ubcore_res_val val; - uint32_t i; - - if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->query_res == NULL) { - ubcore_log_err("dev ops has a null pointer.\n"); - return -1; - } - // Query the utp_list under the device - val.addr = (uintptr_t)&dev_val; - val.len = sizeof(struct ubcore_res_dev_val); - key_val.type = UBCORE_RES_KEY_URMA_DEV; - key_val.key = eid->in4.addr; - if (dev->ops->query_res(dev, &key_val, &val) != 0) { - ubcore_log_err("failed to query res.\n"); - 
return -1; - } - for (i = 0; dev_val.utp_list != NULL && i < dev_val.utp_cnt; i++) { - // Query the utp_val under the utp list - val.addr = (uintptr_t)&utp_val; - val.len = sizeof(struct ubcore_res_utp_val); - key_val.type = UBCORE_RES_KEY_UTP; - key_val.key = dev_val.utp_list[i]; - if (dev->ops->query_res(dev, &key_val, &val) != 0) { - ubcore_log_err("failed to query res.\n"); - return -1; - } - ubcore_log_info("-----------utp_info---------\n"); - ubcore_log_info("--utp_id: %d\n", (int)utp_val.utp_id); - ubcore_log_info("--spray_en: %d\n", (int)utp_val.spray_en); - ubcore_log_info("--data_udp_start: %d\n", (int)utp_val.data_udp_start); - ubcore_log_info("--udp_range: %d\n", (int)utp_val.udp_range); - ubcore_log_info("----------------------------\n"); - } - return 0; -} -EXPORT_SYMBOL(ubcore_show_utp); diff --git a/drivers/ub/urma/ubcore/ubcore_tp.h b/drivers/ub/urma/ubcore/ubcore_tp.h index 567f80d05642..2fb087ba6b03 100644 --- a/drivers/ub/urma/ubcore/ubcore_tp.h +++ b/drivers/ub/urma/ubcore/ubcore_tp.h @@ -22,8 +22,8 @@ #define UBCORE_TP_H #include -#include "ubcore_netlink.h" #include "ubcore_tp_table.h" +#include "ubcore_netlink.h" struct ubcore_tp_meta { struct ubcore_hash_table *ht; @@ -36,14 +36,14 @@ struct ubcore_tp_advice { struct ubcore_tp_meta meta; }; -static inline bool ubcore_have_tp_ops(const struct ubcore_device *dev) +static inline bool ubcore_have_tp_ops(struct ubcore_device *dev) { return (dev != NULL && dev->ops->create_tp != NULL && dev->ops->modify_tp != NULL && dev->ops->destroy_tp != NULL); } /* alpha */ -int ubcore_advise_tp(struct ubcore_device *dev, const union ubcore_eid *remote_eid, +int ubcore_advise_tp(struct ubcore_device *dev, union ubcore_eid *remote_eid, struct ubcore_tp_advice *advice, struct ubcore_udata *udata); int ubcore_unadvise_tp(struct ubcore_device *dev, struct ubcore_tp_advice *advice); @@ -62,4 +62,14 @@ int ubcore_destroy_tp(struct ubcore_tp *tp); /* restore tp from error state */ void ubcore_restore_tp(struct 
ubcore_device *dev, struct ubcore_tp *tp); +int ubcore_restore_tp_error_to_rtr(struct ubcore_device *dev, struct ubcore_tp *tp, + uint32_t rx_psn, uint32_t tx_psn, uint16_t data_udp_start, uint16_t ack_udp_start); +int ubcore_restore_tp_error_to_rts(struct ubcore_device *dev, struct ubcore_tp *tp); +int ubcore_change_tp_to_err(struct ubcore_device *dev, struct ubcore_tp *tp); + +void ubcore_report_tp_suspend(struct ubcore_device *dev, struct ubcore_tp *tp); +void ubcore_report_tp_error(struct ubcore_device *dev, struct ubcore_tp *tp); + +void ubcore_modify_tp_attr(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask); #endif diff --git a/drivers/ub/urma/ubcore/ubcore_tp_table.c b/drivers/ub/urma/ubcore/ubcore_tp_table.c index 2b2a26acb4d5..fbbfaf26c66d 100644 --- a/drivers/ub/urma/ubcore/ubcore_tp_table.c +++ b/drivers/ub/urma/ubcore/ubcore_tp_table.c @@ -24,7 +24,8 @@ #include "ubcore_tp.h" #include "ubcore_tp_table.h" -void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, const struct ubcore_jetty_id *jetty_id) +void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, + const struct ubcore_jetty_id *jetty_id) { memset(key, 0, sizeof(struct ubcore_tp_key)); key->key_type = UBCORE_TP_KEY_JETTY_ID; @@ -69,24 +70,26 @@ struct ubcore_tp *ubcore_find_remove_tp(struct ubcore_hash_table *ht, uint32_t h struct ubcore_hash_table *ubcore_create_tptable(void) { - struct ubcore_ht_param p = { .size = UBCORE_HASH_TABLE_SIZE, - .node_offset = offsetof(struct ubcore_tp_node, hnode), - .key_offset = offsetof(struct ubcore_tp_node, key), - .key_size = sizeof(struct ubcore_tp_key), - .cmp_f = NULL, - .free_f = NULL }; - struct ubcore_hash_table *ht; - - ht = kcalloc(1, sizeof(struct ubcore_hash_table), GFP_KERNEL); - if (ht == NULL) + struct ubcore_ht_param p = { + .size = UBCORE_HASH_TABLE_SIZE, + .node_offset = offsetof(struct ubcore_tp_node, hnode), + .key_offset = offsetof(struct ubcore_tp_node, key), + .key_size = (uint32_t)sizeof(struct 
ubcore_tp_key), + .cmp_f = NULL, + .free_f = NULL + }; + struct ubcore_hash_table *htable; + + htable = kcalloc(1, sizeof(struct ubcore_hash_table), GFP_KERNEL); + if (htable == NULL) return NULL; - if (ubcore_hash_table_alloc(ht, &p) != 0) { - kfree(ht); + if (ubcore_hash_table_alloc(htable, &p) != 0) { + kfree(htable); ubcore_log_err("Failed to calloc jfs tp hash table"); return NULL; } - return ht; + return htable; } static void ubcore_free_tp_node(void *obj) @@ -145,7 +148,6 @@ struct ubcore_tp_node *ubcore_add_tp_node(struct ubcore_hash_table *ht, uint32_t if (new_tp_node == NULL) return NULL; - new_tp_node->key = *key; new_tp_node->tp = tp; new_tp_node->ta = *ta; diff --git a/drivers/ub/urma/ubcore/ubcore_tp_table.h b/drivers/ub/urma/ubcore/ubcore_tp_table.h index 5aa0f70ab9dd..abf6e12c4fc6 100644 --- a/drivers/ub/urma/ubcore/ubcore_tp_table.h +++ b/drivers/ub/urma/ubcore/ubcore_tp_table.h @@ -42,7 +42,8 @@ struct ubcore_tp_node { struct mutex lock; }; -void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, const struct ubcore_jetty_id *jetty_id); +void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, + const struct ubcore_jetty_id *jetty_id); /* Return old tp node if key already exists */ struct ubcore_tp_node *ubcore_add_tp_node(struct ubcore_hash_table *ht, uint32_t hash, @@ -61,5 +62,4 @@ void ubcore_put_tptable(struct ubcore_hash_table *ht); struct ubcore_tp_node *ubcore_add_tp_with_tpn(struct ubcore_device *dev, struct ubcore_tp *tp); struct ubcore_tp *ubcore_remove_tp_with_tpn(struct ubcore_device *dev, uint32_t tpn); - #endif diff --git a/drivers/ub/urma/ubcore/ubcore_tpg.c b/drivers/ub/urma/ubcore/ubcore_tpg.c new file mode 100644 index 000000000000..4e446b4ad5ea --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tpg.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore tpg implementation + * Author: Yan Fangfang + * Create: 2023-07-17 + * Note: + * History: 2023-07-17: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tpg.h" + +struct ubcore_tpg *ubcore_create_tpg(struct ubcore_device *dev, struct ubcore_tpg_cfg *cfg) +{ + struct ubcore_tpg *tpg; + uint32_t i; + int ret; + + if (dev->ops == NULL || dev->ops->create_tpg == NULL) + return NULL; + + tpg = dev->ops->create_tpg(dev, cfg, NULL); + if (tpg == NULL) { + ubcore_log_err("Failed to create tpg"); + return NULL; + } + tpg->ub_dev = dev; + tpg->tpg_cfg = *cfg; + for (i = 0; i < cfg->tp_cnt; i++) + tpg->tp_list[i] = NULL; + atomic_set(&tpg->use_cnt, 1); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_TPG], &tpg->hnode, tpg->tpgn); + if (ret != 0) { + (void)dev->ops->destroy_tpg(tpg); + tpg = NULL; + ubcore_log_err("Failed to add tpg to the tpg table"); + } + return tpg; +} + +int ubcore_destroy_tpg(struct ubcore_tpg *tpg) +{ + struct ubcore_device *dev = tpg->ub_dev; + int ret; + + if (dev->ops == NULL || dev->ops->destroy_tpg == NULL) + return -EINVAL; + + if (atomic_dec_return(&tpg->use_cnt) > 0) { + ubcore_log_err("tpg in use"); + return -EBUSY; + } + + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_TPG], &tpg->hnode); + + ret = dev->ops->destroy_tpg(tpg); + if (ret != 0) { + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_TPG], &tpg->hnode, tpg->tpgn); + /* inc tpg use cnt? 
*/ + ubcore_log_err("Failed to destroy tpg"); + } + + return ret; +} + +struct ubcore_tpg *ubcore_find_tpg(struct ubcore_device *dev, uint32_t tpgn) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_TPG], tpgn, &tpgn); +} + +static void ubcore_set_tp_init_flag(union ubcore_tp_flag *flag, union ubcore_tp_cfg_flag in) +{ + flag->bs.target = in.bs.target; + flag->bs.loopback = in.bs.loopback; + flag->bs.ack_resp = in.bs.ack_resp; + flag->bs.bonding = in.bs.bonding; + flag->bs.dca_enable = in.bs.dca_enable; +} + +/* todonext: merge with the function in tp.c */ +static void ubcore_store_tp_init_cfg(struct ubcore_tpg *tpg, struct ubcore_tp *tp, + struct ubcore_device *dev, const struct ubcore_tp_cfg *cfg) +{ + ubcore_set_tp_init_flag(&tp->flag, cfg->flag); + tp->local_jetty = cfg->local_jetty; + tp->peer_jetty = cfg->peer_jetty; + tp->trans_mode = cfg->trans_mode; + tp->retry_num = cfg->retry_num; + tp->ack_timeout = cfg->ack_timeout; + tp->retry_factor = cfg->retry_factor; + tp->dscp = cfg->dscp; + tp->oor_cnt = cfg->oor_cnt; + + tp->ub_dev = dev; + tp->state = UBCORE_TP_STATE_RESET; + tp->tpg = tpg; + tp->priv = NULL; + atomic_set(&tp->use_cnt, 1); +} + +int ubcore_create_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg, + struct ubcore_tp_cfg *cfg) +{ + struct ubcore_tp *tp[UBCORE_MAX_TP_CNT_IN_GRP]; + uint32_t tp_cnt = tpg->tpg_cfg.tp_cnt; + uint32_t i; + int ret; + + if (dev->ops == NULL || dev->ops->create_multi_tp == NULL || + dev->ops->destroy_multi_tp == NULL) + return -EINVAL; + + ret = dev->ops->create_multi_tp(dev, tp_cnt, cfg, NULL, tp); + if (ret != (int)tp_cnt) { + if (ret > 0) + (void)dev->ops->destroy_multi_tp(ret, tp); + ubcore_log_err("Failed to create multi tp"); + return -ENOSPC; + } + + /* add tp to tpg */ + for (i = 0; i < tp_cnt; i++) { + ubcore_store_tp_init_cfg(tpg, tp[i], dev, &cfg[i]); + tpg->tp_list[i] = tp[i]; + } + + return 0; +} + +uint32_t ubcore_destroy_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg) +{ + 
struct ubcore_tp *tp[UBCORE_MAX_TP_CNT_IN_GRP]; + uint32_t tp_cnt = tpg->tpg_cfg.tp_cnt; + int ret; + int i; + + if (dev->ops == NULL || dev->ops->destroy_multi_tp == NULL) + return 0; + + for (i = 0; i < (int)tp_cnt; i++) + tp[i] = tpg->tp_list[i]; + + /* todonext: modify to error, and reset first */ + ret = dev->ops->destroy_multi_tp(tp_cnt, tp); + if (ret != (int)tp_cnt) + ubcore_log_err("Failed to destroy multi tp %d", ret); + + for (i = 0; i < ret; i++) + tpg->tp_list[i] = NULL; + + return (ret > 0 ? (uint32_t)ret : 0); +} + +uint32_t ubcore_modify_tp_in_tpg(struct ubcore_device *dev, struct ubcore_tpg *tpg, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask *mask, struct ubcore_tp **failed_tp) +{ + int ret; + int i; + + if (dev->ops == NULL || dev->ops->modify_multi_tp == NULL) + return 0; + + ret = dev->ops->modify_multi_tp(tpg->tpg_cfg.tp_cnt, tpg->tp_list, attr, mask, failed_tp); + if (ret != (int)tpg->tpg_cfg.tp_cnt) + ubcore_log_err("Failed to modify multi tp %d", ret); + + for (i = 0; i < ret; i++) + ubcore_modify_tp_attr(tpg->tp_list[i], &attr[i], mask[i]); + + return (ret > 0 ? (uint32_t)ret : 0); +} + +struct ubcore_tp *ubcore_find_tp_in_tpg(struct ubcore_tpg *tpg, uint32_t tpn) +{ + uint32_t i; + + for (i = 0; i < tpg->tpg_cfg.tp_cnt; i++) { + if (tpg->tp_list[i] == NULL || tpg->tp_list[i]->tpn != tpn) + continue; + + return tpg->tp_list[i]; + } + + return NULL; +} diff --git a/drivers/ub/urma/ubcore/ubcore_tpg.h b/drivers/ub/urma/ubcore/ubcore_tpg.h new file mode 100644 index 000000000000..7ef40bd0d222 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tpg.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore tpg header + * Author: Yan Fangfang + * Create: 2023-07-17 + * Note: + * History: 2023-07-17: Create file + */ +#ifndef UBCORE_TPG_H +#define UBCORE_TPG_H + +#include + +struct ubcore_tpg *ubcore_create_tpg(struct ubcore_device *dev, struct ubcore_tpg_cfg *cfg); +int ubcore_destroy_tpg(struct ubcore_tpg *tpg); +struct ubcore_tpg *ubcore_find_tpg(struct ubcore_device *dev, uint32_t tpgn); +int ubcore_create_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg, + struct ubcore_tp_cfg *cfg); +uint32_t ubcore_destroy_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg); +uint32_t ubcore_modify_tp_in_tpg(struct ubcore_device *dev, struct ubcore_tpg *tpg, + struct ubcore_tp_attr *attr, union ubcore_tp_attr_mask *mask, struct ubcore_tp **failed_tp); +struct ubcore_tp *ubcore_find_tp_in_tpg(struct ubcore_tpg *tpg, uint32_t tpn); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_umem.c b/drivers/ub/urma/ubcore/ubcore_umem.c index 433c3e89116e..a7a6e582d4bd 100644 --- a/drivers/ub/urma/ubcore/ubcore_umem.c +++ b/drivers/ub/urma/ubcore/ubcore_umem.c @@ -35,7 +35,6 @@ static void umem_unpin_pages(struct ubcore_umem *umem, uint64_t nents) for_each_sg(umem->sg_head.sgl, sg, nents, i) { struct page *page = sg_page(sg); - unpin_user_page(page); } sg_free_table(&umem->sg_head); @@ -103,8 +102,8 @@ static int umem_add_new_pinned(struct ubcore_umem *umem, uint64_t npages) return 0; } -static uint64_t umem_pin_all_pages(struct ubcore_umem *umem, uint64_t npages, uint32_t gup_flags, - struct page **page_list) +static uint64_t umem_pin_all_pages(struct ubcore_umem *umem, uint64_t npages, + uint32_t gup_flags, struct page **page_list) { struct scatterlist *sg_list_start = umem->sg_head.sgl; uint64_t 
cur_base = umem->va & PAGE_MASK; @@ -127,11 +126,12 @@ static uint64_t umem_pin_all_pages(struct ubcore_umem *umem, uint64_t npages, ui return npages; } -static int umem_verify_input(const struct ubcore_device *ub_dev, uint64_t va, uint64_t len, +static int umem_verify_input(struct ubcore_device *ub_dev, uint64_t va, uint64_t len, union ubcore_umem_flag flag) { - if (ub_dev == NULL || ((va + len) < va) || PAGE_ALIGN(va + len) < (va + len)) { - ubcore_log_err("Invalid parameter.\n"); + if (ub_dev == NULL || ((va + len) < va) || + PAGE_ALIGN(va + len) < (va + len)) { + ubcore_log_err("Invalid parameter, va: %llx, len: %llx.\n", va, len); return -EINVAL; } if (flag.bs.non_pin == 1) { @@ -147,8 +147,8 @@ static int umem_dma_map(struct ubcore_umem *umem, uint64_t npages, unsigned long { int ret; - ret = dma_map_sg_attrs(umem->ub_dev->dma_dev, umem->sg_head.sgl, npages, DMA_BIDIRECTIONAL, - dma_attrs); + ret = dma_map_sg_attrs(umem->ub_dev->dma_dev, umem->sg_head.sgl, npages, + DMA_BIDIRECTIONAL, dma_attrs); if (ret == 0) { ubcore_log_err("Dma map failed, ret: %d\n", ret); return -ENOMEM; @@ -224,8 +224,8 @@ static struct ubcore_umem *ubcore_get_target_umem(struct ubcore_device *dev, uin return ret != 0 ? ERR_PTR(ret) : umem; } -struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, uint64_t len, - union ubcore_umem_flag flag) +struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag) { struct page **page_list; int ret; diff --git a/drivers/ub/urma/ubcore/ubcore_utp.c b/drivers/ub/urma/ubcore/ubcore_utp.c new file mode 100644 index 000000000000..478fed8ce25d --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_utp.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore utp implementation + * Author: Ji Lei + * Create: 2023-08-03 + * Note: + * History: 2023-08-03: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_utp.h" + +static int utp_get_active_mtu(struct ubcore_device *dev, uint8_t port_num, + enum ubcore_mtu *mtu) +{ + struct ubcore_device_status st = { 0 }; + + if (port_num >= dev->attr.port_cnt || dev->ops->query_device_status == NULL) { + ubcore_log_err("Invalid parameter"); + return -1; + } + if (dev->ops->query_device_status(dev, &st) != 0) { + ubcore_log_err("Failed to query query_device_status for port %d", port_num); + return -1; + } + if (st.port_status[port_num].state != UBCORE_PORT_ACTIVE) { + ubcore_log_err("Port %d is not active", port_num); + return -1; + } + *mtu = st.port_status[port_num].active_mtu; + return 0; +} + +struct ubcore_utp *ubcore_create_utp(struct ubcore_device *dev, struct ubcore_utp_cfg *cfg) +{ + struct ubcore_utp *utp; + enum ubcore_mtu mtu; + int ret; + + if (dev->ops == NULL || dev->ops->create_utp == NULL) + return NULL; + + if (((int32_t)cfg->mtu) == 0) { + ret = utp_get_active_mtu(dev, (uint8_t)cfg->port_id, &mtu); + if (ret < 0) { + ubcore_log_warn("Failed to get active mtu, use default 1024"); + mtu = UBCORE_MTU_1024; + } + cfg->mtu = mtu; + ubcore_log_info("Global cfg not config, device mtu is %d", (int32_t)cfg->mtu); + } + + ubcore_log_info("Utp mtu config to %u", (int32_t)cfg->mtu); + + utp = dev->ops->create_utp(dev, cfg, NULL); + if (utp == NULL) { 
+ ubcore_log_err("Failed to create utp"); + return NULL; + } + utp->ub_dev = dev; + utp->utp_cfg = *cfg; + atomic_set(&utp->use_cnt, 1); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_UTP], &utp->hnode, utp->utpn); + if (ret != 0) { + (void)dev->ops->destroy_utp(utp); + utp = NULL; + ubcore_log_err("Failed to add utp to the utp table"); + return utp; + } + + ubcore_log_info("Success to create utp, utp_idx %u", utp->utpn); + return utp; +} + +int ubcore_destroy_utp(struct ubcore_utp *utp) +{ + struct ubcore_device *dev = utp->ub_dev; + uint32_t utp_idx = utp->utpn; + int ret; + + if (dev->ops == NULL || dev->ops->destroy_utp == NULL) + return -EINVAL; + + if (atomic_dec_return(&utp->use_cnt) > 0) { + ubcore_log_err("utp in use"); + return -EBUSY; + } + + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_UTP], &utp->hnode); + + ret = dev->ops->destroy_utp(utp); + if (ret != 0) { + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_UTP], &utp->hnode, utp->utpn); + /* inc utp use cnt? */ + ubcore_log_err("Failed to destroy utp"); + return ret; + } + + ubcore_log_info("Success to destroy utp, utp_idx %u", utp_idx); + return ret; +} + +struct ubcore_utp *ubcore_find_utp(struct ubcore_device *dev, uint32_t idx) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_UTP], idx, &idx); +} diff --git a/drivers/ub/urma/ubcore/ubcore_utp.h b/drivers/ub/urma/ubcore/ubcore_utp.h new file mode 100644 index 000000000000..edfea4ca9b3d --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_utp.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore utp header + * Author: Ji Lei + * Create: 2023-08-03 + * Note: + * History: 2023-08-03: Create file + */ + +#ifndef UBCORE_UTP_H +#define UBCORE_UTP_H + +#include + +struct ubcore_utp *ubcore_create_utp(struct ubcore_device *dev, struct ubcore_utp_cfg *cfg); +int ubcore_destroy_utp(struct ubcore_utp *utp); +struct ubcore_utp *ubcore_find_utp(struct ubcore_device *dev, uint32_t idx); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c new file mode 100644 index 000000000000..a3b8a5c66b38 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c @@ -0,0 +1,2148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore uvs cmd implement + * Author: Ji Lei + * Create: 2023-07-03 + * Note: + * History: 2023-07-03: create file + */ + +#include +#include +#include "ubcore_priv.h" +#include "ubcore_tpg.h" +#include "ubcore_utp.h" +#include "ubcore_ctp.h" +#include "ubcore_netdev.h" +#include "ubcore_tp.h" +#include +#include "ubcore_uvs_cmd.h" + +static int ubcore_uvs_cmd_channel_init(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_channel_init arg = {0}; + int ret; + + ret = ubcore_copy_from_user(&arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct ubcore_cmd_channel_init)); + if (ret != 0) + return -EPERM; + + if (strlen(arg.in.userspace_in) == 0 || strcmp(arg.in.userspace_in, "Hello ubcore!") != 0) + return -EPERM; + + ubcore_log_info("ubcore recv uvs user space call, ctx is %s.\n", arg.in.userspace_in); + + (void)strncpy(arg.out.kernel_out, "Hello uvs!", strlen("Hello uvs!")); + + ret = ubcore_copy_to_user( + (void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_channel_init) + ); + if (ret != 0) + return -EPERM; + + return 0; +} + +static void ubcore_set_tp_cfg_with_cmd(struct ubcore_tp_cfg *cfg, struct ubcore_cmd_tp_cfg *cmd) +{ + cfg->flag = cmd->flag; + cfg->local_jetty = cmd->local_jetty; + cfg->peer_jetty = cmd->peer_jetty; + cfg->trans_mode = cmd->trans_mode; + cfg->retry_num = cmd->retry_num; + cfg->retry_factor = cmd->retry_factor; + cfg->ack_timeout = cmd->ack_timeout; + cfg->dscp = cmd->dscp; + cfg->oor_cnt = cmd->oor_cnt; + cfg->fe_idx = cmd->fe_idx; +} + +static struct ubcore_tp_cfg *ubcore_get_multi_tp_cfg(uint32_t tp_cnt, + struct ubcore_cmd_tp_cfg *arg, struct ubcore_tpg *tpg) +{ + struct ubcore_tp_cfg *tp_cfgs; + uint32_t i; + + tp_cfgs = kcalloc(1, tp_cnt * sizeof(struct ubcore_tp_cfg), GFP_KERNEL); + if (tp_cfgs == NULL) + return NULL; + + for (i = 0; i < tp_cnt; i++) { + ubcore_set_tp_cfg_with_cmd(&tp_cfgs[i], &arg[i]); + tp_cfgs[i].tpg = tpg; + } + return tp_cfgs; +} + +#define RC_TP_CNT 2 + +/* 
create tpg and multiple tp in the tpg at initiator or target */ +static struct ubcore_tpg *ubcore_create_tpg_and_multi_tp(struct ubcore_device *dev, + struct ubcore_tpg_cfg *tpg_cfg, struct ubcore_cmd_tp_cfg *tp_cfg_arg) +{ + struct ubcore_tp_cfg *tp_cfgs; + struct ubcore_tpg *tpg; + int ret = 0; + + if (tpg_cfg->tp_cnt > UBCORE_MAX_TP_CNT_IN_GRP || + (tpg_cfg->trans_mode == UBCORE_TP_RC && tpg_cfg->tp_cnt != RC_TP_CNT)) + return ERR_PTR(-EINVAL); + + tpg = ubcore_create_tpg(dev, tpg_cfg); + if (tpg == NULL) + return ERR_PTR(-ENOSPC); + + /* create tp in the tpg */ + tp_cfgs = ubcore_get_multi_tp_cfg(tpg_cfg->tp_cnt, tp_cfg_arg, tpg); + if (tp_cfgs == NULL) { + ret = -ENOMEM; + goto destroy_tpg; + } + ret = ubcore_create_multi_tp(dev, tpg, tp_cfgs); + if (ret) + goto free_tp_cfg; + + kfree(tp_cfgs); + return tpg; + +free_tp_cfg: + kfree(tp_cfgs); +destroy_tpg: + (void)ubcore_destroy_tpg(tpg); + return ERR_PTR(ret); +} + +static int ubcore_para_ta(struct ubcore_device *dev, struct ubcore_tp_advice *advice, + struct ubcore_ta_data *ta_data) +{ + struct ubcore_tp_meta *meta; + struct ubcore_jetty *jetty; + struct ubcore_jfs *jfs; + + advice->ta.type = ta_data->ta_type; + meta = &advice->meta; + + switch (ta_data->ta_type) { + case UBCORE_TA_JFS_TJFR: + jfs = ubcore_find_jfs(dev, ta_data->jetty_id.id); + if (jfs != NULL) { + meta->ht = ubcore_get_tptable(jfs->tptable); + advice->ta.jfs = jfs; + advice->ta.tjetty_id = ta_data->tjetty_id; + } + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ubcore_find_jetty(dev, ta_data->jetty_id.id); + if (jetty != NULL) { + meta->ht = ubcore_get_tptable(jetty->tptable); + advice->ta.jetty = jetty; + advice->ta.tjetty_id = ta_data->tjetty_id; + } + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return -1; + } + ubcore_init_tp_key_jetty_id(&meta->key, &ta_data->tjetty_id); + advice->meta.hash = ubcore_get_jetty_hash(&ta_data->tjetty_id); + return 0; +} + +static int ubcore_get_active_mtu(struct ubcore_device *dev, 
uint8_t port_num, + enum ubcore_mtu *mtu) +{ + struct ubcore_device_status st = { 0 }; + + if (port_num >= dev->attr.port_cnt || dev->ops->query_device_status == NULL) { + ubcore_log_err("Invalid parameter"); + return -1; + } + if (dev->ops->query_device_status(dev, &st) != 0) { + ubcore_log_err("Failed to query query_device_status for port %d", port_num); + return -1; + } + if (st.port_status[port_num].state != UBCORE_PORT_ACTIVE) { + ubcore_log_err("Port %d is not active", port_num); + return -1; + } + *mtu = st.port_status[port_num].active_mtu; + return 0; +} + +static int ubcore_copy_tpg_udrv_data(struct ubcore_cmd_hdr *hdr, struct ubcore_cmd_create_tpg *arg, + struct ubcore_tp_node *tp_node) +{ + int ret = 0; + + if (ubcore_get_active_mtu(tp_node->tp->ub_dev, 0, &arg->local_mtu) != 0) + return -1; + + if (arg->udrv_ext.out_len < tp_node->tp->tp_ext.len) { + ubcore_log_err("tp_ext memory is not long enough\n"); + return -1; + } + arg->udrv_ext.out_len = tp_node->tp->tp_ext.len; + arg->out.tpn[0] = tp_node->tp->tpn; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_tpg)); + if (ret) + return -1; + + ret = (int)copy_to_user((void __user *)(uintptr_t)arg->udrv_ext.out_addr, + (char *)tp_node->tp->tp_ext.addr, + tp_node->tp->tp_ext.len); + + return ret; +} + +static inline void ubcore_set_udata(struct ubcore_udata *udata, struct ubcore_tp_advice *advice, + struct ubcore_udrv_priv *udrv_data) +{ + udata->uctx = (advice->ta.type == UBCORE_TA_JFS_TJFR ? 
+ advice->ta.jfs->uctx : advice->ta.jetty->uctx); + udata->udrv_data = udrv_data; +} + +static struct ubcore_tp_node *ubcore_get_tp_node(struct ubcore_device *dev, + struct ubcore_tp_advice *advice, struct ubcore_tp_cfg *tp_cfg, struct ubcore_udata *udata) +{ + struct ubcore_tp_node *tp_node = NULL; + struct ubcore_tp *new_tp = NULL; + + tp_node = ubcore_hash_table_lookup(advice->meta.ht, advice->meta.hash, &advice->meta.key); + if (tp_node == NULL) { + new_tp = ubcore_create_tp(dev, tp_cfg, udata); + if (new_tp == NULL) { + ubcore_log_err("Failed to create tp"); + return NULL; + } + tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, &advice->meta.key, + new_tp, &advice->ta); + if (tp_node == NULL) { + (void)ubcore_destroy_tp(new_tp); + ubcore_log_err("Failed to find and add tp\n"); + return NULL; + } else if (tp_node != NULL && tp_node->tp != new_tp) { + (void)ubcore_destroy_tp(new_tp); + new_tp = NULL; + } + } + atomic_inc(&tp_node->tp->use_cnt); + return tp_node; +} + +static int ubcore_cmd_create_tp(struct ubcore_cmd_hdr *hdr, struct ubcore_cmd_create_tpg *arg) +{ + struct ubcore_tp_advice advice = { 0 }; + struct ubcore_tp_cfg tp_cfg = { 0 }; + struct ubcore_tp_node *tp_node = NULL; + struct ubcore_device *dev = NULL; + struct ubcore_udata udata = { 0 }; + int ret = 0; + + dev = ubcore_find_device(&arg->ta_data.jetty_id.eid, arg->ta_data.trans_type); + if (dev == NULL) + return -ENODEV; + + if (ubcore_para_ta(dev, &advice, &arg->ta_data) != 0) { + ubcore_log_err("Failed to parse ta with type %u", advice.ta.type); + goto put_device; + } else if (advice.meta.ht == NULL) { + ubcore_log_err("tp table is already released"); + goto put_device; + } + + ubcore_set_tp_cfg_with_cmd(&tp_cfg, &arg->in.tp_cfg[0]); + ubcore_set_udata(&udata, &advice, (struct ubcore_udrv_priv *)&arg->udata); + tp_node = ubcore_get_tp_node(dev, &advice, &tp_cfg, &udata); + if (!tp_node) + goto put_device; + + /* 1636 rm mode creates tp in the user context, but the tp parameter 
is not filled in. + * It needs to be obtained from uvs and filled in. + */ + if (tp_node->tp != NULL && tp_cfg.trans_mode == UBCORE_TP_RM) + ubcore_set_tp_init_cfg(tp_node->tp, &tp_cfg); + + ret = ubcore_copy_tpg_udrv_data(hdr, arg, tp_node); + if (ret) + goto remove_tp_node; + + ubcore_put_device(dev); + return ret; + +remove_tp_node: + ubcore_abort_tp(tp_node->tp, &advice.meta); +put_device: + ubcore_put_device(dev); + return -1; +} + +static int ubcore_cmd_create_tpg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_create_tpg *arg; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + int ret = 0; + uint32_t i; + + arg = kzalloc(sizeof(struct ubcore_cmd_create_tpg), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct ubcore_cmd_create_tpg)); + if (ret != 0) + goto free_arg; + + if (arg->ta_data.trans_type == UBCORE_TRANSPORT_IB) + return ubcore_cmd_create_tp(hdr, arg); + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + tpg = ubcore_create_tpg_and_multi_tp(dev, &arg->in.tpg_cfg, arg->in.tp_cfg); + if (IS_ERR_OR_NULL(tpg)) { + ret = -EPERM; + goto put_device; + } + + /* fill output */ + arg->out.tpgn = tpg->tpgn; + for (i = 0; i < tpg->tpg_cfg.tp_cnt; i++) + arg->out.tpn[i] = tpg->tp_list[i]->tpn; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_tpg)); + if (ret) + goto destroy_tpg; + else + goto put_device; + +destroy_tpg: + (void)ubcore_destroy_multi_tp(dev, tpg); + (void)ubcore_destroy_tpg(tpg); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_get_tp_state_attr_and_mask(enum ubcore_tp_state s, uint32_t tp_cnt, + struct ubcore_tp_attr **attr, union ubcore_tp_attr_mask **mask) +{ + union ubcore_tp_attr_mask *_mask; + struct ubcore_tp_attr *_attr; + uint32_t i; + + 
_attr = kcalloc(1, tp_cnt * sizeof(struct ubcore_tp_attr), GFP_KERNEL); + if (_attr == NULL) + return -ENOMEM; + + _mask = kcalloc(1, tp_cnt * sizeof(union ubcore_tp_attr_mask), GFP_KERNEL); + if (_mask == NULL) { + kfree(_attr); + return -ENOMEM; + } + + for (i = 0; i < tp_cnt; i++) { + _attr[i].state = s; + _mask[i].value = 0; + _mask[i].bs.state = 1; + } + *attr = _attr; + *mask = _mask; + return 0; +} + +static void ubcore_set_vtp_common_cfg(struct ubcore_vtp_cfg *cfg, struct ubcore_cmd_vtp_cfg *cmd) +{ + cfg->fe_idx = cmd->fe_idx; + cfg->vtpn = cmd->vtpn; + cfg->local_jetty = cmd->local_jetty; + cfg->local_eid = cmd->local_eid; + cfg->peer_eid = cmd->peer_eid; + cfg->peer_jetty = cmd->peer_jetty; + cfg->flag = cmd->flag; + cfg->trans_mode = cmd->trans_mode; +} + +static void ubcore_set_vtp2tpg_cfg(struct ubcore_vtp_cfg *cfg, + struct ubcore_cmd_vtp_cfg *cmd, struct ubcore_tpg *tpg) +{ + ubcore_set_vtp_common_cfg(cfg, cmd); + cfg->tpg = tpg; +} + +static void ubcore_set_vtp2utp_cfg(struct ubcore_vtp_cfg *cfg, + struct ubcore_cmd_vtp_cfg *cmd, struct ubcore_utp *utp) +{ + ubcore_set_vtp_common_cfg(cfg, cmd); + cfg->utp = utp; +} + +static void ubcore_set_vtp2ctp_cfg(struct ubcore_vtp_cfg *cfg, + struct ubcore_cmd_vtp_cfg *cmd, struct ubcore_ctp *ctp) +{ + ubcore_set_vtp_common_cfg(cfg, cmd); + cfg->ctp = ctp; +} + +static int ubcore_cmd_create_vtp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_tp *failed_tp[UBCORE_MAX_TP_CNT_IN_GRP]; + union ubcore_tp_attr_mask *rts_mask = NULL; + struct ubcore_tp_attr *rts_attr = NULL; + struct ubcore_cmd_create_vtp *arg; + struct ubcore_vtp_cfg vtp_cfg; + struct ubcore_vtp *vtp = NULL; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + int ret; + + arg = kzalloc(sizeof(struct ubcore_cmd_create_vtp), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct ubcore_cmd_create_vtp)); + if (ret != 0) + goto free_arg; + + dev = 
ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + /* deal with RM first */ + tpg = ubcore_find_tpg(dev, arg->in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + /* modify to RTR */ + arg->out.rtr_tp_cnt = ubcore_modify_tp_in_tpg(dev, tpg, arg->in.rtr_attr, + arg->in.rtr_mask, failed_tp); + if (arg->out.rtr_tp_cnt != tpg->tpg_cfg.tp_cnt) { + /* todonext: modify tp to reset ? */ + ret = -EPERM; + goto to_user; + } + + /* modify to RTS */ + ret = ubcore_get_tp_state_attr_and_mask(UBCORE_TP_STATE_RTS, tpg->tpg_cfg.tp_cnt, + &rts_attr, &rts_mask); + if (ret != 0) + goto to_user; + + arg->out.rts_tp_cnt = ubcore_modify_tp_in_tpg(dev, tpg, rts_attr, rts_mask, failed_tp); + if (arg->out.rts_tp_cnt != tpg->tpg_cfg.tp_cnt) { + /* todonext: modify tp to reset ? */ + ret = -EPERM; + goto to_user; + } + + ubcore_set_vtp2tpg_cfg(&vtp_cfg, &arg->in.vtp, tpg); + vtp = ubcore_map_vtp(dev, &vtp_cfg); + if (vtp == NULL) { + ret = -EPERM; + goto to_user; + } + + arg->out.vtpn = vtp->cfg.vtpn; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_vtp)); + if (ret != 0) + goto unmap_vtp; + else + goto free_attr; + +to_user: + (void)ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_vtp)); +unmap_vtp: + if (vtp != NULL) + (void)ubcore_unmap_vtp(vtp); +free_attr: + if (rts_attr != NULL) + kfree(rts_attr); + if (rts_mask != NULL) + kfree(rts_mask); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_modify_tp_node(struct ubcore_tp_node *tp_node, struct ubcore_tp_attr *tp_attr, + struct ubcore_udata *udata) +{ + /* Modify REST TO RTR */ + if (tp_node->tp->state == UBCORE_TP_STATE_RESET && + ubcore_modify_tp(tp_node->tp->ub_dev, tp_node, tp_attr, *udata) != 0) { + ubcore_log_err("Failed to modify 
tp"); + return -1; + } + /* modify RTR TO RTS */ + if (tp_node->tp->state == UBCORE_TP_STATE_RTR && + ubcore_modify_tp(tp_node->tp->ub_dev, tp_node, tp_attr, *udata) != 0) { + ubcore_log_err("Failed to modify tp"); + return -1; + } + return 0; +} + +static int ubcore_cmd_modify_tp(struct ubcore_cmd_hdr *hdr, struct ubcore_cmd_modify_tpg *arg) +{ + struct ubcore_device *dev = NULL; + struct ubcore_udata udata = { 0 }; + struct ubcore_tp_node *tp_node; + struct ubcore_tp_advice advice = {0}; + int ret = 0; + + dev = ubcore_find_device(&arg->ta_data.jetty_id.eid, arg->ta_data.trans_type); + if (dev == NULL) + return -ENODEV; + + if (ubcore_para_ta(dev, &advice, &arg->ta_data) != 0) { + ubcore_log_err("Failed to parse ta with type %u", advice.ta.type); + goto put_device; + } else if (advice.meta.ht == NULL) { + ubcore_log_err("tp table is already released"); + goto put_device; + } + + tp_node = ubcore_hash_table_lookup(advice.meta.ht, advice.meta.hash, &advice.meta.key); + if (tp_node == NULL) { + ubcore_log_err("tp node is already released"); + goto put_device; + } + + ubcore_set_udata(&udata, &advice, (struct ubcore_udrv_priv *)&arg->udrv_ext); + ret = ubcore_modify_tp_node(tp_node, &arg->in.rtr_attr[0], &udata); + if (ret) + goto put_device; + + ubcore_put_device(dev); + return ret; + +put_device: + ubcore_put_device(dev); + return -1; +} + +static int ubcore_cmd_modify_tpg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_tp *failed_tp[UBCORE_MAX_TP_CNT_IN_GRP]; + union ubcore_tp_attr_mask *rts_mask = NULL; + struct ubcore_tp_attr *rts_attr = NULL; + struct ubcore_cmd_modify_tpg *arg; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + int ret; + + arg = kzalloc(sizeof(struct ubcore_cmd_modify_tpg), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct ubcore_cmd_modify_tpg)); + if (ret != 0) + goto free_arg; + + if (arg->ta_data.trans_type == UBCORE_TRANSPORT_IB) + return 
ubcore_cmd_modify_tp(hdr, arg); + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + /* deal with RM first */ + tpg = ubcore_find_tpg(dev, arg->in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + /* modify to RTR */ + arg->out.rtr_tp_cnt = ubcore_modify_tp_in_tpg(dev, tpg, arg->in.rtr_attr, + arg->in.rtr_mask, failed_tp); + if (arg->out.rtr_tp_cnt != tpg->tpg_cfg.tp_cnt) { + /* todonext: modify tp to reset ? */ + ret = -EPERM; + goto to_user; + } + + /* modify to RTS */ + ret = ubcore_get_tp_state_attr_and_mask(UBCORE_TP_STATE_RTS, tpg->tpg_cfg.tp_cnt, + &rts_attr, &rts_mask); + if (ret != 0) + goto to_user; + + arg->out.rts_tp_cnt = ubcore_modify_tp_in_tpg(dev, tpg, rts_attr, rts_mask, failed_tp); + if (arg->out.rts_tp_cnt != tpg->tpg_cfg.tp_cnt) { + /* todonext: modify tp to reset ? */ + ret = -EPERM; + goto to_user; + } + +to_user: + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_modify_tpg)); + if (ret) + ubcore_log_warn("ubcore cmd modify tpg to user failed"); + if (rts_attr != NULL) + kfree(rts_attr); + if (rts_mask != NULL) + kfree(rts_mask); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_mark_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tp_meta *meta, + struct ubcore_jetty_id *default_tjetty_id, struct ubcore_ta_data *ta_data) +{ + spin_lock(&meta->ht->lock); + if (jetty->jetty_cfg.trans_mode == UBCORE_TP_RC && + memcmp(&meta->ht->rc_tjetty_id, default_tjetty_id, + sizeof(struct ubcore_jetty_id)) == 0) { + meta->ht->rc_tjetty_id = ta_data->jetty_id; + } else if (jetty->jetty_cfg.trans_mode == UBCORE_TP_RC && + memcmp(&meta->ht->rc_tjetty_id, + &ta_data->jetty_id, sizeof(struct ubcore_jetty_id)) != 0) { + spin_unlock(&meta->ht->lock); + ubcore_log_err("the same jetty_id: %u is binded with 
another remote jetty_id: %pI6c-%u.\n", + jetty->id, &meta->ht->rc_tjetty_id.eid, meta->ht->rc_tjetty_id.id); + return -1; + } + spin_unlock(&meta->ht->lock); + return 0; +} + +static int ubcore_para_target_ta(struct ubcore_device *dev, struct ubcore_tp_advice *advice, + struct ubcore_ta_data *ta_data) +{ + struct ubcore_jetty_id default_tjetty_id; + struct ubcore_tp_meta *meta; + struct ubcore_jetty *jetty; + struct ubcore_jfr *jfr; + int ret = 0; + + advice->ta.type = ta_data->ta_type; + meta = &advice->meta; + + (void)memset(&default_tjetty_id, 0, + sizeof(struct ubcore_jetty_id)); + + switch (ta_data->ta_type) { + case UBCORE_TA_JFS_TJFR: + jfr = ubcore_find_jfr(dev, ta_data->tjetty_id.id); + if (jfr != NULL) { + meta->ht = ubcore_get_tptable(jfr->tptable); + advice->ta.jfr = jfr; + advice->ta.tjetty_id = ta_data->jetty_id; + } + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ubcore_find_jetty(dev, ta_data->tjetty_id.id); + if (jetty != NULL) { + meta->ht = ubcore_get_tptable(jetty->tptable); + advice->ta.jetty = jetty; + advice->ta.tjetty_id = ta_data->jetty_id; + ret = ubcore_mark_bind_jetty(jetty, meta, &default_tjetty_id, ta_data); + } + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return -1; + } + ubcore_init_tp_key_jetty_id(&meta->key, &ta_data->jetty_id); + advice->meta.hash = ubcore_get_jetty_hash(&ta_data->jetty_id); + return ret; +} + +static int ubcore_copy_target_tpg_udrv_data(struct ubcore_cmd_hdr *hdr, + struct ubcore_cmd_create_target_tpg *arg, struct ubcore_tp_node *tp_node) +{ + int ret; + + if (arg->udrv_ext.out_len < tp_node->tp->tp_ext.len) { + ubcore_log_err("tp_ext memory is not long enough\n"); + return -1; + } + arg->udrv_ext.out_len = tp_node->tp->tp_ext.len; + arg->out.tpn[0] = tp_node->tp->tpn; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_target_tpg)); + if (ret) + return -1; + + ret = (int)copy_to_user((void __user 
*)(uintptr_t)arg->udrv_ext.out_addr, + (char *)tp_node->tp->tp_ext.addr, + tp_node->tp->tp_ext.len); + + return ret; +} + +static int ubcore_modify_target_tp_node(struct ubcore_tp_node *tp_node, + struct ubcore_tp_attr *tp_attr, struct ubcore_udata *udata, + struct ubcore_cmd_create_target_tpg *arg) +{ + if (ubcore_get_active_mtu(tp_node->tp->ub_dev, 0, &arg->local_mtu) != 0 && + (arg->local_mtu == 0 || arg->peer_mtu == 0)) + return -1; + tp_attr->mtu = min(arg->local_mtu, arg->peer_mtu); + + /* The receiving side rm mode cannot switch the rts state */ + if (tp_node->tp->trans_mode == UBCORE_TP_RM && + tp_node->tp->state == UBCORE_TP_STATE_RTR) + return 0; + + udata->udrv_data->in_addr = arg->udrv_ext.in_addr; + udata->udrv_data->in_len = arg->udrv_ext.in_len; + if (ubcore_modify_tp(tp_node->tp->ub_dev, tp_node, tp_attr, *udata) != 0) { + ubcore_log_err("Failed to modify tp"); + return -1; + } + return 0; +} + +static int ubcore_cmd_create_target_tp(struct ubcore_cmd_hdr *hdr, + struct ubcore_cmd_create_target_tpg *arg) +{ + struct ubcore_udata udata = { 0 }; + struct ubcore_device *dev = NULL; + struct ubcore_tp_node *tp_node; + struct ubcore_tp_advice advice = { 0 }; + struct ubcore_tp_cfg tp_cfg = { 0 }; + struct ubcore_tp_attr *tp_attr = NULL; + + int ret = 0; + + tp_attr = &arg->in.rtr_attr[0]; + dev = ubcore_find_device(&arg->ta_data.tjetty_id.eid, arg->ta_data.trans_type); + if (dev == NULL) + return -ENODEV; + + if (ubcore_para_target_ta(dev, &advice, &arg->ta_data) != 0) { + ubcore_log_err("Failed to parse ta with type %u", advice.ta.type); + goto put_device; + } else if (advice.meta.ht == NULL) { + ubcore_log_err("tp table is already released"); + goto put_device; + } + + ubcore_set_udata(&udata, &advice, (struct ubcore_udrv_priv *)&arg->udata); + ubcore_set_tp_cfg_with_cmd(&tp_cfg, &arg->in.tp_cfg[0]); + tp_node = ubcore_get_tp_node(dev, &advice, &tp_cfg, &udata); + if (!tp_node) + goto put_device; + + ret = ubcore_modify_target_tp_node(tp_node, tp_attr, 
&udata, arg); + if (ret) + goto remove_tp_node; + + ret = ubcore_copy_target_tpg_udrv_data(hdr, arg, tp_node); + if (ret) + goto remove_tp_node; + + ubcore_put_device(dev); + return ret; + +remove_tp_node: + ubcore_abort_tp(tp_node->tp, &advice.meta); +put_device: + ubcore_put_device(dev); + return -1; +} + +static int ubcore_cmd_create_target_tpg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_tp *failed_tp[UBCORE_MAX_TP_CNT_IN_GRP]; + struct ubcore_cmd_create_target_tpg *arg; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + int ret = 0; + uint32_t i; + + arg = kzalloc(sizeof(struct ubcore_cmd_create_target_tpg), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_create_target_tpg)); + if (ret != 0) + goto free_arg; + + if (arg->ta_data.trans_type == UBCORE_TRANSPORT_IB) + return ubcore_cmd_create_target_tp(hdr, arg); + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + tpg = ubcore_create_tpg_and_multi_tp(dev, &arg->in.tpg_cfg, arg->in.tp_cfg); + if (IS_ERR_OR_NULL(tpg)) { + ret = -EPERM; + goto put_device; + } + + /* modify to RTR */ + if (ubcore_modify_tp_in_tpg(dev, tpg, arg->in.rtr_attr, arg->in.rtr_mask, failed_tp) != + tpg->tpg_cfg.tp_cnt) { + /* todonext: modify tp to reset ? 
*/ + ret = -EPERM; + goto destroy_tpg; + } + + /* fill output */ + arg->out.tpgn = tpg->tpgn; + for (i = 0; i < tpg->tpg_cfg.tp_cnt; i++) + arg->out.tpn[i] = tpg->tp_list[i]->tpn; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_target_tpg)); + if (ret) + goto destroy_tpg; + else + goto put_device; + +destroy_tpg: + (void)ubcore_destroy_multi_tp(dev, tpg); + (void)ubcore_destroy_tpg(tpg); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_cmd_modify_target_tp(struct ubcore_cmd_hdr *hdr, + struct ubcore_cmd_modify_target_tpg *arg) +{ + struct ubcore_device *dev = NULL; + struct ubcore_tp_node *tp_node; + struct ubcore_tp_advice advice = {0}; + struct ubcore_tp_attr rtr_attr = {0}; + struct ubcore_udata udata = {0}; + int ret = 0; + + dev = ubcore_find_device(&arg->ta_data.tjetty_id.eid, arg->ta_data.trans_type); + if (dev == NULL) + return -ENODEV; + + if (ubcore_para_target_ta(dev, &advice, &arg->ta_data) != 0) { + ubcore_log_err("Failed to parse ta with type %u", advice.ta.type); + goto put_device; + } else if (advice.meta.ht == NULL) { + ubcore_log_err("tp table is already released"); + goto put_device; + } + + tp_node = ubcore_hash_table_lookup(advice.meta.ht, advice.meta.hash, &advice.meta.key); + if (tp_node == NULL) { + ubcore_log_err("tp node is already released"); + goto put_device; + } + /* The receiving side rm mode cannot switch the rts state */ + if (tp_node->tp->trans_mode == UBCORE_TP_RM && tp_node->tp->state == UBCORE_TP_STATE_RTR) { + ubcore_put_device(dev); + return 0; + } + if (tp_node->tp->state == UBCORE_TP_STATE_RTR && + ubcore_modify_tp(dev, tp_node, &rtr_attr, udata) != 0) { + ubcore_log_err("Failed to modify tp"); + goto put_device; + } + ubcore_put_device(dev); + return ret; + +put_device: + ubcore_put_device(dev); + return -1; +} + +static int ubcore_cmd_modify_target_tpg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_tp 
*failed_tp[UBCORE_MAX_TP_CNT_IN_GRP]; + struct ubcore_cmd_modify_target_tpg arg = {0}; + union ubcore_tp_attr_mask *rts_mask = NULL; + struct ubcore_tp_attr *rts_attr = NULL; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_modify_target_tpg)); + if (ret != 0) + return ret; + + if (arg.ta_data.trans_type == UBCORE_TRANSPORT_IB) + return ubcore_cmd_modify_target_tp(hdr, &arg); + + dev = ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (dev == NULL) + return -ENODEV; + + /* deal with RM first */ + tpg = ubcore_find_tpg(dev, arg.in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + /* modify to RTS */ + ret = ubcore_get_tp_state_attr_and_mask(UBCORE_TP_STATE_RTS, tpg->tpg_cfg.tp_cnt, + &rts_attr, &rts_mask); + if (ret != 0) + goto put_device; + + arg.out.rts_tp_cnt = ubcore_modify_tp_in_tpg(dev, tpg, rts_attr, rts_mask, failed_tp); + if (arg.out.rts_tp_cnt != tpg->tpg_cfg.tp_cnt) + /* todonext: modify tp to reset ? 
*/ + ret = -EPERM; + + /* do not modify ret if copy success */ + if (ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_modify_target_tpg)) != 0) + ret = -EPERM; + + kfree(rts_attr); + kfree(rts_mask); +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_destroy_vtp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_destroy_vtp arg; + struct ubcore_device *dev; + struct ubcore_vtp *vtp; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_destroy_vtp)); + if (ret != 0) + return ret; + + dev = ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (dev == NULL) + return -ENODEV; + + vtp = ubcore_find_vtp(dev, arg.in.mode, &arg.in.local_eid, &arg.in.peer_eid); + if (vtp == NULL) { + ret = -EINVAL; + goto put_device; + } + + ret = ubcore_unmap_vtp(vtp); + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_check_dev_name(char *dev_name) +{ + struct ubcore_device *pf_dev = NULL; + + pf_dev = ubcore_find_device_with_name(dev_name); + if (pf_dev == NULL) { + ubcore_log_err("cannot find dev_name: %s", dev_name); + return -EINVAL; + } + + ubcore_put_device(pf_dev); + return 0; +} + +static int ubcore_cmd_opt_sip(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_opt_sip arg; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_opt_sip)); + if (ret != 0) + return ret; + + ret = ubcore_check_dev_name(arg.in.info.dev_name); + if (ret != 0) + return ret; + + if (hdr->command == UBCORE_CMD_ADD_SIP) + return ubcore_add_sip(&arg.in.info); + + return ubcore_delete_sip(&arg.in.info); +} + +static int ubcore_eidtbl_add_entry(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t *eid_idx) +{ + uint32_t i; + + for (i = 0; i < dev->attr.max_eid_cnt; i++) { + if (memcmp(dev->eid_table.eid_entries[i].eid.raw, eid->raw, UBCORE_EID_SIZE) == 
0) { + ubcore_log_warn("eid already exists\n"); + break; + } + if (dev->eid_table.eid_entries[i].valid == false) { + dev->eid_table.eid_entries[i].eid = *eid; + dev->eid_table.eid_entries[i].valid = true; + dev->eid_table.eid_entries[i].eid_index = i; + *eid_idx = i; + ubcore_log_info("add eid: %pI6c, idx: %u\n", eid, i); + break; + } + } + if (i == dev->attr.max_eid_cnt) { + ubcore_log_err("eid table is full"); + return -1; + } + return 0; +} + +static int ubcore_eidtbl_del_entry(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t *eid_idx) +{ + uint32_t i; + + for (i = 0; i < dev->attr.max_eid_cnt; i++) { + if (memcmp(dev->eid_table.eid_entries[i].eid.raw, eid->raw, UBCORE_EID_SIZE) == 0) { + (void)memset(&dev->eid_table.eid_entries[i], + 0, sizeof(struct ubcore_eid_entry)); + *eid_idx = i; + ubcore_log_info("del eid: %pI6c, idx: %u\n", eid, i); + break; + } + } + if (i == dev->attr.max_eid_cnt) { + ubcore_log_err("eid table is empty"); + return -1; + } + return 0; +} + +static int ubcore_eidtbl_update_entry(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t eid_idx, bool is_add) +{ + if (eid_idx >= dev->attr.max_eid_cnt) { + ubcore_log_err("eid table is full\n"); + return -1; + } + if (is_add) + dev->eid_table.eid_entries[eid_idx].eid = *eid; + else + (void)memset(&dev->eid_table.eid_entries[eid_idx].eid, 0, sizeof(union ubcore_eid)); + + dev->eid_table.eid_entries[eid_idx].valid = is_add; + dev->eid_table.eid_entries[eid_idx].eid_index = eid_idx; + ubcore_log_info("%s eid: %pI6c, idx: %u\n", is_add == true ? 
"add" : "del", eid, eid_idx); + return 0; +} + +int ubcore_update_eidtbl_by_eid(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t *eid_idx, bool is_alloc_eid) +{ + int ret; + + spin_lock(&dev->eid_table.lock); + if (is_alloc_eid) + ret = ubcore_eidtbl_add_entry(dev, eid, eid_idx); + else + ret = ubcore_eidtbl_del_entry(dev, eid, eid_idx); + + spin_unlock(&dev->eid_table.lock); + return ret; +} + +int ubcore_update_eidtbl_by_idx(struct ubcore_device *dev, union ubcore_eid *eid, + uint32_t eid_idx, bool is_alloc_eid) +{ + int ret; + + spin_lock(&dev->eid_table.lock); + ret = ubcore_eidtbl_update_entry(dev, eid, eid_idx, is_alloc_eid); + spin_unlock(&dev->eid_table.lock); + return ret; +} + +static int ubcore_cmd_opt_update_eid(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_opt_eid arg; + struct ubcore_ueid_cfg cfg; + struct ubcore_device *dev; + bool is_alloc_eid; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_opt_eid)); + if (ret != 0) + return ret; + + cfg.eid = arg.in.eid; + cfg.eid_index = arg.in.eid_index; + cfg.upi = arg.in.upi; + + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) + return -1; + + if (!dev->attr.virtualization && dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1) { + ubcore_put_device(dev); + ubcore_log_err("pattern1 does not support static mode\n"); + return -1; + } + is_alloc_eid = hdr->command == UBCORE_CMD_ALLOC_EID ? 
true : false; + ret = ubcore_update_eidtbl_by_idx(dev, &cfg.eid, arg.in.eid_index, is_alloc_eid); + if (ret != 0) { + ubcore_put_device(dev); + return ret; + } + + if (hdr->command == UBCORE_CMD_ALLOC_EID) + ret = ubcore_add_ueid(dev, arg.in.fe_idx, &cfg); + else + ret = ubcore_delete_ueid(dev, arg.in.fe_idx, &cfg); + + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_set_upi(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_set_upi arg; + struct ubcore_ueid_cfg cfg; + struct ubcore_device *dev; + uint32_t pattern3_upi; + uint32_t i; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_set_upi)); + if (ret != 0) + return ret; + + dev = ubcore_find_upi_with_dev_name(arg.in.dev_name, &pattern3_upi); + if (dev == NULL) { + ubcore_log_err("device not found by name: %s\n", arg.in.dev_name); + return -1; + } + if (!(dev->dynamic_eid && dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_3)) { + ubcore_log_err("This mode does not support setting upi\n"); + return -1; + } + for (i = 0; i < dev->attr.max_eid_cnt; i++) { + cfg.eid = dev->eid_table.eid_entries[i].eid; + cfg.eid_index = i; + if (dev->eid_table.eid_entries[i].valid == false) + continue; + if (pattern3_upi == UCBORE_INVALID_UPI) { + cfg.upi = arg.in.upi; + (void)ubcore_add_ueid( + dev, (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + } else { + cfg.upi = pattern3_upi; + (void)ubcore_delete_ueid( + dev, (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + cfg.upi = arg.in.upi; + (void)ubcore_add_ueid( + dev, (uint16_t)UBCORE_NON_VIRTUALIZATION_FE_IDX, &cfg); + } + } + (void)ubcore_add_upi_list(dev, arg.in.upi); + return 0; +} + +static int ubcore_cmd_show_upi(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_show_upi arg; + struct ubcore_device *dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_show_upi)); + if (ret != 0) + return ret; + + dev = 
ubcore_find_upi_with_dev_name(arg.in.dev_name, &arg.out.upi); + if (dev == NULL) { + ubcore_log_err("device not found by name: %s\n", arg.in.dev_name); + return -1; + } + + if (dev->transport_type == UBCORE_TRANSPORT_UB && dev->dynamic_eid == 0) { + ubcore_log_err("Failed to use show_upi to query upi in pattern3 static mode"); + return -1; + } + + if (ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_show_upi)) != 0) + ret = -EPERM; + + return ret; +} + +static int ubcore_cmd_set_global_cfg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_set_global_cfg arg; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_set_global_cfg)); + if (ret != 0) + return ret; + + ret = ubcore_tpf_device_set_global_cfg(&arg.in.global_cfg); + return ret; +} + +static int ubcore_set_vport_cfg(struct ubcore_device *dev, + struct ubcore_set_vport_cfg *vport_cfg) +{ + struct ubcore_device_cfg dev_cfg = {0}; + int ret; + + dev_cfg.fe_idx = vport_cfg->fe_idx; + if (vport_cfg->mask.bs.pattern == 1) { + dev_cfg.mask.bs.pattern = 1; + dev_cfg.pattern = (uint8_t)vport_cfg->pattern; + dev->cfg.pattern = (uint8_t)vport_cfg->pattern; + } + if (vport_cfg->mask.bs.virtualization == 1) { + dev_cfg.mask.bs.virtualization = 1; + dev_cfg.virtualization = (bool)vport_cfg->virtualization; + dev->cfg.virtualization = (bool)vport_cfg->virtualization; + } + if (vport_cfg->mask.bs.min_jetty_cnt == 1) { + dev_cfg.mask.bs.min_jetty_cnt = 1; + dev_cfg.min_jetty_cnt = vport_cfg->min_jetty_cnt; + dev->cfg.min_jetty_cnt = vport_cfg->min_jetty_cnt; + } + if (vport_cfg->mask.bs.max_jetty_cnt == 1) { + dev_cfg.mask.bs.max_jetty_cnt = 1; + dev_cfg.max_jetty_cnt = vport_cfg->max_jetty_cnt; + dev->cfg.max_jetty_cnt = vport_cfg->max_jetty_cnt; + } + if (vport_cfg->mask.bs.min_jfr_cnt == 1) { + dev_cfg.mask.bs.min_jfr_cnt = 1; + dev_cfg.min_jfr_cnt = vport_cfg->min_jfr_cnt; + dev->cfg.min_jfr_cnt = 
vport_cfg->min_jfr_cnt; + } + if (vport_cfg->mask.bs.max_jfr_cnt == 1) { + dev_cfg.mask.bs.max_jfr_cnt = 1; + dev_cfg.max_jfr_cnt = vport_cfg->max_jfr_cnt; + dev->cfg.max_jfr_cnt = vport_cfg->max_jfr_cnt; + } + if (vport_cfg->mask.bs.slice == 1) { + dev_cfg.mask.bs.slice = 1; + dev_cfg.slice = vport_cfg->slice; + dev->cfg.slice = vport_cfg->slice; + } + dev->cfg.mask.value |= dev_cfg.mask.value; + + ret = ubcore_config_device(dev, &dev_cfg); + if (ret != 0) + ubcore_log_err("dev: %s set vport cfg failed, ret: %d", dev->dev_name, ret); + return ret; +} + +static int ubcore_cmd_set_vport_cfg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_set_vport_cfg arg; + struct ubcore_device *dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_set_vport_cfg)); + if (ret != 0) + return ret; + + dev = ubcore_find_device_with_name(arg.in.vport_cfg.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev failed, arg_in: %s.\n", arg.in.vport_cfg.dev_name); + return -EINVAL; + } + ret = ubcore_set_vport_cfg(dev, &arg.in.vport_cfg); + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_get_dev_info(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_get_dev_info arg; + struct ubcore_device *tpf_dev; + struct ubcore_device *pf_dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_get_dev_info)); + if (ret != 0) + return ret; + + tpf_dev = ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (tpf_dev == NULL) { + ubcore_log_err("failed to find tpf device"); + return -1; + } + + ubcore_log_info("get tpf device name %s", tpf_dev->dev_name); + + (void)strcpy(arg.out.target_tpf_name, tpf_dev->dev_name); + + ubcore_put_device(tpf_dev); + + pf_dev = ubcore_find_device_with_eid_index(&arg.in.peer_eid, arg.in.tpf.trans_type, + arg.in.eid_index); + if (pf_dev != NULL) { + ubcore_log_info("pf_dev %s has been found by 
eid_index %u and eid: "EID_FMT"\n", + pf_dev->dev_name, arg.in.eid_index, EID_ARGS(arg.in.peer_eid)); + } else { + ubcore_put_device(pf_dev); + ubcore_log_err("pf_dev cannot be found by eid_index %u and eid: "EID_FMT"\n", + arg.in.eid_index, EID_ARGS(arg.in.peer_eid)); + return -1; + } + + arg.out.port_is_active = true; + if (ubcore_check_port_state(pf_dev, 0) != 0) { + arg.out.port_is_active = false; + ubcore_log_warn("port status unactive on target side, pf_dev: %s", + pf_dev->dev_name); + } + (void)strcpy(arg.out.target_pf_name, pf_dev->dev_name); + ubcore_put_device(pf_dev); + + if (ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_get_dev_info)) != 0) + ret = -EPERM; + + return ret; +} + +static int ubcore_unmark_bind_jetty(struct ubcore_device *dev, struct ubcore_tp_advice *advice, + struct ubcore_ta_data *ta_data) +{ + struct ubcore_tp_meta *meta; + struct ubcore_jetty *jetty; + + advice->ta.type = ta_data->ta_type; + meta = &advice->meta; + + switch (ta_data->ta_type) { + case UBCORE_TA_JFS_TJFR: + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ubcore_find_jetty(dev, ta_data->jetty_id.id); + if (jetty != NULL && jetty->jetty_cfg.trans_mode == UBCORE_TP_RC) { + meta->ht = ubcore_get_tptable(jetty->tptable); + advice->ta.jetty = jetty; + advice->ta.tjetty_id = ta_data->tjetty_id; + if (meta->ht != NULL && memcmp(&meta->ht->rc_tjetty_id, &ta_data->tjetty_id, + sizeof(struct ubcore_jetty_id)) == 0) { + (void)memset(&meta->ht->rc_tjetty_id, + 0, sizeof(struct ubcore_jetty_id)); + } else { + ubcore_log_err("The jetty_id: %u is not bound tjetty_id: %u\n", + jetty->id, ta_data->tjetty_id.id); + return -1; + } + } + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return -1; + } + return 0; +} + +static int ubcore_cmd_destroy_tp(struct ubcore_cmd_hdr *hdr, struct ubcore_cmd_destroy_tpg *arg) +{ + struct ubcore_tp_advice advice = {0}; + struct ubcore_device *dev = NULL; + struct ubcore_tp *tp; + int ret = 
0; + + dev = ubcore_find_device(&arg->ta_data.jetty_id.eid, arg->ta_data.trans_type); + if (dev == NULL) + return -ENODEV; + + if (ubcore_para_ta(dev, &advice, &arg->ta_data) != 0) { + ubcore_log_err("Failed to parse ta with type %u", advice.ta.type); + ret = -1; + goto put_device; + } else if (advice.meta.ht == NULL) { + ubcore_log_err("tp table is already released"); + ret = 0; + goto put_device; + } + if (arg->ta_data.is_target) { + ret = ubcore_unmark_bind_jetty(dev, &advice, &arg->ta_data); + if (ret != 0) + goto put_device; + } + tp = ubcore_find_remove_tp(advice.meta.ht, advice.meta.hash, &advice.meta.key); + if (tp == NULL) { + ubcore_log_warn("TP is not found, already removed or under use\n"); + ubcore_put_device(dev); + return 0; + } + ret = ubcore_destroy_tp(tp); + ubcore_put_device(dev); + return ret; + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_destroy_tpg(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_destroy_tpg arg = {0}; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_destroy_tpg)); + if (ret != 0) + return ret; + + if (arg.ta_data.trans_type == UBCORE_TRANSPORT_IB) + return ubcore_cmd_destroy_tp(hdr, &arg); + + dev = ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (dev == NULL) + return -ENODEV; + + /* deal with RM first */ + tpg = ubcore_find_tpg(dev, arg.in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + arg.out.destroyed_tp_cnt = ubcore_destroy_multi_tp(dev, tpg); + if (arg.out.destroyed_tp_cnt != tpg->tpg_cfg.tp_cnt) { + ret = -EPERM; + goto to_user; + } + + /* todonext: rollback ? 
*/ + ret = ubcore_destroy_tpg(tpg); + if (ret) + ubcore_log_err("Failed to destroy tpg"); + +to_user: + /* do not modify ret if copy success */ + if (ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_destroy_tpg)) != 0) + ret = -EPERM; + /* todonext: rollback ? */ +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_map_vtp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_map_vtp *arg; + struct ubcore_vtp_cfg vtp_cfg; + struct ubcore_vtp *vtp = NULL; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + struct ubcore_utp *utp; + struct ubcore_ctp *ctp; + int ret; + + arg = kzalloc(sizeof(struct ubcore_cmd_map_vtp), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_map_vtp)); + if (ret != 0) + goto free_arg; + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + if (arg->in.vtp.flag.bs.clan_tp == 0) { + /* deal with trans domain -G */ + if (arg->in.vtp.trans_mode != UBCORE_TP_UM) { + /* deal with RM first */ + tpg = ubcore_find_tpg(dev, arg->in.vtp.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + ubcore_set_vtp2tpg_cfg(&vtp_cfg, &arg->in.vtp, tpg); + } else { + /* deal with UM */ + utp = ubcore_find_utp(dev, arg->in.vtp.utpn); + if (utp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find utp"); + goto put_device; + } + ubcore_set_vtp2utp_cfg(&vtp_cfg, &arg->in.vtp, utp); + } + } else { + /* deal with trans domain -C */ + ctp = ubcore_find_ctp(dev, arg->in.vtp.ctpn); + if (ctp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find ctp"); + goto put_device; + } + ubcore_set_vtp2ctp_cfg(&vtp_cfg, &arg->in.vtp, ctp); + } + + vtp = ubcore_map_vtp(dev, &vtp_cfg); + if (vtp == NULL) { + ret = -EPERM; + goto put_device; + } + + 
arg->out.vtpn = vtp->cfg.vtpn; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_map_vtp)); + if (ret != 0) + goto unmap_vtp; + else + goto put_device; + +unmap_vtp: + (void)ubcore_unmap_vtp(vtp); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_cmd_create_utp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_create_utp *arg; + struct ubcore_device *dev; + struct ubcore_utp *utp = NULL; + struct ubcore_vtp_cfg vtp_cfg; + struct ubcore_vtp *vtp = NULL; + int ret = 0; + + arg = kzalloc(sizeof(struct ubcore_cmd_create_utp), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct ubcore_cmd_create_utp)); + if (ret != 0) + goto free_arg; + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + utp = ubcore_create_utp(dev, &arg->in.utp_cfg); + if (utp == NULL) + goto put_device; + + ubcore_set_vtp2utp_cfg(&vtp_cfg, &arg->in.vtp, utp); + vtp = ubcore_map_vtp(dev, &vtp_cfg); + if (vtp == NULL) { + ret = -EPERM; + goto destroy_utp; + } + + /* fill output */ + arg->out.idx = utp->utpn; + arg->out.vtpn = vtp->cfg.vtpn; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_utp)); + if (ret) + goto unmap_vtp; + else + goto put_device; + +unmap_vtp: + (void)ubcore_unmap_vtp(vtp); +destroy_utp: + (void)dev->ops->destroy_utp(utp); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_cmd_destroy_utp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_destroy_utp arg = {0}; + struct ubcore_device *dev; + struct ubcore_utp *utp; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_destroy_utp)); + if (ret != 0) + return ret; + + dev = 
ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (dev == NULL) + return -ENODEV; + + utp = ubcore_find_utp(dev, arg.in.utp_idx); + if (utp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find utp"); + goto put_device; + } + + /* todonext: rollback ? */ + ret = ubcore_destroy_utp(utp); + if (ret) + ubcore_log_err("Failed to destroy utp"); + + /* todonext: rollback ? */ +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_create_ctp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_create_ctp *arg = NULL; + struct ubcore_device *dev = NULL; + struct ubcore_ctp *ctp = NULL; + struct ubcore_vtp_cfg vtp_cfg; + struct ubcore_vtp *vtp = NULL; + int ret = 0; + + arg = kzalloc(sizeof(struct ubcore_cmd_create_ctp), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct ubcore_cmd_create_ctp)); + if (ret != 0) + goto free_arg; + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, arg->in.tpf.trans_type); + if (dev == NULL) { + ret = -ENODEV; + goto free_arg; + } + + ctp = ubcore_create_ctp(dev, &arg->in.ctp_cfg); + if (ctp == NULL) + goto put_device; + + ubcore_set_vtp2ctp_cfg(&vtp_cfg, &arg->in.vtp, ctp); + vtp = ubcore_map_vtp(dev, &vtp_cfg); + if (vtp == NULL) { + ret = -EPERM; + goto destroy_ctp; + } + + /* fill output */ + arg->out.idx = ctp->ctpn; + arg->out.vtpn = vtp->cfg.vtpn; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, arg, + sizeof(struct ubcore_cmd_create_ctp)); + if (ret) + goto unmap_vtp; + else + goto put_device; + +unmap_vtp: + (void)ubcore_unmap_vtp(vtp); +destroy_ctp: + (void)dev->ops->destroy_ctp(ctp); +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +static int ubcore_cmd_destroy_ctp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_destroy_ctp arg = {0}; + struct ubcore_device *dev = NULL; + struct ubcore_ctp *ctp = NULL; + int ret = 0; + + ret 
= ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_destroy_ctp)); + if (ret != 0) + return ret; + + dev = ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (dev == NULL) + return -ENODEV; + + ctp = ubcore_find_ctp(dev, arg.in.ctp_idx); + if (ctp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find ctp"); + goto put_device; + } + + ret = ubcore_destroy_ctp(ctp); + if (ret) + ubcore_log_err("Failed to destroy ctp"); + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_restore_tp_error_op(struct ubcore_cmd_hdr *hdr, + bool set_to_rtr, bool set_to_rts) +{ + struct ubcore_cmd_restore_tp_error arg = {0}; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_restore_tp_error)); + if (ret != 0) + return ret; + + /* Currently, 'netaddr' & 'type' are not necessary */ + dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB); + if (dev == NULL || dev->ops == NULL || dev->ops->modify_tp == NULL) + return -ENODEV; + + tpg = ubcore_find_tpg(dev, arg.in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg %u, cmd:%u", arg.in.tpgn, hdr->command); + goto put_device; + } + + tp = ubcore_find_tp_in_tpg(tpg, arg.in.tpn); + if (tp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tp %u, cmd:%u", arg.in.tpn, hdr->command); + goto put_device; + } + + if (set_to_rtr && ubcore_restore_tp_error_to_rtr(dev, tp, arg.in.rx_psn, arg.in.tx_psn, + arg.in.data_udp_start, arg.in.ack_udp_start) != 0) { + ret = -1; + ubcore_log_err("Failed to restore error tp %u to rtr, cmd:%u", + arg.in.tpn, hdr->command); + goto put_device; + } + + if (set_to_rts && ubcore_restore_tp_error_to_rts(dev, tp) != 0) { + ret = -1; + ubcore_log_err("Failed to restore error tp %u to rts, cmd:%u", + arg.in.tpn, hdr->command); + goto 
put_device; + } + + ubcore_log_info("Success to restore tp %u error, cmd:%u", arg.in.tpn, hdr->command); + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_restore_tp_error_rsp(struct ubcore_cmd_hdr *hdr) +{ + return ubcore_cmd_restore_tp_error_op(hdr, true, true); +} + +static int ubcore_cmd_restore_target_tp_error_req(struct ubcore_cmd_hdr *hdr) +{ + return ubcore_cmd_restore_tp_error_op(hdr, true, false); +} + +static int ubcore_cmd_restore_target_tp_error_ack(struct ubcore_cmd_hdr *hdr) +{ + return ubcore_cmd_restore_tp_error_op(hdr, false, true); +} + +static int ubcore_cmd_restore_tp_suspend(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_restore_tp_suspend arg = {0}; + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_restore_tp_suspend)); + if (ret != 0) + return ret; + + /* Currently, 'netaddr' & 'type' are not necessary */ + dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB); + if (dev == NULL || dev->ops == NULL || dev->ops->modify_tp == NULL) + return -ENODEV; + + /* deal with RM first */ + tpg = ubcore_find_tpg(dev, arg.in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + tp = ubcore_find_tp_in_tpg(tpg, arg.in.tpn); + if (tp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tp"); + goto put_device; + } + + mask.value = 0; + mask.bs.state = 1; + mask.bs.data_udp_start = 1; + mask.bs.ack_udp_start = 1; + attr.state = UBCORE_TP_STATE_RTS; + attr.data_udp_start = arg.in.data_udp_start; + attr.ack_udp_start = arg.in.ack_udp_start; + ubcore_log_info("restore tp suspend(mask): state: %u, data_udp_start: %u, ack_udp_start: %u", + mask.bs.state, mask.bs.data_udp_start, mask.bs.ack_udp_start); + ubcore_log_info("restore tp suspend(attr): state: 
%u, data_udp_start: %hu, ack_udp_start: %hu", + (uint32_t)attr.state, attr.data_udp_start, attr.ack_udp_start); + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + ret = -1; + ubcore_log_err("Failed to modify tp"); + goto put_device; + } + tp->state = UBCORE_TP_STATE_RTS; + tp->data_udp_start = arg.in.data_udp_start; + tp->ack_udp_start = arg.in.ack_udp_start; + + ubcore_log_info("Success to restore tp suspend"); + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_get_dev_feature(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_get_dev_feature arg = {0}; + struct ubcore_device *dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_get_dev_feature)); + if (ret != 0) + return ret; + + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("no available devices found by dev_name: %s\n", arg.in.dev_name); + ret = -1; + return ret; + } + + arg.out.feature.value = dev->attr.dev_cap.feature.value; + ubcore_put_device(dev); + + if (ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_get_dev_feature)) != 0) + ret = -EPERM; + + return ret; +} + +static int ubcore_cmd_change_tp_to_error(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_change_tp_to_error arg = {0}; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_change_tp_to_error)); + if (ret != 0) + return ret; + + /* Currently, 'netaddr' & 'type' are not necessary */ + dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB); + if (dev == NULL || dev->ops == NULL || dev->ops->modify_tp == NULL) + return -ENODEV; + + tpg = ubcore_find_tpg(dev, arg.in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + tp = ubcore_find_tp_in_tpg(tpg, 
arg.in.tpn); + if (tp == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tp"); + goto put_device; + } + + if (ubcore_change_tp_to_err(dev, tp) != 0) { + ret = -EINVAL; + ubcore_log_err("Failed to change tp to error"); + goto put_device; + } + + ubcore_log_info("Success to change tp to error"); + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_change_tpg_to_error(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_change_tpg_to_error arg = {0}; + struct ubcore_device *dev; + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + int ret; + uint32_t i; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_change_tpg_to_error)); + if (ret != 0) + return ret; + + /* Currently, 'netaddr' & 'type' are not necessary */ + dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB); + if (dev == NULL || dev->ops == NULL || dev->ops->modify_tp == NULL) + return -ENODEV; + + tpg = ubcore_find_tpg(dev, arg.in.tpgn); + if (tpg == NULL) { + ret = -EINVAL; + ubcore_log_err("Failed to find tpg"); + goto put_device; + } + + for (i = 0; i < tpg->tpg_cfg.tp_cnt; i++) { + tp = tpg->tp_list[i]; + if (tp == NULL) { + ubcore_log_warn("tp in tpg %u is NULL", arg.in.tpgn); + continue; + } + + if (ubcore_change_tp_to_err(dev, tp) != 0) + ubcore_log_warn("Failed to change tp to error in tpg %u", arg.in.tpgn); + } + + ubcore_log_info("Success to change tpg to error"); + +put_device: + ubcore_put_device(dev); + return ret; +} + +static int ubcore_cmd_config_function_migrate_state(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_config_function_migrate_state arg = {0}; + struct ubcore_device *dev; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_config_function_migrate_state)); + if (ret != 0) + return ret; + + dev = ubcore_find_tpf_device(&arg.in.tpf.netaddr, arg.in.tpf.trans_type); + if (dev == NULL || dev->ops == NULL || 
dev->ops->config_function_migrate_state == NULL) { + ret = -ENODEV; + ubcore_log_err("fail to find tpf device"); + return ret; + } + + arg.out.cnt = (uint32_t)ubcore_config_function_migrate_state(dev, arg.in.fe_idx, + arg.in.config_cnt, &arg.in.config[0], arg.in.state); + + if (arg.out.cnt != arg.in.config_cnt) + ret = -EPERM; + + /* do not modify ret if copy success */ + if (ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_config_function_migrate_state)) != 0) + ret = -EPERM; + + ubcore_put_device(dev); + return ret; +} + +static int ubcore_init_modify_vtp(struct ubcore_device *dev, struct ubcore_cmd_vtp_cfg *vtp, + struct ubcore_vtp_param *vtp_param, struct ubcore_vtp_attr *vattr, + union ubcore_vtp_attr_mask *vattr_mask) +{ + vtp_param->trans_mode = vtp->trans_mode; + vtp_param->local_eid = vtp->local_eid; + vtp_param->peer_eid = vtp->peer_eid; + vtp_param->local_jetty = vtp->local_jetty; + vtp_param->peer_jetty = vtp->peer_jetty; + + if (vtp_param->trans_mode != UBCORE_TP_UM) { + vattr->tp.tpg = ubcore_find_tpg(dev, vtp->tpgn); + if (vattr->tp.tpg == NULL) { + ubcore_log_err("fail to find tpg"); + return -EPERM; + } + } else { + vattr->tp.utp = ubcore_find_utp(dev, vtp->utpn); + if (vattr->tp.utp == NULL) { + ubcore_log_err("fail to find utp"); + return -EPERM; + } + } + vattr_mask->bs.tp = 1; + + return 0; +} + +static int ubcore_cmd_modify_vtp(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_modify_vtp *arg; + struct ubcore_device *dev; + struct ubcore_vtp_param vtp_param = {0}; + struct ubcore_vtp_attr vattr = {0}; + union ubcore_vtp_attr_mask vattr_mask = {0}; + int ret; + uint32_t i; + + arg = kzalloc(sizeof(struct ubcore_cmd_modify_vtp), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = ubcore_copy_from_user(arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_modify_vtp)); + if (ret != 0) + goto free_arg; + + dev = ubcore_find_tpf_device(&arg->in.tpf.netaddr, 
arg->in.tpf.trans_type); + if (dev == NULL || dev->ops == NULL || dev->ops->modify_vtp == NULL) { + ret = -ENODEV; + ubcore_log_err("fail to find tpf device"); + goto free_arg; + } + + for (i = 0; i < arg->in.cfg_cnt; i++) { + ret = ubcore_init_modify_vtp(dev, &arg->in.vtp[i], &vtp_param, + &vattr, &vattr_mask); + if (ret < 0) { + ubcore_log_err("fail to init modify vtp"); + goto put_device; + } + + ret = ubcore_modify_vtp(dev, &vtp_param, &vattr, &vattr_mask); + if (ret < 0) { + ubcore_log_err("fail to modify vtp"); + goto put_device; + } + } + +put_device: + ubcore_put_device(dev); +free_arg: + kfree(arg); + return ret; +} + +typedef int (*ubcore_uvs_cmd_handler)(struct ubcore_cmd_hdr *hdr); + +static ubcore_uvs_cmd_handler g_ubcore_uvs_cmd_handlers[] = { + [0] = NULL, + [UBCORE_CMD_CHANNEL_INIT] = ubcore_uvs_cmd_channel_init, + [UBCORE_CMD_CREATE_TPG] = ubcore_cmd_create_tpg, + [UBCORE_CMD_CREATE_VTP] = ubcore_cmd_create_vtp, + [UBCORE_CMD_MODIFY_TPG] = ubcore_cmd_modify_tpg, + [UBCORE_CMD_CREATE_TARGET_TPG] = ubcore_cmd_create_target_tpg, + [UBCORE_CMD_MODIFY_TARGET_TPG] = ubcore_cmd_modify_target_tpg, + [UBCORE_CMD_DESTROY_VTP] = ubcore_cmd_destroy_vtp, + [UBCORE_CMD_DESTROY_TPG] = ubcore_cmd_destroy_tpg, + [UBCORE_CMD_ADD_SIP] = ubcore_cmd_opt_sip, + [UBCORE_CMD_DEL_SIP] = ubcore_cmd_opt_sip, + [UBCORE_CMD_ALLOC_EID] = ubcore_cmd_opt_update_eid, + [UBCORE_CMD_DEALLOC_EID] = ubcore_cmd_opt_update_eid, + [UBCORE_CMD_MAP_VTP] = ubcore_cmd_map_vtp, + [UBCORE_CMD_CREATE_UTP] = ubcore_cmd_create_utp, + [UBCORE_CMD_DESTROY_UTP] = ubcore_cmd_destroy_utp, + [UBCORE_CMD_RESTORE_TP_ERROR_RSP] = ubcore_cmd_restore_tp_error_rsp, + [UBCORE_CMD_RESTORE_TARGET_TP_ERROR_REQ] = ubcore_cmd_restore_target_tp_error_req, + [UBCORE_CMD_RESTORE_TARGET_TP_ERROR_ACK] = ubcore_cmd_restore_target_tp_error_ack, + [UBCORE_CMD_RESTORE_TP_SUSPEND] = ubcore_cmd_restore_tp_suspend, + [UBCORE_CMD_GET_DEV_FEATURE] = ubcore_cmd_get_dev_feature, + [UBCORE_CMD_CHANGE_TP_TO_ERROR] = 
ubcore_cmd_change_tp_to_error, + [UBCORE_CMD_SET_UPI] = ubcore_cmd_set_upi, + [UBCORE_CMD_SHOW_UPI] = ubcore_cmd_show_upi, + [UBCORE_CMD_SET_GLOBAL_CFG] = ubcore_cmd_set_global_cfg, + [UBCORE_CMD_CONFIG_FUNCTION_MIGRATE_STATE] = ubcore_cmd_config_function_migrate_state, + [UBCORE_CMD_SET_VPORT_CFG] = ubcore_cmd_set_vport_cfg, + [UBCORE_CMD_MODIFY_VTP] = ubcore_cmd_modify_vtp, + [UBCORE_CMD_GET_DEV_INFO] = ubcore_cmd_get_dev_info, + [UBCORE_CMD_CREATE_CTP] = ubcore_cmd_create_ctp, + [UBCORE_CMD_DESTROY_CTP] = ubcore_cmd_destroy_ctp, + [UBCORE_CMD_CHANGE_TPG_TO_ERROR] = ubcore_cmd_change_tpg_to_error, +}; + +int ubcore_uvs_cmd_parse(struct ubcore_cmd_hdr *hdr) +{ + if (hdr->command < UBCORE_CMD_CHANNEL_INIT || hdr->command >= UBCORE_CMD_LAST || + g_ubcore_uvs_cmd_handlers[hdr->command] == NULL) { + ubcore_log_err("bad ubcore command: %d.\n", (int)hdr->command); + return -EINVAL; + } + return g_ubcore_uvs_cmd_handlers[hdr->command](hdr); +} diff --git a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h new file mode 100644 index 000000000000..feb5f348e1f2 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore uvs cmd header file + * Author: Ji Lei + * Create: 2023-07-03 + * Note: + * History: 2023-07-03: Create file + */ + +#ifndef UBCORE_UVS_CMD_H +#define UBCORE_UVS_CMD_H + +#include +#include + +#include "ubcore_cmd.h" +#include "ubcore_log.h" +#include +#include "ubcore_priv.h" +#include "ubcore_vtp.h" + +#define UBCORE_UVS_CMD_MAGIC 'V' +#define UBCORE_UVS_CMD _IOWR(UBCORE_UVS_CMD_MAGIC, 1, struct ubcore_cmd_hdr) +#define UBCORE_CMD_CHANNEL_INIT_SIZE 32 +#define UBCORE_UVS_CMD_DEV_MAX 64 +#define UBCORE_MAX_VTP_CFG_CNT 32 +#define UBCORE_MAX_EID_CONFIG_CNT 32 + +/* only for uvs control ubcore device ioctl */ +enum ubcore_uvs_cmd { + UBCORE_CMD_CHANNEL_INIT = 1, + UBCORE_CMD_CREATE_TPG, /* initiator */ + UBCORE_CMD_CREATE_VTP, /* initiator */ + UBCORE_CMD_MODIFY_TPG, + UBCORE_CMD_CREATE_TARGET_TPG, /* target */ + UBCORE_CMD_MODIFY_TARGET_TPG, /* target */ + UBCORE_CMD_DESTROY_VTP, /* initiator or target */ + UBCORE_CMD_DESTROY_TPG, /* initiator or target */ + UBCORE_CMD_ADD_SIP, + UBCORE_CMD_DEL_SIP, + UBCORE_CMD_ALLOC_EID, + UBCORE_CMD_DEALLOC_EID, + UBCORE_CMD_MAP_VTP, + UBCORE_CMD_CREATE_UTP, + UBCORE_CMD_DESTROY_UTP, + UBCORE_CMD_GET_DEV_FEATURE, + UBCORE_CMD_RESTORE_TP_ERROR_RSP, + UBCORE_CMD_RESTORE_TARGET_TP_ERROR_REQ, + UBCORE_CMD_RESTORE_TARGET_TP_ERROR_ACK, + UBCORE_CMD_RESTORE_TP_SUSPEND, + UBCORE_CMD_CHANGE_TP_TO_ERROR, + UBCORE_CMD_SET_UPI, + UBCORE_CMD_SHOW_UPI, + UBCORE_CMD_SET_GLOBAL_CFG, + UBCORE_CMD_CONFIG_FUNCTION_MIGRATE_STATE, + UBCORE_CMD_SET_VPORT_CFG, + UBCORE_CMD_MODIFY_VTP, + UBCORE_CMD_GET_DEV_INFO, + UBCORE_CMD_CREATE_CTP, + UBCORE_CMD_DESTROY_CTP, + UBCORE_CMD_CHANGE_TPG_TO_ERROR, + UBCORE_CMD_LAST +}; + +struct ubcore_cmd_channel_init { + struct { + char userspace_in[UBCORE_CMD_CHANNEL_INIT_SIZE]; + } in; + struct { + char kernel_out[UBCORE_CMD_CHANNEL_INIT_SIZE]; + } out; +}; + +struct ubcore_cmd_tpf { + enum ubcore_transport_type trans_type; + struct ubcore_net_addr netaddr; +}; + +struct ubcore_cmd_tp_cfg 
{ + union ubcore_tp_cfg_flag flag; /* flag of initial tp */ + /* transaction layer attributes */ + union { + union ubcore_eid local_eid; + struct ubcore_jetty_id local_jetty; + }; + uint16_t fe_idx; + union { + union ubcore_eid peer_eid; + struct ubcore_jetty_id peer_jetty; + }; + /* tranport layer attributes */ + enum ubcore_transport_mode trans_mode; + uint8_t retry_num; + uint8_t retry_factor; /* for calculate the time slot to retry */ + uint8_t ack_timeout; + uint8_t dscp; /* priority */ + uint32_t oor_cnt; /* OOR window size: by packet */ +}; + +struct ubcore_udrv_ext { + uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; +}; + +/* create tpg and all tp in the tpg */ +struct ubcore_cmd_create_tpg { + struct { + struct ubcore_cmd_tpf tpf; + struct ubcore_tpg_cfg tpg_cfg; + struct ubcore_cmd_tp_cfg tp_cfg[UBCORE_MAX_TP_CNT_IN_GRP]; + } in; + struct { + uint32_t tpgn; + uint32_t tpn[UBCORE_MAX_TP_CNT_IN_GRP]; + } out; + /* for alpha */ + struct ubcore_ta_data ta_data; + enum ubcore_mtu local_mtu; + struct ubcore_udrv_priv udata; + struct ubcore_udrv_ext udrv_ext; +}; + +/* modify tps in the tp list of tpg to RTR, RTS, and then map vtpn to tpg */ +struct ubcore_cmd_create_vtp { + struct { + struct ubcore_cmd_tpf tpf; + /* modify tp to RTR */ + uint32_t tpgn; + struct ubcore_tp_attr rtr_attr[UBCORE_MAX_TP_CNT_IN_GRP]; + union ubcore_tp_attr_mask rtr_mask[UBCORE_MAX_TP_CNT_IN_GRP]; + /* modify tp to RTS */ + /* create vtp */ + struct ubcore_cmd_vtp_cfg vtp; + } in; + struct { + uint32_t rtr_tp_cnt; + uint32_t rts_tp_cnt; + uint32_t vtpn; + } out; +}; + +/* modify tps in the tp list of tpg to RTR, RTS */ +struct ubcore_cmd_modify_tpg { + struct { + struct ubcore_cmd_tpf tpf; + /* modify tp to RTR */ + uint32_t tpgn; + struct ubcore_tp_attr rtr_attr[UBCORE_MAX_TP_CNT_IN_GRP]; + union ubcore_tp_attr_mask rtr_mask[UBCORE_MAX_TP_CNT_IN_GRP]; + /* modify tp to RTS */ + } in; + struct { + uint32_t rtr_tp_cnt; + uint32_t rts_tp_cnt; + } out; + /* 
for alpha */ + struct ubcore_ta_data ta_data; + struct ubcore_udrv_ext udrv_ext; +}; + +/* create tpg, create and modify tps in it to RTR at target */ +struct ubcore_cmd_create_target_tpg { + struct { + struct ubcore_cmd_tpf tpf; + /* create tpg and the tps in the tpg */ + struct ubcore_tpg_cfg tpg_cfg; + struct ubcore_cmd_tp_cfg tp_cfg[UBCORE_MAX_TP_CNT_IN_GRP]; + /* modify tp to RTR */ + struct ubcore_tp_attr rtr_attr[UBCORE_MAX_TP_CNT_IN_GRP]; + union ubcore_tp_attr_mask rtr_mask[UBCORE_MAX_TP_CNT_IN_GRP]; + } in; + struct { + uint32_t tpgn; + uint32_t tpn[UBCORE_MAX_TP_CNT_IN_GRP]; + } out; + /* for alpha */ + struct ubcore_ta_data ta_data; + enum ubcore_mtu local_mtu; + enum ubcore_mtu peer_mtu; + struct ubcore_udrv_priv udata; + struct ubcore_udrv_ext udrv_ext; +}; + +/* modify tps in the tpg to RTS at target */ +struct ubcore_cmd_modify_target_tpg { + struct { + struct ubcore_cmd_tpf tpf; + uint32_t tpgn; + } in; + struct { + uint32_t rts_tp_cnt; + } out; + /* for alpha */ + struct ubcore_ta_data ta_data; +}; + +struct ubcore_cmd_destroy_vtp { + struct { + struct ubcore_cmd_tpf tpf; + enum ubcore_transport_mode mode; + uint32_t local_jetty; + /* key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + /* key end */ + } in; +}; + +/* modify to error, reset, destroy tps in the tp_list of tpg, then destroy tpg */ +struct ubcore_cmd_destroy_tpg { + struct { + struct ubcore_cmd_tpf tpf; + uint32_t tpgn; + } in; + struct { + uint32_t destroyed_tp_cnt; /* the first "destroyed_tp_cnt" tps are destroyed */ + } out; + /* for alpha */ + struct ubcore_ta_data ta_data; +}; + +struct ubcore_cmd_opt_sip { + struct { + struct ubcore_sip_info info; + } in; +}; + +struct ubcore_cmd_map_vtp { + struct { + struct ubcore_cmd_tpf tpf; + /* create vtp */ + struct ubcore_cmd_vtp_cfg vtp; + } in; + struct { + uint32_t vtpn; + } out; +}; + +/* create utp */ +struct ubcore_cmd_create_utp { + struct { + struct ubcore_cmd_tpf tpf; + struct 
ubcore_utp_cfg utp_cfg; + struct ubcore_cmd_vtp_cfg vtp; + /* todonext: add user data */ + } in; + struct { + uint32_t idx; + uint32_t vtpn; + } out; +}; + +/* destroy utp */ +struct ubcore_cmd_destroy_utp { + struct { + struct ubcore_cmd_tpf tpf; + uint32_t utp_idx; + /* todonext: add user data and ext */ + } in; +}; + +/* create ctp */ +struct ubcore_cmd_create_ctp { + struct { + struct ubcore_cmd_tpf tpf; + struct ubcore_ctp_cfg ctp_cfg; + struct ubcore_cmd_vtp_cfg vtp; + /* todonext: add user data */ + } in; + struct { + uint32_t idx; + uint32_t vtpn; + } out; +}; + +/* destroy ctp */ +struct ubcore_cmd_destroy_ctp { + struct { + struct ubcore_cmd_tpf tpf; + uint32_t ctp_idx; + /* todonext: add user data and ext */ + } in; +}; + +/* modify vtp */ +struct ubcore_cmd_modify_vtp { + struct { + struct ubcore_cmd_tpf tpf; + struct ubcore_cmd_vtp_cfg vtp[UBCORE_MAX_VTP_CFG_CNT]; + uint32_t cfg_cnt; + } in; +}; + +/* restore tp error */ +struct ubcore_cmd_restore_tp_error { + struct { + uint32_t tpgn; + uint32_t tpn; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint32_t rx_psn; + uint32_t tx_psn; + } in; +}; + +/* restore tp suspend */ +struct ubcore_cmd_restore_tp_suspend { + struct { + uint32_t tpgn; + uint32_t tpn; + uint16_t data_udp_start; + uint16_t ack_udp_start; + } in; +}; + +/* get sr feature */ +struct ubcore_cmd_get_dev_feature { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + } in; + struct { + union ubcore_device_feat feature; + } out; +}; + +/* change tp to error */ +struct ubcore_cmd_change_tp_to_error { + struct { + uint32_t tpgn; + uint32_t tpn; + } in; +}; + +struct ubcore_cmd_opt_eid { + struct { + char dev_name[UBCORE_UVS_CMD_DEV_MAX]; + uint32_t upi; + uint16_t fe_idx; + union ubcore_eid eid; + uint32_t eid_index; + } in; +}; + +struct ubcore_cmd_set_upi { + struct { + char dev_name[UBCORE_UVS_CMD_DEV_MAX]; + uint32_t upi; + } in; +}; + +struct ubcore_cmd_show_upi { + struct { + char dev_name[UBCORE_UVS_CMD_DEV_MAX]; + } in; + struct 
{ + uint32_t upi; + } out; +}; + +struct ubcore_cmd_get_dev_info { + struct { + struct ubcore_cmd_tpf tpf; + union ubcore_eid peer_eid; + uint32_t eid_index; + } in; + struct { + char target_tpf_name[UBCORE_UVS_CMD_DEV_MAX]; + char target_pf_name[UBCORE_UVS_CMD_DEV_MAX]; + bool port_is_active; + } out; +}; + +struct ubcore_cmd_set_global_cfg { + struct { + struct ubcore_set_global_cfg global_cfg; + } in; +}; + +struct ubcore_cmd_config_function_migrate_state { + struct { + uint16_t fe_idx; + struct ubcore_cmd_tpf tpf; + struct ubcore_ueid_cfg config[UBCORE_MAX_EID_CONFIG_CNT]; + uint32_t config_cnt; + enum ubcore_mig_state state; + } in; + struct { + uint32_t cnt; + } out; +}; + +struct ubcore_cmd_set_vport_cfg { + struct { + struct ubcore_set_vport_cfg vport_cfg; + } in; +}; + +struct ubcore_cmd_change_tpg_to_error { + struct { + uint32_t tpgn; + struct ubcore_cmd_tpf tpf; + } in; +}; + +int ubcore_uvs_cmd_parse(struct ubcore_cmd_hdr *hdr); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_vtp.c b/drivers/ub/urma/ubcore/ubcore_vtp.c new file mode 100644 index 000000000000..7159569116dd --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_vtp.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore vtp implementation + * Author: Yan Fangfang + * Create: 2023-07-14 + * Note: + * History: 2023-07-14: Create file + */ + +#include +#include "ubcore_msg.h" +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_priv.h" +#include +#include "ubcore_netdev.h" +#include "ubcore_vtp.h" + +static int ubcore_handle_create_vtp_resp(struct ubcore_device *dev, + struct ubcore_msg *msg, void *user_arg) +{ + struct ubcore_create_vtp_resp *resp = (struct ubcore_create_vtp_resp *)msg->data; + struct ubcore_vtpn *vtpn = (struct ubcore_vtpn *)user_arg; + + if (resp->ret == UBCORE_MSG_RESP_FAIL) { + ubcore_log_err("failed to create vtp: response error"); + return -1; + } else if (resp->ret == UBCORE_MSG_RESP_IN_PROGRESS) { + ubcore_log_err("failed: try to create vtp which is being created. Try again later"); + return -1; + } else if (resp->ret == UBCORE_MSG_RESP_RC_JETTY_ALREADY_BIND) { + ubcore_log_err("failed: rc jetty already bind by other jetty"); + return -1; + } + /* tpf may return a new vtpn */ + vtpn->vtpn = resp->vtpn; + atomic_set(&vtpn->state, (int)UBCORE_VTPS_READY); + return 0; +} + +static int ubcore_send_create_vtp_req(struct ubcore_device *dev, + struct ubcore_vtp_param *p, struct ubcore_vtpn *vtpn) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_create_vtp_req); + struct ubcore_create_vtp_req *create; + struct ubcore_msg *req; + struct ubcore_resp_cb cb; + int ret; + + req = kzalloc(sizeof(struct ubcore_msg) + data_len, GFP_KERNEL); + req->hdr.type = UBCORE_MSG_TYPE_FE2TPF; + req->hdr.opcode = UBCORE_MSG_CREATE_VTP; + req->hdr.len = data_len; + + create = (struct ubcore_create_vtp_req *)req->data; + create->vtpn = vtpn->vtpn; + create->trans_mode = p->trans_mode; + create->local_eid = p->local_eid; + create->peer_eid = p->peer_eid; + create->eid_index = p->eid_index; + create->local_jetty = p->local_jetty; + create->peer_jetty = p->peer_jetty; + (void)strcpy(create->dev_name, dev->dev_name); + 
+	create->virtualization = dev->attr.virtualization;
+
+	cb.callback = ubcore_handle_create_vtp_resp;
+	cb.user_arg = vtpn;
+	ret = ubcore_send_fe2tpf_msg(dev, req, p->wait, &cb);
+	/* do not free req here */
+	return ret;
+}
+
+/* Completion callback for a destroy-vtp request: on success mark the
+ * vtpn DELETED; any error response returns -1.
+ */
+static int ubcore_handle_del_vtp_resp(struct ubcore_device *dev,
+	struct ubcore_msg *msg, void *user_arg)
+{
+	struct ubcore_destroy_vtp_resp *resp = (struct ubcore_destroy_vtp_resp *)msg->data;
+	struct ubcore_vtpn *vtpn = (struct ubcore_vtpn *)user_arg;
+
+	if (resp->ret == UBCORE_MSG_RESP_FAIL) {
+		ubcore_log_err("failed to destroy vtp: response error");
+		return -1;
+	} else if (resp->ret == UBCORE_MSG_RESP_IN_PROGRESS) {
+		ubcore_log_err("failed: try to del vtp which is being created. Try again later");
+		return -1;
+	}
+	atomic_set(&vtpn->state, (int)UBCORE_VTPS_DELETED);
+	return 0;
+}
+
+/* Build and send a FE2TPF destroy-vtp request for @vtpn (reuses the
+ * create-req layout). Always waits for the response. Returns 0 or -errno.
+ */
+static int ubcore_send_del_vtp_req(struct ubcore_vtpn *vtpn)
+{
+	uint32_t data_len = (uint32_t)sizeof(struct ubcore_create_vtp_req);
+	struct ubcore_create_vtp_req *destroy;
+	struct ubcore_msg *req;
+	struct ubcore_resp_cb cb;
+	int ret;
+
+	req = kzalloc(sizeof(struct ubcore_msg) + data_len, GFP_KERNEL);
+	/* Fix: kzalloc() can fail; the original dereferenced req unconditionally */
+	if (req == NULL)
+		return -ENOMEM;
+	req->hdr.type = UBCORE_MSG_TYPE_FE2TPF;
+	req->hdr.opcode = UBCORE_MSG_DESTROY_VTP;
+	req->hdr.len = data_len;
+
+	destroy = (struct ubcore_create_vtp_req *)req->data;
+	destroy->vtpn = vtpn->vtpn;
+	destroy->trans_mode = vtpn->trans_mode;
+	destroy->local_eid = vtpn->local_eid;
+	destroy->peer_eid = vtpn->peer_eid;
+	destroy->eid_index = vtpn->eid_index;
+	destroy->local_jetty = vtpn->local_jetty;
+	destroy->peer_jetty = vtpn->peer_jetty;
+	(void)memcpy(destroy->dev_name, vtpn->ub_dev->dev_name, UBCORE_MAX_DEV_NAME);
+	destroy->virtualization = vtpn->ub_dev->attr.virtualization;
+
+	cb.callback = ubcore_handle_del_vtp_resp;
+	cb.user_arg = vtpn;
+	ret = ubcore_send_fe2tpf_msg(vtpn->ub_dev, req, true, &cb);
+	/* do not free req here */
+	return ret;
+}
+
+/* Allocate a vtpn from the driver and initialize it from @param with an
+ * initial use count of 1 in CREATING state. Returns NULL on failure.
+ */
+static struct ubcore_vtpn *ubcore_alloc_vtpn(struct ubcore_device *dev,
+	struct ubcore_vtp_param *param)
+{
+	struct ubcore_vtpn *vtpn;
+
+	if (dev->ops == NULL || dev->ops->alloc_vtpn == NULL)
+		return NULL;
+
+	vtpn = dev->ops->alloc_vtpn(dev);
+	/* Fix: driver alloc_vtpn() may return NULL; the original dereferenced it
+	 * unconditionally
+	 */
+	if (vtpn == NULL) {
+		ubcore_log_err("failed to alloc vtpn from driver");
+		return NULL;
+	}
+	vtpn->ub_dev = dev;
+	atomic_set(&vtpn->use_cnt, 1);
+	atomic_set(&vtpn->state, (int)UBCORE_VTPS_CREATING);
+	vtpn->trans_mode = param->trans_mode;
+	vtpn->local_eid = param->local_eid;
+	vtpn->peer_eid = param->peer_eid;
+	vtpn->eid_index = param->eid_index;
+	vtpn->local_jetty = param->local_jetty;
+	vtpn->peer_jetty = param->peer_jetty;
+	return vtpn;
+}
+
+/* Drop one reference on @vtpn; when the count reaches zero, mark it
+ * DELETING and release it through the driver. Returns 0 or -EINVAL.
+ */
+static int ubcore_free_vtpn(struct ubcore_vtpn *vtpn)
+{
+	struct ubcore_device *dev = vtpn->ub_dev;
+
+	if (dev == NULL || dev->ops == NULL || dev->ops->free_vtpn == NULL) {
+		ubcore_log_err("dev == NULL || dev->ops == NULL || dev->ops->free_vtpn == NULL");
+		return -EINVAL;
+	}
+
+	if (atomic_dec_return(&vtpn->use_cnt) > 0) {
+		ubcore_log_info("vtpn in use, vtpn id = %u, vtpn use_cnt = %d",
+			vtpn->vtpn, atomic_read(&vtpn->use_cnt));
+		return 0;
+	}
+	atomic_set(&vtpn->state, (int)UBCORE_VTPS_DELETING);
+
+	return dev->ops->free_vtpn(vtpn);
+}
+
+/* Insert @new_vtpn into the VTPN table unless an entry with the same local
+ * eid already exists; caller must hold ht->lock. Returns 0 on insertion,
+ * -EEXIST (with *exist_vtpn set) on duplicate, -EINVAL if the table is gone.
+ */
+static int ubcore_find_add_vtpn(struct ubcore_device *dev,
+	struct ubcore_vtpn *new_vtpn, struct ubcore_vtpn **exist_vtpn)
+{
+	struct ubcore_hash_table *ht;
+
+	ht = &dev->ht[UBCORE_HT_VTPN];
+	if (ht->head == NULL) {
+		ubcore_log_err("hash table's head equals NULL");
+		return -EINVAL;
+	}
+
+	*exist_vtpn = ubcore_hash_table_lookup_nolock(ht,
+		ubcore_get_vtpn_hash(&new_vtpn->local_eid), &new_vtpn->local_eid);
+	if (*exist_vtpn != NULL)
+		return -EEXIST;
+
+	ubcore_hash_table_add_nolock(ht, &new_vtpn->hnode,
+		ubcore_get_vtpn_hash(&new_vtpn->local_eid));
+
+	return 0;
+}
+
+/* Connect (or reuse) an RM-mode vtpn keyed by local eid. */
+static struct ubcore_vtpn *ubcore_connect_rm_vtp(struct ubcore_device *dev,
+	struct ubcore_vtp_param *param)
+{
+	struct ubcore_vtpn *exist_vtpn;
+	struct ubcore_hash_table *ht;
+	struct ubcore_vtpn *new_vtpn;
+	struct ubcore_vtpn *old_vtpn;
+	int ret;
+
+	ht = &dev->ht[UBCORE_HT_VTPN];
+	/* reuse */
+
+	/* NOTE(review): ubcore_send_create_vtp_req() is called further down while
+	 * this spinlock is held, and it allocates with GFP_KERNEL (may sleep) —
+	 * possible sleep-in-atomic; confirm ht->lock type / rework to allocate
+	 * outside the lock.
+	 */
+	spin_lock(&ht->lock);
+ old_vtpn = ubcore_hash_table_lookup_nolock(ht, ubcore_get_vtpn_hash(¶m->local_eid), + ¶m->local_eid); + if (old_vtpn != NULL && atomic_read(&old_vtpn->state) == (int)UBCORE_VTPS_READY) { + atomic_inc(&old_vtpn->use_cnt); + ubcore_log_info("reuse vtpn, with vtpn id = %u, use cnt = %d", + old_vtpn->vtpn, atomic_read(&old_vtpn->use_cnt)); + spin_unlock(&ht->lock); + return old_vtpn; + } else if (old_vtpn != NULL && atomic_read(&old_vtpn->state) == (int)UBCORE_VTPS_CREATING) { + ubcore_log_info("vtpn is already in the list but its creation hasn't completed yet"); + spin_unlock(&ht->lock); + return NULL; + } + + new_vtpn = ubcore_alloc_vtpn(dev, param); + if (new_vtpn == NULL) { + ubcore_log_err("failed to alloc vtpn!"); + spin_unlock(&ht->lock); + return NULL; + } + + /* Conncurrency: only one thread can add vtpn to table successfully */ + ret = ubcore_find_add_vtpn(dev, new_vtpn, &exist_vtpn); + if (ret == -EINVAL) { + (void)ubcore_free_vtpn(new_vtpn); + new_vtpn = NULL; + spin_unlock(&ht->lock); + return NULL; + } else if (ret == -EEXIST) { + (void)ubcore_free_vtpn(new_vtpn); + new_vtpn = NULL; + + if (atomic_read(&exist_vtpn->state) == (int)UBCORE_VTPS_READY) { + atomic_inc(&exist_vtpn->use_cnt); + ubcore_log_info("success to reuse the vtpn, it is ready"); + } else { + exist_vtpn = NULL; + ubcore_log_err("failed to reuse the vtpn, it is not ready"); + } + + spin_unlock(&ht->lock); + return exist_vtpn; + } + /* TODO: port_idx use 0, for now tp_cnt = 1 */ + if (ubcore_check_port_state(dev, 0) != 0 || + ubcore_send_create_vtp_req(dev, param, new_vtpn) != 0) { + ubcore_hash_table_remove_nolock(ht, &new_vtpn->hnode); + (void)ubcore_free_vtpn(new_vtpn); + ubcore_log_err("failed to send create vtp req"); + spin_unlock(&ht->lock); + return NULL; + } + + spin_unlock(&ht->lock); + return new_vtpn; +} + +struct ubcore_vtpn *ubcore_connect_vtp(struct ubcore_device *dev, + struct ubcore_vtp_param *param) +{ + struct ubcore_vtpn *vtpn; + + if (dev == NULL || param == NULL) { + 
ubcore_log_err("Invalid param"); + return NULL; + } + + if (param->trans_mode == UBCORE_TP_RM) + return ubcore_connect_rm_vtp(dev, param); + + vtpn = ubcore_alloc_vtpn(dev, param); + if (vtpn == NULL) + return NULL; + + if (ubcore_check_port_state(dev, 0) != 0 || ubcore_send_create_vtp_req(dev, param, vtpn)) { + (void)ubcore_free_vtpn(vtpn); + ubcore_log_err("failed to send create vtp req"); + return NULL; + } + return vtpn; +} + +static int ubcore_disconnect_rm_vtp(struct ubcore_vtpn *vtpn) +{ + struct ubcore_device *dev = vtpn->ub_dev; + int ret; + + if (vtpn->ub_dev == NULL) + return -EINVAL; + + if (atomic_dec_return(&vtpn->use_cnt) > 0) { + ubcore_log_info("vtpn in use, vtpn id = %u, vtpn use_cnt = %d", + vtpn->vtpn, atomic_read(&vtpn->use_cnt)); + return 0; + } + + ret = ubcore_send_del_vtp_req(vtpn); + if (ret != 0) { + atomic_inc(&vtpn->use_cnt); + ubcore_log_err("failed to send del vtp req"); + return ret; + } + + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_VTPN], &vtpn->hnode); + + ret = ubcore_free_vtpn(vtpn); + if (ret != 0) { + ubcore_hash_table_add(&vtpn->ub_dev->ht[UBCORE_HT_VTPN], &vtpn->hnode, + ubcore_get_vtpn_hash(&vtpn->local_eid)); + ubcore_log_err("failed to free vtp"); + /* TODO roll back, start connect_vtp process again */ + return ret; + } + + return 0; +} + +int ubcore_disconnect_vtp(struct ubcore_vtpn *vtpn) +{ + int ret; + + if (vtpn == NULL) { + ubcore_log_err("vtp has been deleted\n"); + return -1; + } + + if (vtpn->trans_mode == UBCORE_TP_RM) + return ubcore_disconnect_rm_vtp(vtpn); + + ret = ubcore_send_del_vtp_req(vtpn); + if (ret != 0) { + ubcore_log_err("failed to send del vtp req"); + return ret; + } + + ret = ubcore_free_vtpn(vtpn); + if (ret != 0) { + ubcore_log_err("failed to free vtp"); + return ret; + } + + return 0; +} + +static int ubcore_find_add_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, struct ubcore_vtp *vtp) +{ + int ret; + + switch (mode) { + case UBCORE_TP_RM: + ret = 
ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_RM_VTP], &vtp->hnode, + ubcore_get_vtp_hash(&vtp->cfg.local_eid)); + break; + case UBCORE_TP_RC: + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_RC_VTP], &vtp->hnode, + ubcore_get_rc_vtp_hash(&vtp->cfg.peer_eid)); + break; + case UBCORE_TP_UM: + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_UM_VTP], &vtp->hnode, + ubcore_get_vtp_hash(&vtp->cfg.local_eid)); + break; + default: + ubcore_log_err("unknown mode"); + ret = -EINVAL; + break; + } + + return ret; +} + +struct ubcore_vtp *ubcore_map_vtp(struct ubcore_device *dev, struct ubcore_vtp_cfg *cfg) +{ + struct ubcore_vtp *vtp; + int ret; + + if (dev->ops == NULL || dev->ops->create_vtp == NULL) + return NULL; + + vtp = dev->ops->create_vtp(dev, cfg, NULL); + if (vtp == NULL) { + ubcore_log_err("Failed to create vtp"); + return NULL; + } + + vtp->ub_dev = dev; + + ret = ubcore_find_add_vtp(dev, cfg->trans_mode, vtp); + if (ret != 0) { + (void)dev->ops->destroy_vtp(vtp); + vtp = NULL; + ubcore_log_err("Failed to add vtp to the vtp table"); + return vtp; + } + + if (cfg->flag.bs.clan_tp) { + atomic_inc(&cfg->ctp->use_cnt); + } else { + if (cfg->trans_mode != UBCORE_TP_UM) + atomic_inc(&cfg->tpg->use_cnt); + else + atomic_inc(&cfg->utp->use_cnt); + } + + return vtp; +} + +static void ubcore_remove_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, struct ubcore_vtp *vtp) +{ + switch (mode) { + case UBCORE_TP_RM: + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_RM_VTP], &vtp->hnode); + break; + case UBCORE_TP_RC: + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_RC_VTP], &vtp->hnode); + break; + case UBCORE_TP_UM: + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_UM_VTP], &vtp->hnode); + break; + default: + ubcore_log_err("unknown mode"); + break; + } +} + +int ubcore_unmap_vtp(struct ubcore_vtp *vtp) +{ + struct ubcore_device *dev = vtp->ub_dev; + struct ubcore_vtp_cfg cfg; + int ret = 0; + + if (vtp == NULL || dev == NULL || dev->ops == NULL || 
dev->ops->destroy_vtp == NULL) + return -EINVAL; + + cfg = vtp->cfg; + + ubcore_remove_vtp(dev, cfg.trans_mode, vtp); + + ret = dev->ops->destroy_vtp(vtp); + if (ret != 0) { + (void)ubcore_find_add_vtp(dev, cfg.trans_mode, vtp); + ubcore_log_err("Failed to destroy vtp"); + return ret; + } + + if (cfg.flag.bs.clan_tp) { + atomic_dec(&cfg.ctp->use_cnt); + } else { + if (cfg.trans_mode != UBCORE_TP_UM) + atomic_dec(&cfg.tpg->use_cnt); + else + atomic_dec(&cfg.utp->use_cnt); + } + + return ret; +} + +struct ubcore_vtp *ubcore_find_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, union ubcore_eid *local_eid, union ubcore_eid *peer_eid) +{ + struct ubcore_vtp *vtp_entry; + + switch (mode) { + case UBCORE_TP_RM: + vtp_entry = ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_RM_VTP], + ubcore_get_vtp_hash(local_eid), local_eid); + break; + case UBCORE_TP_RC: + vtp_entry = ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_RC_VTP], + ubcore_get_rc_vtp_hash(peer_eid), peer_eid); + break; + case UBCORE_TP_UM: + vtp_entry = ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_UM_VTP], + ubcore_get_vtp_hash(local_eid), local_eid); + break; + default: + ubcore_log_err("unknown mode"); + vtp_entry = NULL; + } + return vtp_entry; +} + +void ubcore_set_vtp_param(struct ubcore_device *dev, struct ubcore_jetty *jetty, + struct ubcore_tjetty_cfg *cfg, struct ubcore_vtp_param *vtp_param) +{ + vtp_param->trans_mode = cfg->trans_mode; + /* + * RM/UM VTP for userspace app: get local eid from ucontext + * RM/UM VTP for kernel app: how to get local eid ? 
+ * RC VTP: get eid from jetty + */ + vtp_param->local_eid = dev->eid_table.eid_entries[cfg->eid_index].eid; + vtp_param->peer_eid = cfg->id.eid; + if (jetty != NULL) + vtp_param->local_jetty = jetty->id; + else + vtp_param->local_jetty = 0; + + vtp_param->peer_jetty = cfg->id.id; + vtp_param->eid_index = cfg->eid_index; + vtp_param->wait = true; +} + +int ubcore_config_function_migrate_state(struct ubcore_device *dev, uint16_t fe_idx, + uint32_t cnt, struct ubcore_ueid_cfg *cfg, enum ubcore_mig_state state) +{ + int ret; + + if (cfg == NULL) { + ret = -EINVAL; + ubcore_log_err("ubcore ueid cfg is null"); + return ret; + } + + ret = dev->ops->config_function_migrate_state(dev, fe_idx, cnt, cfg, state); + if (ret < 0) + ubcore_log_err("Fail to config function migrate state"); + + return ret; +} + +int ubcore_modify_vtp(struct ubcore_device *dev, struct ubcore_vtp_param *vtp_param, + struct ubcore_vtp_attr *vattr, union ubcore_vtp_attr_mask *vattr_mask) +{ + struct ubcore_vtp *vtp; + int ret; + + vtp = ubcore_find_vtp(dev, vtp_param->trans_mode, + &vtp_param->local_eid, &vtp_param->peer_eid); + if (vtp == NULL) { + ubcore_log_err("Fail to find vtp when modify vtp"); + return -EINVAL; + } + + ret = dev->ops->modify_vtp(vtp, vattr, vattr_mask); + if (ret != 0) { + ubcore_log_err("Fail to modify vtp when call ubcore ops"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcore_vtp.h b/drivers/ub/urma/ubcore/ubcore_vtp.h new file mode 100644 index 000000000000..67b6fdfff38c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_vtp.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore vtp header + * Author: Yan Fangfang + * Create: 2023-07-14 + * Note: + * History: 2023-07-14: Create file + */ + +#ifndef UBCORE_VTP_H +#define UBCORE_VTP_H + +#include +#include "ubcore_netlink.h" +#include "ubcore_msg.h" +#include "ubcore_netlink.h" +#include "ubcore_tp.h" + +struct ubcore_vtp_param { + enum ubcore_transport_mode trans_mode; + /* RM vtpn key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + /* RM vtpn key end */ + uint32_t local_jetty; + uint32_t peer_jetty; + uint32_t eid_index; + bool wait; /* wait blockingly or no wait */ + /* for alpha */ + struct ubcore_ta ta; +}; + +struct ubcore_create_vtp_req { + uint32_t vtpn; + enum ubcore_transport_mode trans_mode; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t eid_index; + uint32_t local_jetty; + uint32_t peer_jetty; + char dev_name[UBCORE_MAX_DEV_NAME]; + bool virtualization; + char tpfdev_name[UBCORE_MAX_DEV_NAME]; + /* for alpha */ + struct ubcore_ta_data ta_data; + uint32_t udrv_in_len; + uint32_t udrv_out_len; + uint8_t udrv_data[0]; +}; + +struct ubcore_create_vtp_resp { + enum ubcore_msg_resp_status ret; + uint32_t vtpn; + uint32_t udrv_out_len; + uint8_t udrv_out_data[0]; +}; + +struct ubcore_destroy_vtp_resp { + enum ubcore_msg_resp_status ret; +}; + +/* map vtpn to tpg, tp, utp or ctp */ +struct ubcore_cmd_vtp_cfg { + uint16_t fe_idx; + uint32_t vtpn; + uint32_t local_jetty; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + union ubcore_vtp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union { + uint32_t tpgn; + uint32_t tpn; + uint32_t utpn; + uint32_t ctpn; + uint32_t value; + }; +}; + +struct ubcore_migrate_vtp_req { + struct 
ubcore_cmd_vtp_cfg vtp_cfg; + char dev_name[UBCORE_MAX_DEV_NAME]; + enum ubcore_event_type event_type; +}; + +struct ubcore_vtpn *ubcore_connect_vtp(struct ubcore_device *dev, + struct ubcore_vtp_param *param); +int ubcore_disconnect_vtp(struct ubcore_vtpn *vtpn); +/* map vtp to tpg, utp .... */ +struct ubcore_vtp *ubcore_map_vtp(struct ubcore_device *dev, struct ubcore_vtp_cfg *cfg); +int ubcore_unmap_vtp(struct ubcore_vtp *vtp); +/* find mapped vtp */ +struct ubcore_vtp *ubcore_find_vtp(struct ubcore_device *dev, enum ubcore_transport_mode mode, + union ubcore_eid *local_eid, union ubcore_eid *peer_eid); + +void ubcore_set_vtp_param(struct ubcore_device *dev, struct ubcore_jetty *jetty, + struct ubcore_tjetty_cfg *cfg, struct ubcore_vtp_param *vtp_param); +/* config function migrate state */ +int ubcore_config_function_migrate_state(struct ubcore_device *dev, uint16_t fe_idx, + uint32_t cnt, struct ubcore_ueid_cfg *cfg, enum ubcore_mig_state state); +int ubcore_modify_vtp(struct ubcore_device *dev, struct ubcore_vtp_param *vtp_param, + struct ubcore_vtp_attr *vattr, union ubcore_vtp_attr_mask *vattr_mask); +#endif diff --git a/drivers/ub/urma/uburma/uburma_cdev_file.c b/drivers/ub/urma/uburma/uburma_cdev_file.c index 700f8cfd1cd8..72a4eb815313 100644 --- a/drivers/ub/urma/uburma/uburma_cdev_file.c +++ b/drivers/ub/urma/uburma/uburma_cdev_file.c @@ -33,24 +33,37 @@ #define UBURMA_MAX_VALUE_LEN 24 /* callback information */ -typedef ssize_t (*uburma_show_attr_cb)(const struct ubcore_device *ubc_dev, char *buf); -typedef ssize_t (*uburma_store_attr_cb)(struct ubcore_device *ubc_dev, const char *buf, size_t len); -typedef ssize_t (*uburma_show_port_attr_cb)(const struct ubcore_device *ubc_dev, char *buf, - uint8_t port_num); -typedef ssize_t (*uburma_show_vf_attr_cb)(const struct ubcore_device *ubc_dev, char *buf, - uint16_t vf_num); -typedef ssize_t (*uburma_store_vf_attr_cb)(struct ubcore_device *ubc_dev, const char *buf, - size_t len, uint16_t vf_num); - -static 
ssize_t uburma_show_dev_attr(struct device *dev, struct device_attribute *attr, char *buf, - uburma_show_attr_cb show_cb) -{ - struct uburma_device *ubu_dev = dev_get_drvdata(dev); +typedef ssize_t (*uburma_show_attr_cb)(struct ubcore_device *ubc_dev, + char *buf); +typedef ssize_t (*uburma_store_attr_cb)(struct ubcore_device *ubc_dev, + const char *buf, size_t len); +typedef ssize_t (*uburma_show_port_attr_cb)(struct ubcore_device *ubc_dev, + char *buf, uint8_t port_num); +typedef ssize_t (*uburma_show_fe_attr_cb)(struct ubcore_device *ubc_dev, + char *buf, uint16_t fe_num); +typedef ssize_t (*uburma_store_fe_attr_cb)(struct ubcore_device *ubc_dev, + const char *buf, size_t len, uint16_t fe_num); +typedef ssize_t (*uburma_show_eid_attr_cb)(struct ubcore_device *ubc_dev, + char *buf, uint16_t idx); +typedef ssize_t (*uburma_store_eid_attr_cb)(struct ubcore_device *ubc_dev, + const char *buf, size_t len, uint16_t idx); + +static ssize_t uburma_show_dev_attr(struct device *dev, struct device_attribute *attr, + char *buf, uburma_show_attr_cb show_cb) +{ + struct uburma_logic_device *ldev = dev_get_drvdata(dev); + struct uburma_device *ubu_dev = NULL; struct ubcore_device *ubc_dev; ssize_t ret = -ENODEV; int srcu_idx; - if (!ubu_dev || !buf) { + if (!ldev || !buf) { + uburma_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ubu_dev = ldev->ubu_dev; + if (!ubu_dev) { uburma_log_err("Invalid argument.\n"); return -EINVAL; } @@ -67,15 +80,23 @@ static ssize_t uburma_show_dev_attr(struct device *dev, struct device_attribute static ssize_t uburma_store_dev_attr(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, uburma_store_attr_cb store_cb) { - struct uburma_device *ubu_dev = dev_get_drvdata(dev); + struct uburma_logic_device *ldev = dev_get_drvdata(dev); + struct uburma_device *ubu_dev = NULL; struct ubcore_device *ubc_dev; ssize_t ret = -ENODEV; int srcu_idx; - if (!ubu_dev || !buf) { + if (!ldev || !buf) { + uburma_log_err("Invalid 
argument with ubcore device nullptr.\n"); + return -EINVAL; + } + + ubu_dev = ldev->ubu_dev; + if (!ubu_dev) { uburma_log_err("Invalid argument with ubcore device nullptr.\n"); return -EINVAL; } + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); if (ubc_dev) @@ -86,7 +107,7 @@ static ssize_t uburma_store_dev_attr(struct device *dev, struct device_attribute } /* interface for exporting device attributes */ -static ssize_t ubdev_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t ubdev_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBCORE_MAX_DEV_NAME, "%s\n", ubc_dev->dev_name); } @@ -98,60 +119,7 @@ static ssize_t ubdev_show(struct device *dev, struct device_attribute *attr, cha static DEVICE_ATTR_RO(ubdev); -static ssize_t eid_show_cb(const struct ubcore_device *ubc_dev, char *buf) -{ - return snprintf(buf, (UBCORE_EID_STR_LEN + 1) + 1, EID_FMT "\n", - EID_ARGS(ubc_dev->attr.eid)); -} - -static ssize_t eid_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return uburma_show_dev_attr(dev, attr, buf, eid_show_cb); -} - -static int str_to_eid(const char *buf, size_t len, union ubcore_eid *eid) -{ - char *end; - int ret; - - if (buf == NULL || eid == NULL) { - uburma_log_err("Invalid argument\n"); - return -EINVAL; - } - - ret = in6_pton(buf, (int)len, (u8 *)eid, -1, (const char **)&end); - if (ret == 0) { - uburma_log_err("format error: %s.\n", buf); - return -EINVAL; - } - return 0; -} - -static ssize_t eid_store_cb(struct ubcore_device *ubc_dev, const char *buf, size_t len) -{ - union ubcore_eid eid; - ssize_t ret; - - if (str_to_eid(buf, len, &eid) != 0) { - uburma_log_err("failed to str_to_eid: %s, %zu.\n", buf, len); - return -EINVAL; - } - - ret = ubcore_set_eid(ubc_dev, &eid); - if (ret == 0) - ret = (int)len; // len is required for success return. 
- return ret; -} - -static ssize_t eid_store(struct device *dev, struct device_attribute *attr, const char *buf, - size_t len) -{ - return uburma_store_dev_attr(dev, attr, buf, len, eid_store_cb); -} - -static DEVICE_ATTR_RW(eid); // 0644 - -static ssize_t guid_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t guid_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%llu\n", ubc_dev->attr.guid); } @@ -163,7 +131,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char static DEVICE_ATTR_RO(guid); -static ssize_t max_upi_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_upi_cnt_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.max_upi_cnt); } @@ -175,7 +143,7 @@ static ssize_t max_upi_cnt_show(struct device *dev, struct device_attribute *att static DEVICE_ATTR_RO(max_upi_cnt); -static ssize_t uburma_query_upi(const struct ubcore_device *ubc_dev, char *buf, uint16_t vf_id) +static ssize_t uburma_query_upi(struct ubcore_device *ubc_dev, char *buf, uint16_t fe_idx) { struct ubcore_res_key key = { 0 }; struct ubcore_res_val val = { 0 }; @@ -184,17 +152,17 @@ static ssize_t uburma_query_upi(const struct ubcore_device *ubc_dev, char *buf, ssize_t ret; key.type = UBCORE_RES_KEY_UPI; - key.key = (uint32_t)vf_id; + key.key = (uint32_t)fe_idx; - val.len = sizeof(uint32_t) * UBCORE_MAX_UPI_CNT; - val.addr = (uintptr_t)kcalloc(1, val.len, GFP_KERNEL); + val.len = (uint32_t)sizeof(uint32_t) * UBCORE_MAX_UPI_CNT; + val.addr = (uint64_t)kcalloc(1, val.len, GFP_KERNEL); if (val.addr == 0) { - uburma_log_err("kcalloc vf%u failed.\n", vf_id); + uburma_log_err("kcalloc fe%hu failed.\n", fe_idx); return -ENOMEM; } if (ubcore_query_resource(ubc_dev, &key, &val) != 0) { - uburma_log_err("query vf%u resource failed.\n", vf_id); + uburma_log_err("query fe%hu resource failed.\n", fe_idx); kfree((void 
*)val.addr); return -EPERM; } @@ -204,7 +172,7 @@ static ssize_t uburma_query_upi(const struct ubcore_device *ubc_dev, char *buf, upi = *((uint32_t *)val.addr + i); ret = snprintf(buf + (UBURMA_UPI_STR_LEN * i), UBURMA_UPI_STR_LEN + 1, "%8u ", upi); if (ret <= 0) { - uburma_log_err("snprintf for vf%u upi failed %zd.\n", vf_id, ret); + uburma_log_err("snprintf for fe%hu upi failed %ld.\n", fe_idx, ret); kfree((void *)val.addr); return ret; } @@ -221,14 +189,14 @@ static int uburma_parse_upi_str(const char *buf, size_t len, uint16_t *idx, uint int ret; ret = sscanf(buf, "%hu=%u", idx, upi); - if (ret <= 1) // ret must be equal to 2 + if (ret <= 1) // ret must be equal to 2 return -1; return 0; } -static ssize_t uburma_upi_store(struct ubcore_device *ubc_dev, const char *buf, size_t len, - uint16_t vf_id) +static ssize_t uburma_set_upi(struct ubcore_device *ubc_dev, const char *buf, + size_t len, uint16_t fe_idx) { ssize_t ret = -ENODEV; uint16_t idx; @@ -236,20 +204,20 @@ static ssize_t uburma_upi_store(struct ubcore_device *ubc_dev, const char *buf, ret = uburma_parse_upi_str(buf, len, &idx, &upi); if (ret != 0) { - uburma_log_err("parse vf%u upi str:%s failed %zd.\n", vf_id, buf, ret); + uburma_log_err("parse fe%hu upi str:%s failed %ld.\n", fe_idx, buf, ret); return -EINVAL; } - if (ubcore_set_upi(ubc_dev, vf_id, idx, upi) != 0) { - uburma_log_err("set vf%u idx:%u upi:%u failed.\n", vf_id, idx, upi); + if (ubcore_set_upi(ubc_dev, fe_idx, idx, upi) != 0) { + uburma_log_err("set fe%hu idx:%u upi:%u failed.\n", fe_idx, idx, upi); return -EPERM; } return (ssize_t)len; // len is required for success return. 
} -static ssize_t upi_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t upi_show_cb(struct ubcore_device *ubc_dev, char *buf) { - return uburma_query_upi(ubc_dev, buf, UBCORE_OWN_VF_ID); + return uburma_query_upi(ubc_dev, buf, UBCORE_OWN_FE_IDX); } static ssize_t upi_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -259,18 +227,18 @@ static ssize_t upi_show(struct device *dev, struct device_attribute *attr, char static ssize_t upi_store_cb(struct ubcore_device *ubc_dev, const char *buf, size_t len) { - return uburma_upi_store(ubc_dev, buf, len, UBCORE_OWN_VF_ID); + return uburma_set_upi(ubc_dev, buf, len, UBCORE_OWN_FE_IDX); } -static ssize_t upi_store(struct device *dev, struct device_attribute *attr, const char *buf, - size_t len) +static ssize_t upi_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) { return uburma_store_dev_attr(dev, attr, buf, len, upi_store_cb); } static DEVICE_ATTR_RW(upi); -static ssize_t feature_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t feature_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "0x%x\n", ubc_dev->attr.dev_cap.feature.value); } @@ -282,7 +250,7 @@ static ssize_t feature_show(struct device *dev, struct device_attribute *attr, c static DEVICE_ATTR_RO(feature); -static ssize_t max_jfc_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfc_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfc); } @@ -294,7 +262,7 @@ static ssize_t max_jfc_show(struct device *dev, struct device_attribute *attr, c static DEVICE_ATTR_RO(max_jfc); -static ssize_t max_jfs_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfs_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs); } @@ -306,7 
+274,7 @@ static ssize_t max_jfs_show(struct device *dev, struct device_attribute *attr, c static DEVICE_ATTR_RO(max_jfs); -static ssize_t max_jfr_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfr_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfr); } @@ -318,7 +286,7 @@ static ssize_t max_jfr_show(struct device *dev, struct device_attribute *attr, c static DEVICE_ATTR_RO(max_jfr); -static ssize_t max_jetty_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jetty_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jetty); } @@ -330,7 +298,29 @@ static ssize_t max_jetty_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(max_jetty); -static ssize_t max_jfc_depth_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t show_max_jetty_grp_cb(struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jetty_grp); +} +static ssize_t max_jetty_grp_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, show_max_jetty_grp_cb); +} +static DEVICE_ATTR_RO(max_jetty_grp); + +static ssize_t show_max_jetty_in_jetty_grp_cb(struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + ubc_dev->attr.dev_cap.max_jetty_in_jetty_grp); +} +static ssize_t max_jetty_in_jetty_grp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, show_max_jetty_in_jetty_grp_cb); +} +static DEVICE_ATTR_RO(max_jetty_in_jetty_grp); + +static ssize_t max_jfc_depth_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfc_depth); } @@ -342,7 +332,7 @@ static 
ssize_t max_jfc_depth_show(struct device *dev, struct device_attribute *a static DEVICE_ATTR_RO(max_jfc_depth); -static ssize_t max_jfs_depth_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfs_depth_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs_depth); } @@ -354,7 +344,7 @@ static ssize_t max_jfs_depth_show(struct device *dev, struct device_attribute *a static DEVICE_ATTR_RO(max_jfs_depth); -static ssize_t max_jfr_depth_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfr_depth_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfr_depth); } @@ -366,10 +356,10 @@ static ssize_t max_jfr_depth_show(struct device *dev, struct device_attribute *a static DEVICE_ATTR_RO(max_jfr_depth); -static ssize_t show_max_jfs_inline_size_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t show_max_jfs_inline_size_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", - ubc_dev->attr.dev_cap.max_jfs_inline_size); + ubc_dev->attr.dev_cap.max_jfs_inline_size); } static ssize_t max_jfs_inline_size_show(struct device *dev, struct device_attribute *attr, @@ -380,7 +370,7 @@ static ssize_t max_jfs_inline_size_show(struct device *dev, struct device_attrib static DEVICE_ATTR_RO(max_jfs_inline_size); -static ssize_t max_jfs_sge_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfs_sge_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs_sge); } @@ -392,7 +382,7 @@ static ssize_t max_jfs_sge_show(struct device *dev, struct device_attribute *att static DEVICE_ATTR_RO(max_jfs_sge); -static ssize_t max_jfs_rsge_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfs_rsge_show_cb(struct 
ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs_rsge); } @@ -404,7 +394,7 @@ static ssize_t max_jfs_rsge_show(struct device *dev, struct device_attribute *at static DEVICE_ATTR_RO(max_jfs_rsge); -static ssize_t max_jfr_sge_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_jfr_sge_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfr_sge); } @@ -416,7 +406,7 @@ static ssize_t max_jfr_sge_show(struct device *dev, struct device_attribute *att static DEVICE_ATTR_RO(max_jfr_sge); -static ssize_t max_msg_size_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t max_msg_size_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%llu\n", ubc_dev->attr.dev_cap.max_msg_size); } @@ -428,20 +418,41 @@ static ssize_t max_msg_size_show(struct device *dev, struct device_attribute *at static DEVICE_ATTR_RO(max_msg_size); -static ssize_t max_rc_outstd_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t show_max_atomic_size_cb(struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_atomic_size); +} +static ssize_t max_atomic_size_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%llu\n", - ubc_dev->attr.dev_cap.max_rc_outstd_cnt); + return uburma_show_dev_attr(dev, attr, buf, show_max_atomic_size_cb); } +static DEVICE_ATTR_RO(max_atomic_size); -static ssize_t max_rc_outstd_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t show_atomic_feat_cb(struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.atomic_feat.value); +} +static ssize_t atomic_feat_show(struct device *dev, struct device_attribute 
*attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, show_atomic_feat_cb); +} +static DEVICE_ATTR_RO(atomic_feat); + +static ssize_t max_rc_outstd_cnt_show_cb(struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%llu\n", ubc_dev->attr.dev_cap.max_rc_outstd_cnt); +} + +static ssize_t max_rc_outstd_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) { return uburma_show_dev_attr(dev, attr, buf, max_rc_outstd_cnt_show_cb); } static DEVICE_ATTR_RO(max_rc_outstd_cnt); -static ssize_t trans_mode_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t trans_mode_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.trans_mode); } @@ -453,10 +464,10 @@ static ssize_t trans_mode_show(struct device *dev, struct device_attribute *attr static DEVICE_ATTR_RO(trans_mode); -static ssize_t congestion_ctrl_alg_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t congestion_ctrl_alg_show_cb(struct ubcore_device *ubc_dev, char *buf) { - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", - ubc_dev->attr.dev_cap.congestion_ctrl_alg); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%u\n", ubc_dev->attr.dev_cap.congestion_ctrl_alg); } static ssize_t congestion_ctrl_alg_show(struct device *dev, struct device_attribute *attr, @@ -487,19 +498,19 @@ static ssize_t congestion_ctrl_alg_store(struct device *dev, struct device_attri static DEVICE_ATTR_RW(congestion_ctrl_alg); // 0644 -static ssize_t comp_vector_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t ceq_cnt_show_cb(struct ubcore_device *ubc_dev, char *buf) { - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.comp_vector_cnt); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.ceq_cnt); } -static ssize_t comp_vector_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) 
+static ssize_t ceq_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { - return uburma_show_dev_attr(dev, attr, buf, comp_vector_cnt_show_cb); + return uburma_show_dev_attr(dev, attr, buf, ceq_cnt_show_cb); } -static DEVICE_ATTR_RO(comp_vector_cnt); +static DEVICE_ATTR_RO(ceq_cnt); -static ssize_t utp_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t utp_cnt_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.utp_cnt); } @@ -511,7 +522,7 @@ static ssize_t utp_cnt_show(struct device *dev, struct device_attribute *attr, c static DEVICE_ATTR_RO(utp_cnt); -static ssize_t port_count_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t port_count_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.port_cnt); } @@ -523,32 +534,50 @@ static ssize_t port_count_show(struct device *dev, struct device_attribute *attr static DEVICE_ATTR_RO(port_count); -static ssize_t virtualization_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t virtualization_show_cb(struct ubcore_device *ubc_dev, char *buf) { - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%s\n", - ubc_dev->attr.virtualization ? "true" : "false"); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%s\n", ubc_dev->attr.virtualization ? 
"true" : "false"); } - static ssize_t virtualization_show(struct device *dev, struct device_attribute *attr, char *buf) { return uburma_show_dev_attr(dev, attr, buf, virtualization_show_cb); } - static DEVICE_ATTR_RO(virtualization); -static ssize_t vf_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t show_fe_cnt_cb(struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN - 1, "%u\n", ubc_dev->attr.fe_cnt); +} +static ssize_t fe_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, show_fe_cnt_cb); +} +static DEVICE_ATTR_RO(fe_cnt); + +static ssize_t show_dynamic_eid_cb(struct ubcore_device *ubc_dev, char *buf) { - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.vf_cnt); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%d\n", ubc_dev->dynamic_eid); } +static ssize_t dynamic_eid_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, show_dynamic_eid_cb); +} +static DEVICE_ATTR_RO(dynamic_eid); -static ssize_t vf_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t max_eid_cnt_show_cb(struct ubcore_device *ubc_dev, char *buf) { - return uburma_show_dev_attr(dev, attr, buf, vf_cnt_show_cb); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.max_eid_cnt); } -static DEVICE_ATTR_RO(vf_cnt); +static ssize_t max_eid_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_eid_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_eid_cnt); -static ssize_t transport_type_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t transport_type_show_cb(struct ubcore_device *ubc_dev, char *buf) { return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%d\n", (int)ubc_dev->transport_type); } @@ -560,7 +589,7 @@ static ssize_t transport_type_show(struct device *dev, struct 
device_attribute * static DEVICE_ATTR_RO(transport_type); -static ssize_t driver_name_show_cb(const struct ubcore_device *ubc_dev, char *buf) +static ssize_t driver_name_show_cb(struct ubcore_device *ubc_dev, char *buf) { if (ubc_dev->ops == NULL) return -EINVAL; @@ -577,7 +606,6 @@ static DEVICE_ATTR_RO(driver_name); static struct attribute *uburma_dev_attrs[] = { &dev_attr_ubdev.attr, - &dev_attr_eid.attr, &dev_attr_guid.attr, &dev_attr_max_upi_cnt.attr, &dev_attr_upi.attr, @@ -586,6 +614,8 @@ static struct attribute *uburma_dev_attrs[] = { &dev_attr_max_jfs.attr, &dev_attr_max_jfr.attr, &dev_attr_max_jetty.attr, + &dev_attr_max_jetty_grp.attr, + &dev_attr_max_jetty_in_jetty_grp.attr, &dev_attr_max_jfc_depth.attr, &dev_attr_max_jfs_depth.attr, &dev_attr_max_jfr_depth.attr, @@ -594,13 +624,17 @@ static struct attribute *uburma_dev_attrs[] = { &dev_attr_max_jfs_rsge.attr, &dev_attr_max_jfr_sge.attr, &dev_attr_max_msg_size.attr, + &dev_attr_max_atomic_size.attr, + &dev_attr_atomic_feat.attr, &dev_attr_max_rc_outstd_cnt.attr, &dev_attr_trans_mode.attr, &dev_attr_congestion_ctrl_alg.attr, - &dev_attr_comp_vector_cnt.attr, + &dev_attr_ceq_cnt.attr, &dev_attr_utp_cnt.attr, &dev_attr_port_count.attr, - &dev_attr_vf_cnt.attr, + &dev_attr_fe_cnt.attr, + &dev_attr_max_eid_cnt.attr, + &dev_attr_dynamic_eid.attr, &dev_attr_virtualization.attr, &dev_attr_transport_type.attr, &dev_attr_driver_name.attr, @@ -636,10 +670,10 @@ static ssize_t uburma_show_port_attr(struct uburma_port *p, struct uburma_port_a return ret; } -static ssize_t max_mtu_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) +static ssize_t max_mtu_show_cb(struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) { - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%d\n", - (int)ubc_dev->attr.port_attr[port_num].max_mtu); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%d\n", (int)ubc_dev->attr.port_attr[port_num].max_mtu); } static ssize_t max_mtu_show(struct uburma_port *p, struct 
uburma_port_attribute *attr, char *buf) @@ -649,7 +683,7 @@ static ssize_t max_mtu_show(struct uburma_port *p, struct uburma_port_attribute static PORT_ATTR_RO(max_mtu); -static ssize_t state_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) +static ssize_t state_show_cb(struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) { struct ubcore_device_status status; @@ -658,8 +692,8 @@ static ssize_t state_show_cb(const struct ubcore_device *ubc_dev, char *buf, uin return -EPERM; } - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", - (uint32_t)status.port_status[port_num].state); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%u\n", (uint32_t)status.port_status[port_num].state); } static ssize_t state_show(struct uburma_port *p, struct uburma_port_attribute *attr, char *buf) @@ -669,7 +703,7 @@ static ssize_t state_show(struct uburma_port *p, struct uburma_port_attribute *a static PORT_ATTR_RO(state); -static ssize_t active_speed_show_cb(const struct ubcore_device *ubc_dev, char *buf, +static ssize_t active_speed_show_cb(struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) { struct ubcore_device_status status; @@ -679,8 +713,8 @@ static ssize_t active_speed_show_cb(const struct ubcore_device *ubc_dev, char *b return -EPERM; } - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", - status.port_status[port_num].active_speed); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%u\n", status.port_status[port_num].active_speed); } static ssize_t active_speed_show(struct uburma_port *p, struct uburma_port_attribute *attr, @@ -691,7 +725,7 @@ static ssize_t active_speed_show(struct uburma_port *p, struct uburma_port_attri static PORT_ATTR_RO(active_speed); -static ssize_t active_width_show_cb(const struct ubcore_device *ubc_dev, char *buf, +static ssize_t active_width_show_cb(struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) { struct ubcore_device_status status; @@ -701,8 +735,8 @@ static ssize_t active_width_show_cb(const struct 
ubcore_device *ubc_dev, char *b return -EPERM; } - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", - status.port_status[port_num].active_width); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%u\n", status.port_status[port_num].active_width); } static ssize_t active_width_show(struct uburma_port *p, struct uburma_port_attribute *attr, @@ -713,7 +747,7 @@ static ssize_t active_width_show(struct uburma_port *p, struct uburma_port_attri static PORT_ATTR_RO(active_width); -static ssize_t active_mtu_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) +static ssize_t active_mtu_show_cb(struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) { struct ubcore_device_status status; @@ -722,11 +756,12 @@ static ssize_t active_mtu_show_cb(const struct ubcore_device *ubc_dev, char *buf return -EPERM; } - return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", - (uint32_t)status.port_status[port_num].active_mtu); + return snprintf(buf, UBURMA_MAX_VALUE_LEN, + "%u\n", (uint32_t)status.port_status[port_num].active_mtu); } -static ssize_t active_mtu_show(struct uburma_port *p, struct uburma_port_attribute *attr, char *buf) +static ssize_t active_mtu_show(struct uburma_port *p, struct uburma_port_attribute *attr, + char *buf) { return uburma_show_port_attr(p, attr, buf, active_mtu_show_cb); } @@ -750,8 +785,8 @@ static ssize_t uburma_port_attr_show(struct kobject *kobj, struct attribute *att return port_attr->show(p, port_attr, buf); } -static ssize_t uburma_port_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, - size_t count) +static ssize_t uburma_port_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) { struct uburma_port_attribute *port_attr = container_of(attr, struct uburma_port_attribute, attr); @@ -770,6 +805,7 @@ static void uburma_port_release(struct kobject *kobj) { } +// ATTRIBUTE_GROUPS defined in 3.11, but must be consistent with kobj_type->default_groups static const struct 
attribute_group uburma_port_groups = { .attrs = uburma_port_attrs, }; @@ -779,16 +815,16 @@ static struct kobj_type uburma_port_type = { .release = uburma_port_release, .default_attrs = uburma_port_attrs }; -static ssize_t uburma_show_vf_attr(struct uburma_vf *vf, struct uburma_vf_attribute *attr, - char *buf, uburma_show_vf_attr_cb show_cb) +static ssize_t uburma_show_fe_attr(struct uburma_fe *fe, struct uburma_fe_attribute *attr, + char *buf, uburma_show_fe_attr_cb show_cb) { - struct uburma_device *ubu_dev = vf->ubu_dev; + struct uburma_device *ubu_dev = fe->ubu_dev; struct ubcore_device *ubc_dev; int srcu_idx; ssize_t ret; if (!ubu_dev) { - uburma_log_err("Invalid argument in show_vf_attr.\n"); + uburma_log_err("Invalid argument in show_fe_attr.\n"); return -EINVAL; } @@ -799,21 +835,21 @@ static ssize_t uburma_show_vf_attr(struct uburma_vf *vf, struct uburma_vf_attrib return -ENODEV; } - ret = show_cb(ubc_dev, buf, vf->vf_idx); + ret = show_cb(ubc_dev, buf, fe->fe_idx); srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); return ret; } -static ssize_t uburma_store_vf_attr(struct uburma_vf *vf, struct uburma_vf_attribute *attr, - const char *buf, size_t len, uburma_store_vf_attr_cb store_cb) +static ssize_t uburma_store_fe_attr(struct uburma_fe *fe, struct uburma_fe_attribute *attr, + const char *buf, size_t len, uburma_store_fe_attr_cb store_cb) { - struct uburma_device *ubu_dev = vf->ubu_dev; + struct uburma_device *ubu_dev = fe->ubu_dev; struct ubcore_device *ubc_dev; int srcu_idx; ssize_t ret; if (!ubu_dev) { - uburma_log_err("Invalid argument in store_vf_attr.\n"); + uburma_log_err("Invalid argument in store_fe_attr.\n"); return -EINVAL; } @@ -824,110 +860,216 @@ static ssize_t uburma_store_vf_attr(struct uburma_vf *vf, struct uburma_vf_attri return -ENODEV; } - ret = store_cb(ubc_dev, buf, len, vf->vf_idx); + ret = store_cb(ubc_dev, buf, len, fe->fe_idx); srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); return ret; } -static ssize_t vf_upi_show_cb(const 
struct ubcore_device *ubc_dev, char *buf, uint16_t vf_id) +static ssize_t fe_upi_show_cb(struct ubcore_device *ubc_dev, char *buf, uint16_t fe_idx) { - return uburma_query_upi(ubc_dev, buf, vf_id); + return uburma_query_upi(ubc_dev, buf, fe_idx); } -static ssize_t vf_upi_show(struct uburma_vf *vf, struct uburma_vf_attribute *attr, char *buf) +static ssize_t fe_upi_show(struct uburma_fe *fe, struct uburma_fe_attribute *attr, char *buf) { - return uburma_show_vf_attr(vf, attr, buf, vf_upi_show_cb); + return uburma_show_fe_attr(fe, attr, buf, fe_upi_show_cb); } -static ssize_t vf_upi_store_cb(struct ubcore_device *ubc_dev, const char *buf, size_t len, - uint16_t vf_id) +static ssize_t fe_upi_store_cb(struct ubcore_device *ubc_dev, const char *buf, + size_t len, uint16_t fe_idx) { if (ubc_dev == NULL || buf == NULL) return -EINVAL; - return uburma_upi_store(ubc_dev, buf, len, vf_id); + return uburma_set_upi(ubc_dev, buf, len, fe_idx); +} + +static ssize_t fe_upi_store(struct uburma_fe *fe, struct uburma_fe_attribute *attr, + const char *buf, size_t len) +{ + return uburma_store_fe_attr(fe, attr, buf, len, fe_upi_store_cb); +} + +static FE_ATTR(upi, 0644, fe_upi_show, fe_upi_store); + +static struct attribute *uburma_fe_attrs[] = { + &fe_attr_upi.attr, + NULL, +}; + +static ssize_t uburma_fe_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct uburma_fe_attribute *fe_attr = container_of(attr, struct uburma_fe_attribute, attr); + struct uburma_fe *fe = container_of(kobj, struct uburma_fe, kobj); + + if (!fe_attr->show) + return -EIO; + + return fe_attr->show(fe, fe_attr, buf); +} + +static ssize_t uburma_fe_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct uburma_fe_attribute *fe_attr = container_of(attr, struct uburma_fe_attribute, attr); + struct uburma_fe *fe = container_of(kobj, struct uburma_fe, kobj); + + if (!fe_attr->store) + return -EIO; + return fe_attr->store(fe, fe_attr, buf, 
count); +} + +static const struct sysfs_ops uburma_fe_sysfs_ops = { + .show = uburma_fe_attr_show, + .store = uburma_fe_attr_store +}; + +static void uburma_fe_release(struct kobject *kobj) +{ +} + +// ATTRIBUTE_GROUPS defined in 3.11, but must be consistent with kobj_type->default_groups +static const struct attribute_group uburma_fe_groups = { + .attrs = uburma_fe_attrs, +}; + +static struct kobj_type uburma_fe_type = { + .release = uburma_fe_release, + .sysfs_ops = &uburma_fe_sysfs_ops, + .default_attrs = uburma_fe_attrs +}; + +static ssize_t uburma_show_eid_attr(struct uburma_eid *eid, struct uburma_eid_attribute *attr, + char *buf, uburma_show_eid_attr_cb show_cb) +{ + struct uburma_device *ubu_dev = eid->ubu_dev; + struct ubcore_device *ubc_dev; + int srcu_idx; + ssize_t ret; + + if (!ubu_dev) { + uburma_log_err("Invalid argument in show_eid_attr.\n"); + return -EINVAL; + } + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev == NULL) { + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return -ENODEV; + } + + ret = show_cb(ubc_dev, buf, eid->eid_idx); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return ret; } -static ssize_t vf_upi_store(struct uburma_vf *vf, struct uburma_vf_attribute *attr, const char *buf, - size_t len) +static ssize_t show_eid_cb(struct ubcore_device *ubc_dev, char *buf, uint16_t idx) { - return uburma_store_vf_attr(vf, attr, buf, len, vf_upi_store_cb); + return snprintf(buf, (UBCORE_EID_STR_LEN + 1) + 1, EID_FMT"\n", + EID_ARGS(ubc_dev->eid_table.eid_entries[idx].eid)); } -static VF_ATTR(upi, 0644, vf_upi_show, vf_upi_store); +static ssize_t eid_show(struct uburma_eid *eid, struct uburma_eid_attribute *attr, char *buf) +{ + return uburma_show_eid_attr(eid, attr, buf, show_eid_cb); +} -static struct attribute *uburma_vf_attrs[] = { - &vf_attr_upi.attr, +static EID_ATTR_RO(eid); + +static struct attribute *uburma_eid_attrs[] = { +
&eid_attr_eid.attr, NULL, }; -static ssize_t uburma_vf_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +static ssize_t uburma_eid_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { - struct uburma_vf_attribute *vf_attr = container_of(attr, struct uburma_vf_attribute, attr); - struct uburma_vf *vf = container_of(kobj, struct uburma_vf, kobj); + struct uburma_eid_attribute *eid_attr = + container_of(attr, struct uburma_eid_attribute, attr); + struct uburma_eid *eid = container_of(kobj, struct uburma_eid, kobj); - if (!vf_attr->show) + if (!eid_attr->show) return -EIO; - return vf_attr->show(vf, vf_attr, buf); + return eid_attr->show(eid, eid_attr, buf); } -static ssize_t uburma_vf_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, - size_t count) +static ssize_t uburma_eid_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) { - struct uburma_vf_attribute *vf_attr = container_of(attr, struct uburma_vf_attribute, attr); - struct uburma_vf *vf = container_of(kobj, struct uburma_vf, kobj); + struct uburma_eid_attribute *eid_attr = + container_of(attr, struct uburma_eid_attribute, attr); + struct uburma_eid *eid = container_of(kobj, struct uburma_eid, kobj); - if (!vf_attr->store) + if (!eid_attr->store) return -EIO; - return vf_attr->store(vf, vf_attr, buf, count); + return eid_attr->store(eid, eid_attr, buf, count); } -static const struct sysfs_ops uburma_vf_sysfs_ops = { .show = uburma_vf_attr_show, - .store = uburma_vf_attr_store }; +static const struct sysfs_ops uburma_eid_sysfs_ops = { + .show = uburma_eid_attr_show, + .store = uburma_eid_attr_store +}; -static void uburma_vf_release(struct kobject *kobj) +static void uburma_eid_release(struct kobject *kobj) { } -static const struct attribute_group uburma_vf_groups = { - .attrs = uburma_vf_attrs, +// ATTRIBUTE_GROUPS defined in 3.11, but must be consistent with kobj_type->default_groups +static const struct 
attribute_group uburma_eid_groups = { + .attrs = uburma_eid_attrs, }; -static struct kobj_type uburma_vf_type = { .release = uburma_vf_release, - .sysfs_ops = &uburma_vf_sysfs_ops, - .default_attrs = uburma_vf_attrs +static struct kobj_type uburma_eid_type = { + .release = uburma_eid_release, + .sysfs_ops = &uburma_eid_sysfs_ops, + .default_attrs = uburma_eid_attrs }; -int uburma_create_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num) +int uburma_create_port_attr_files(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, uint8_t port_num) { struct uburma_port *p; - p = &ubu_dev->port[port_num]; + p = &ldev->port[port_num]; p->ubu_dev = ubu_dev; p->port_num = port_num; - return kobject_init_and_add(&p->kobj, &uburma_port_type, &ubu_dev->dev->kobj, "port%hhu", - port_num); + return kobject_init_and_add(&p->kobj, &uburma_port_type, &ldev->dev->kobj, + "port%hhu", port_num); } -int uburma_create_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num) +int uburma_create_fe_attr_files(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, uint16_t fe_num) { - struct uburma_vf *vf; + struct uburma_fe *fe; - vf = &ubu_dev->vf[vf_num]; - vf->ubu_dev = ubu_dev; - vf->vf_idx = vf_num; + fe = &ldev->fe[fe_num]; + fe->ubu_dev = ubu_dev; + fe->fe_idx = fe_num; - return kobject_init_and_add(&vf->kobj, &uburma_vf_type, &ubu_dev->dev->kobj, "vf%u", - vf_num); + return kobject_init_and_add(&fe->kobj, &uburma_fe_type, &ldev->dev->kobj, + "fe%hu", fe_num); } -int uburma_create_dev_attr_files(struct uburma_device *ubu_dev) +int uburma_create_eid_attr_files(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, uint32_t eid_num) +{ + struct uburma_eid *eid; + + eid = &ldev->eid[eid_num]; + eid->ubu_dev = ubu_dev; + eid->eid_idx = eid_num; + + return kobject_init_and_add(&eid->kobj, &uburma_eid_type, &ldev->dev->kobj, + "eid%u", eid_num); +} + +int uburma_create_dev_attr_files(struct uburma_logic_device *ldev) { int ret; - ret = 
sysfs_create_group(&ubu_dev->dev->kobj, &uburma_dev_attr_group); + ret = sysfs_create_group(&ldev->dev->kobj, &uburma_dev_attr_group); if (ret != 0) { uburma_log_err("sysfs create group failed, ret:%d.\n", ret); return -1; @@ -936,17 +1078,22 @@ int uburma_create_dev_attr_files(struct uburma_device *ubu_dev) return 0; } -void uburma_remove_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num) +void uburma_remove_port_attr_files(struct uburma_logic_device *ldev, uint8_t port_num) +{ + kobject_put(&ldev->port[port_num].kobj); +} + +void uburma_remove_fe_attr_files(struct uburma_logic_device *ldev, uint16_t fe_num) { - kobject_put(&ubu_dev->port[port_num].kobj); + kobject_put(&ldev->fe[fe_num].kobj); } -void uburma_remove_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num) +void uburma_remove_eid_attr_files(struct uburma_logic_device *ldev, uint32_t eid_num) { - kobject_put(&ubu_dev->vf[vf_num].kobj); + kobject_put(&ldev->eid[eid_num].kobj); } -void uburma_remove_dev_attr_files(struct uburma_device *ubu_dev) +void uburma_remove_dev_attr_files(struct uburma_logic_device *ldev) { - sysfs_remove_group(&ubu_dev->dev->kobj, &uburma_dev_attr_group); + sysfs_remove_group(&ldev->dev->kobj, &uburma_dev_attr_group); } diff --git a/drivers/ub/urma/uburma/uburma_cdev_file.h b/drivers/ub/urma/uburma/uburma_cdev_file.h index c0a4483ce2e7..695a38938441 100644 --- a/drivers/ub/urma/uburma/uburma_cdev_file.h +++ b/drivers/ub/urma/uburma/uburma_cdev_file.h @@ -26,8 +26,8 @@ struct uburma_port_attribute { struct attribute attr; ssize_t (*show)(struct uburma_port *p, struct uburma_port_attribute *attr, char *buf); - ssize_t (*store)(struct uburma_port *p, struct uburma_port_attribute *attr, const char *buf, - size_t count); + ssize_t (*store)(struct uburma_port *p, struct uburma_port_attribute *attr, + const char *buf, size_t count); }; #define PORT_ATTR(_name, _mode, _show, _store) \ @@ -35,23 +35,43 @@ struct uburma_port_attribute { #define PORT_ATTR_RO(_name) 
struct uburma_port_attribute port_attr_##_name = __ATTR_RO(_name) -struct uburma_vf_attribute { +struct uburma_fe_attribute { struct attribute attr; - ssize_t (*show)(struct uburma_vf *vf, struct uburma_vf_attribute *attr, char *buf); - ssize_t (*store)(struct uburma_vf *vf, struct uburma_vf_attribute *attr, const char *buf, - size_t count); + ssize_t (*show)(struct uburma_fe *fe, struct uburma_fe_attribute *attr, char *buf); + ssize_t (*store)(struct uburma_fe *fe, struct uburma_fe_attribute *attr, + const char *buf, size_t count); }; -#define VF_ATTR(_name, _mode, _show, _store) \ - struct uburma_vf_attribute vf_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define FE_ATTR(_name, _mode, _show, _store) \ +struct uburma_fe_attribute fe_attr_##_name = __ATTR(_name, _mode, _show, _store) -#define VF_ATTR_RO(_name) struct uburma_vf_attribute vf_attr_##_name = __ATTR_RO(_name) +#define FE_ATTR_RO(_name) \ +struct uburma_fe_attribute fe_attr_##_name = __ATTR_RO(_name) -int uburma_create_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num); -int uburma_create_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num); -int uburma_create_dev_attr_files(struct uburma_device *ubu_dev); -void uburma_remove_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num); -void uburma_remove_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num); -void uburma_remove_dev_attr_files(struct uburma_device *ubu_dev); +struct uburma_eid_attribute { + struct attribute attr; + ssize_t (*show)(struct uburma_eid *eid, struct uburma_eid_attribute *attr, char *buf); + ssize_t (*store)(struct uburma_eid *eid, struct uburma_eid_attribute *attr, + const char *buf, size_t count); +}; + +#define EID_ATTR(_name, _mode, _show, _store) \ +struct uburma_eid_attribute eid_attr_##_name = __ATTR(_name, _mode, _show, _store) + +#define EID_ATTR_RO(_name) \ +struct uburma_eid_attribute eid_attr_##_name = __ATTR_RO(_name) + +int uburma_create_port_attr_files(struct 
uburma_logic_device *ldev, + struct uburma_device *ubu_dev, uint8_t port_num); +int uburma_create_fe_attr_files(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, uint16_t fe_num); +int uburma_create_eid_attr_files(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, uint32_t eid_num); +int uburma_create_dev_attr_files(struct uburma_logic_device *ldev); + +void uburma_remove_port_attr_files(struct uburma_logic_device *ldev, uint8_t port_num); +void uburma_remove_fe_attr_files(struct uburma_logic_device *ldev, uint16_t fe_num); +void uburma_remove_eid_attr_files(struct uburma_logic_device *ldev, uint32_t eid_num); +void uburma_remove_dev_attr_files(struct uburma_logic_device *ldev); #endif /* UBURMA_CDEV_FILE_H */ diff --git a/drivers/ub/urma/uburma/uburma_cmd.c b/drivers/ub/urma/uburma/uburma_cmd.c index af21dc76fc3f..bf9d962ba475 100644 --- a/drivers/ub/urma/uburma/uburma_cmd.c +++ b/drivers/ub/urma/uburma/uburma_cmd.c @@ -63,22 +63,24 @@ static int uburma_cmd_create_ctx(struct ubcore_device *ubc_dev, struct uburma_fi struct uburma_cmd_create_ctx arg; struct uburma_uobj *uobj; struct uburma_jfae_uobj *jfae; + union ubcore_eid eid; int ret; ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, - sizeof(struct uburma_cmd_create_ctx)); + sizeof(struct uburma_cmd_create_ctx)); if (ret != 0) return ret; mutex_lock(&file->mutex); - ucontext = ubcore_alloc_ucontext(ubc_dev, arg.in.uasid, - (struct ubcore_udrv_priv *)(void *)&arg.udata); + (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); + ucontext = ubcore_alloc_ucontext(ubc_dev, arg.in.eid_index, + (struct ubcore_udrv_priv *)(void *)&arg.udata); if (IS_ERR_OR_NULL(ucontext)) { mutex_unlock(&file->mutex); return -EPERM; } - + ucontext->eid = eid; uobj = uobj_alloc(UOBJ_CLASS_JFAE, file); if (IS_ERR(uobj)) { ret = PTR_ERR(uobj); @@ -92,7 +94,7 @@ static int uburma_cmd_create_ctx(struct ubcore_device *ubc_dev, struct uburma_fi file->ucontext = ucontext; ret = 
uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, - sizeof(struct uburma_cmd_create_ctx)); + sizeof(struct uburma_cmd_create_ctx)); if (ret != 0) goto free_jfae; @@ -130,76 +132,83 @@ static void uburma_fill_attr(struct ubcore_seg_cfg *cfg, struct uburma_cmd_regis cfg->va = arg->in.va; cfg->len = arg->in.len; cfg->flag.value = arg->in.flag; - cfg->ukey.key = arg->in.key; + cfg->token_value.token = arg->in.token; cfg->iova = arg->in.va; } -static int uburma_cmd_alloc_key_id(struct ubcore_device *ubc_dev, struct uburma_file *file, +static int uburma_cmd_alloc_token_id(struct ubcore_device *ubc_dev, struct uburma_file *file, struct uburma_cmd_hdr *hdr) { - struct uburma_cmd_alloc_key_id arg; + struct uburma_cmd_alloc_token_id arg; struct ubcore_udata udata = { 0 }; - struct ubcore_key_id *key; + struct ubcore_token_id *token_id; struct uburma_uobj *uobj; int ret; ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, - sizeof(struct uburma_cmd_alloc_key_id)); + sizeof(struct uburma_cmd_alloc_token_id)); if (ret != 0) return ret; fill_udata(&udata, file->ucontext, &arg.udata); - uobj = uobj_alloc(UOBJ_CLASS_KEY, file); + uobj = uobj_alloc(UOBJ_CLASS_TOKEN, file); if (IS_ERR(uobj)) { - uburma_log_err("UOBJ_CLASS_KEY alloc fail!\n"); + uburma_log_err("UOBJ_CLASS_TOKEN alloc fail!\n"); return -ENOMEM; } - key = ubcore_alloc_key_id(ubc_dev, &udata); - if (IS_ERR_OR_NULL(key)) { - uburma_log_err("ubcore alloc key id failed.\n"); + token_id = ubcore_alloc_token_id(ubc_dev, &udata); + if (IS_ERR_OR_NULL(token_id)) { + uburma_log_err("ubcore alloc token_id id failed.\n"); ret = -EPERM; goto err_free_uobj; } - uobj->object = key; - arg.out.key_id = key->key_id; + uobj->object = token_id; + arg.out.token_id = token_id->token_id; arg.out.handle = uobj->id; ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, - sizeof(struct uburma_cmd_alloc_key_id)); + sizeof(struct uburma_cmd_alloc_token_id)); if (ret != 0) - goto 
err_free_key; + goto err_free_token_id; return uobj_alloc_commit(uobj); -err_free_key: - (void)ubcore_free_key_id(key); +err_free_token_id: + (void)ubcore_free_token_id(token_id); err_free_uobj: uobj_alloc_abort(uobj); return ret; } -static int uburma_cmd_free_key_id(struct ubcore_device *ubc_dev, struct uburma_file *file, +static int uburma_cmd_free_token_id(struct ubcore_device *ubc_dev, struct uburma_file *file, struct uburma_cmd_hdr *hdr) { - struct uburma_cmd_free_key_id arg; + struct uburma_cmd_free_token_id arg; + struct ubcore_token_id *token; struct uburma_uobj *uobj; int ret; ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, - sizeof(struct uburma_cmd_free_key_id)); + sizeof(struct uburma_cmd_free_token_id)); if (ret != 0) return ret; - uobj = uobj_get_del(UOBJ_CLASS_KEY, (int)arg.in.handle, file); + uobj = uobj_get_del(UOBJ_CLASS_TOKEN, (int)arg.in.handle, file); if (IS_ERR(uobj)) { - uburma_log_err("failed to find key id.\n"); + uburma_log_err("failed to find token id.\n"); return -EINVAL; } + + token = (struct ubcore_token_id *)uobj->object; + if (arg.in.token_id != token->token_id) { + uburma_log_err("ubcore remove token_id failed: non-consistent.\n"); + return -EPERM; + } ret = uobj_remove_commit(uobj); if (ret != 0) - uburma_log_err("ubcore remove commit keyid failed.\n"); + uburma_log_err("ubcore remove commit token_id failed.\n"); return ret; } @@ -211,7 +220,7 @@ static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, struct uburma_ struct ubcore_target_seg *seg; struct ubcore_udata udata = { 0 }; struct uburma_uobj *uobj; - struct uburma_uobj *keyid_uobj; + struct uburma_uobj *token_id_uobj; int ret; ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, @@ -219,18 +228,19 @@ static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, struct uburma_ if (ret != 0) return ret; - keyid_uobj = uobj_get_read(UOBJ_CLASS_KEY, (int)arg.in.keyid_handle, file); - if (!IS_ERR(keyid_uobj)) - 
cfg.keyid = (struct ubcore_key_id *)keyid_uobj->object; + token_id_uobj = uobj_get_read(UOBJ_CLASS_TOKEN, (int)arg.in.token_id_handle, file); + if (!IS_ERR(token_id_uobj)) + cfg.token_id = (struct ubcore_token_id *)token_id_uobj->object; uburma_fill_attr(&cfg, &arg); + cfg.eid_index = file->ucontext->eid_index; fill_udata(&udata, file->ucontext, &arg.udata); uobj = uobj_alloc(UOBJ_CLASS_SEG, file); if (IS_ERR(uobj)) { uburma_log_err("UOBJ_CLASS_SEG alloc fail!\n"); ret = -ENOMEM; - goto err_put_keyid; + goto err_put_token_id; } seg = ubcore_register_seg(ubc_dev, &cfg, &udata); @@ -240,7 +250,7 @@ static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, struct uburma_ goto err_free_uobj; } uobj->object = seg; - arg.out.key_id = seg->seg.key_id; + arg.out.token_id = seg->seg.token_id; arg.out.handle = uobj->id; ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, @@ -248,8 +258,8 @@ static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, struct uburma_ if (ret != 0) goto err_delete_seg; - if (!IS_ERR(keyid_uobj)) - uobj_put_read(keyid_uobj); + if (!IS_ERR(token_id_uobj)) + uobj_put_read(token_id_uobj); uobj_alloc_commit(uobj); return 0; @@ -257,9 +267,9 @@ static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, struct uburma_ ubcore_unregister_seg(seg); err_free_uobj: uobj_alloc_abort(uobj); -err_put_keyid: - if (!IS_ERR(keyid_uobj)) - uobj_put_read(keyid_uobj); +err_put_token_id: + if (!IS_ERR(token_id_uobj)) + uobj_put_read(token_id_uobj); return ret; } @@ -350,8 +360,21 @@ void uburma_jetty_event_cb(struct ubcore_event *event, struct ubcore_ucontext *c &jetty_uobj->async_event_list, &jetty_uobj->async_events_reported); } -static int uburma_cmd_create_jfs(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +void uburma_jetty_grp_event_cb(struct ubcore_event *event, struct ubcore_ucontext *ctx) +{ + struct uburma_jetty_grp_uobj *jetty_grp_uobj; + + if (event->element.jetty_grp == 
NULL) + return; + + jetty_grp_uobj = + (struct uburma_jetty_grp_uobj *)event->element.jetty_grp->jetty_grp_cfg.user_ctx; + uburma_write_async_event(ctx, event->element.jetty_grp->urma_jetty_grp, event->event_type, + &jetty_grp_uobj->async_event_list, &jetty_grp_uobj->async_events_reported); +} + +static int uburma_cmd_create_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_create_jfs arg; struct ubcore_jfs_cfg cfg = { 0 }; @@ -368,11 +391,11 @@ static int uburma_cmd_create_jfs(struct ubcore_device *ubc_dev, struct uburma_fi cfg.depth = arg.in.depth; cfg.flag.value = arg.in.flag; + cfg.eid_index = file->ucontext->eid_index; cfg.trans_mode = arg.in.trans_mode; cfg.max_sge = arg.in.max_sge; cfg.max_rsge = arg.in.max_rsge; cfg.max_inline_data = arg.in.max_inline_data; - cfg.retry_cnt = arg.in.retry_cnt; cfg.rnr_retry = arg.in.rnr_retry; cfg.err_timeout = arg.in.err_timeout; cfg.priority = arg.in.priority; @@ -436,8 +459,93 @@ static int uburma_cmd_create_jfs(struct ubcore_device *ubc_dev, struct uburma_fi return ret; } -static int uburma_cmd_delete_jfs(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_modify_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jfs arg; + struct ubcore_jfs_attr attr = {0}; + struct uburma_uobj *uobj; + struct ubcore_udata udata; + struct ubcore_jfs *jfs; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_modify_jfs)); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.state = arg.in.state; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JFS, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfs.\n"); + return -EINVAL; + } + + jfs = (struct ubcore_jfs *)uobj->object; + ret = ubcore_modify_jfs(jfs, &attr, 
&udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jfs failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_modify_jfs)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_query_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_jfs arg; + struct ubcore_jfs_attr attr = {0}; + struct ubcore_jfs_cfg cfg = {0}; + struct uburma_uobj *uobj; + struct ubcore_jfs *jfs; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_query_jfs)); + if (ret != 0) + return ret; + + uobj = uobj_get_write(UOBJ_CLASS_JFS, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfs.\n"); + return -EINVAL; + } + + jfs = (struct ubcore_jfs *)uobj->object; + ret = ubcore_query_jfs(jfs, &cfg, &attr); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("query jfs failed, ret:%d.\n", ret); + return ret; + } + + arg.out.depth = cfg.depth; + arg.out.flag = cfg.flag.value; + arg.out.trans_mode = (uint32_t)cfg.trans_mode; + arg.out.priority = cfg.priority; + arg.out.max_sge = cfg.max_sge; + arg.out.max_rsge = cfg.max_rsge; + arg.out.max_inline_data = cfg.max_inline_data; + arg.out.rnr_retry = cfg.rnr_retry; + arg.out.err_timeout = cfg.err_timeout; + arg.out.state = (uint32_t)attr.state; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_query_jfs)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_delete_jfs arg; struct uburma_jfs_uobj *jfs_uobj; @@ -472,8 +580,8 @@ static int uburma_cmd_delete_jfs(struct ubcore_device *ubc_dev, struct uburma_fi sizeof(struct uburma_cmd_delete_jfs)); } -static int uburma_cmd_import_seg(struct
ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_import_seg(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_import_seg arg; struct ubcore_target_seg_cfg cfg = { 0 }; @@ -494,11 +602,10 @@ static int uburma_cmd_import_seg(struct ubcore_device *ubc_dev, struct uburma_fi } (void)memcpy(cfg.seg.ubva.eid.raw, arg.in.eid, UBCORE_EID_SIZE); - cfg.seg.ubva.uasid = arg.in.uasid; cfg.seg.ubva.va = arg.in.va; cfg.seg.len = arg.in.len; cfg.seg.attr.value = arg.in.flag; - cfg.seg.key_id = arg.in.key_id; + cfg.seg.token_id = arg.in.token_id; fill_udata(&udata, file->ucontext, &arg.udata); tseg = ubcore_import_seg(ubc_dev, &cfg, &udata); @@ -522,8 +629,8 @@ static int uburma_cmd_import_seg(struct ubcore_device *ubc_dev, struct uburma_fi return ret; } -static int uburma_cmd_unimport_seg(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_unimport_seg(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_unimport_seg arg; struct uburma_uobj *uobj; @@ -546,8 +653,8 @@ static int uburma_cmd_unimport_seg(struct ubcore_device *ubc_dev, struct uburma_ return ret; } -static int uburma_cmd_create_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_create_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_create_jfr arg; struct uburma_uobj *jfc_uobj; @@ -563,12 +670,13 @@ static int uburma_cmd_create_jfr(struct ubcore_device *ubc_dev, struct uburma_fi return ret; cfg.id = arg.in.id; - cfg.flag.value = arg.in.flag; - cfg.trans_mode = arg.in.trans_mode; cfg.depth = arg.in.depth; + cfg.eid_index = file->ucontext->eid_index; + cfg.flag.value = arg.in.flag; cfg.max_sge = arg.in.max_sge; cfg.min_rnr_timer = arg.in.min_rnr_timer; - cfg.ukey.key = 
arg.in.key; + cfg.trans_mode = arg.in.trans_mode; + cfg.token_value.token = arg.in.token; fill_udata(&udata, file->ucontext, &arg.udata); jfr_uobj = (struct uburma_jfr_uobj *)uobj_alloc(UOBJ_CLASS_JFR, file); @@ -627,8 +735,8 @@ static int uburma_cmd_create_jfr(struct ubcore_device *ubc_dev, struct uburma_fi return ret; } -static int uburma_cmd_modify_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_modify_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_modify_jfr arg; struct uburma_uobj *uobj; @@ -644,6 +752,7 @@ static int uburma_cmd_modify_jfr(struct ubcore_device *ubc_dev, struct uburma_fi attr.mask = arg.in.mask; attr.rx_threshold = arg.in.rx_threshold; + attr.state = (enum ubcore_jfr_state)arg.in.state; fill_udata(&udata, file->ucontext, &arg.udata); uobj = uobj_get_write(UOBJ_CLASS_JFR, arg.in.handle, file); @@ -666,8 +775,54 @@ static int uburma_cmd_modify_jfr(struct ubcore_device *ubc_dev, struct uburma_fi return ret; } -static int uburma_cmd_delete_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_query_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_jfr arg; + struct ubcore_jfr_attr attr = {0}; + struct ubcore_jfr_cfg cfg = {0}; + struct uburma_uobj *uobj; + struct ubcore_jfr *jfr; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_query_jfr)); + if (ret != 0) + return ret; + + uobj = uobj_get_write(UOBJ_CLASS_JFR, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfr.\n"); + return -EINVAL; + } + + jfr = (struct ubcore_jfr *)uobj->object; + ret = ubcore_query_jfr(jfr, &cfg, &attr); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("query jfr failed, ret:%d.\n", ret); + return ret; + } + +
arg.out.depth = cfg.depth; + arg.out.flag = cfg.flag.value; + arg.out.trans_mode = (uint32_t)cfg.trans_mode; + arg.out.max_sge = cfg.max_sge; + arg.out.min_rnr_timer = cfg.min_rnr_timer; + arg.out.token = cfg.token_value.token; + arg.out.id = cfg.id; + + arg.out.rx_threshold = attr.rx_threshold; + arg.out.state = (uint32_t)attr.state; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_query_jfr)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_delete_jfr arg; struct uburma_jfr_uobj *jfr_uobj; @@ -702,8 +857,8 @@ static int uburma_cmd_delete_jfr(struct ubcore_device *ubc_dev, struct uburma_fi sizeof(struct uburma_cmd_delete_jfr)); } -static int uburma_cmd_create_jfc(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_create_jfc(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_create_jfc arg; struct uburma_jfc_uobj *jfc_uobj; @@ -781,8 +936,8 @@ static int uburma_cmd_create_jfc(struct ubcore_device *ubc_dev, struct uburma_fi return ret; } -static int uburma_cmd_modify_jfc(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_modify_jfc(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_modify_jfc arg; struct uburma_uobj *uobj; @@ -821,8 +976,8 @@ static int uburma_cmd_modify_jfc(struct ubcore_device *ubc_dev, struct uburma_fi return ret; } -static int uburma_cmd_delete_jfc(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_delete_jfc(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_delete_jfc arg; struct uburma_uobj *uobj; @@ -859,26
+1014,29 @@ static int uburma_cmd_delete_jfc(struct ubcore_device *ubc_dev, struct uburma_fi } static void fill_create_jetty_attr(struct ubcore_jetty_cfg *cfg, - const struct uburma_cmd_create_jetty *arg) + struct uburma_cmd_create_jetty *arg) { cfg->id = arg->in.id; cfg->jfs_depth = arg->in.jfs_depth; cfg->jfr_depth = arg->in.jfr_depth; - cfg->flag.value = arg->in.flag; - cfg->trans_mode = arg->in.trans_mode; + cfg->flag.bs.share_jfr = arg->in.jetty_flag & 0x1; // see urma_jetty_flag + cfg->flag.bs.lock_free = ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.lock_free; + cfg->flag.bs.error_suspend = ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.error_suspend; + cfg->flag.bs.outorder_comp = ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.outorder_comp; + cfg->max_send_sge = arg->in.max_send_sge; cfg->max_send_rsge = arg->in.max_send_rsge; cfg->max_recv_sge = arg->in.max_recv_sge; cfg->max_inline_data = arg->in.max_inline_data; cfg->priority = arg->in.priority; - cfg->retry_cnt = arg->in.retry_cnt; cfg->rnr_retry = arg->in.rnr_retry; cfg->err_timeout = arg->in.err_timeout; cfg->min_rnr_timer = arg->in.min_rnr_timer; + cfg->trans_mode = arg->in.trans_mode; } static void fill_create_jetty_out(struct uburma_cmd_create_jetty *arg, - const struct ubcore_jetty *jetty) + struct ubcore_jetty *jetty) { arg->out.id = jetty->id; arg->out.jfs_depth = jetty->jetty_cfg.jfs_depth; @@ -889,14 +1047,15 @@ static void fill_create_jetty_out(struct uburma_cmd_create_jetty *arg, arg->out.max_inline_data = jetty->jetty_cfg.max_inline_data; } -static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_create_jetty arg; struct uburma_uobj *send_jfc_uobj = ERR_PTR(-ENOENT); struct uburma_uobj *recv_jfc_uobj = ERR_PTR(-ENOENT); struct uburma_uobj *jfr_uobj = ERR_PTR(-ENOENT); - struct 
ubcore_jetty_cfg cfg = { 0 }; + struct uburma_uobj *jetty_grp_uobj = ERR_PTR(-ENOENT); + struct ubcore_jetty_cfg cfg = {0}; struct uburma_jetty_uobj *jetty_uobj; struct ubcore_udata udata; struct ubcore_jetty *jetty; @@ -917,6 +1076,7 @@ static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_ cfg.jetty_context = jetty_uobj; fill_create_jetty_attr(&cfg, &arg); + cfg.eid_index = file->ucontext->eid_index; send_jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.send_jfc_handle, file); recv_jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.recv_jfc_handle, file); if (IS_ERR(send_jfc_uobj) || IS_ERR(recv_jfc_uobj)) { @@ -927,7 +1087,7 @@ static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_ } cfg.send_jfc = send_jfc_uobj->object; cfg.recv_jfc = recv_jfc_uobj->object; - if (cfg.flag.bs.share_jfr != 0) { + if (arg.in.jfr_handle != 0) { jfr_uobj = uobj_get_read(UOBJ_CLASS_JFR, arg.in.jfr_handle, file); if (IS_ERR(jfr_uobj)) { uburma_log_err("failed to find jfr, jfr_handle:%llu.\n", arg.in.jfr_handle); @@ -935,8 +1095,19 @@ static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_ goto err_put; } cfg.jfr = jfr_uobj->object; + cfg.flag.bs.share_jfr = 1; } - cfg.ukey.key = arg.in.key; + if (arg.in.is_jetty_grp != 0) { + jetty_grp_uobj = uobj_get_read(UOBJ_CLASS_JETTY_GRP, arg.in.jetty_grp_handle, file); + if (IS_ERR(jetty_grp_uobj)) { + uburma_log_err("failed to find jetty_grp, jetty_grp_handle:%llu.\n", + arg.in.jetty_grp_handle); + ret = -EINVAL; + goto err_put; + } + cfg.jetty_grp = (struct ubcore_jetty_group *)jetty_grp_uobj->object; + } + cfg.token_value.token = arg.in.token; fill_udata(&udata, file->ucontext, &arg.udata); jetty = ubcore_create_jetty(ubc_dev, &cfg, uburma_jetty_event_cb, &udata); @@ -961,6 +1132,8 @@ static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_ if (ret != 0) goto err_put_jfae; + if (cfg.jetty_grp) + uobj_put_read(jetty_grp_uobj); if (cfg.jfr) 
uobj_put_read(jfr_uobj); uobj_put_read(send_jfc_uobj); @@ -973,6 +1146,8 @@ static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_ err_delete_jetty: (void)ubcore_delete_jetty(jetty); err_put: + if (!IS_ERR(jetty_grp_uobj)) + uobj_put_read(jetty_grp_uobj); if (!IS_ERR(jfr_uobj)) uobj_put_read(jfr_uobj); if (!IS_ERR(recv_jfc_uobj)) @@ -983,8 +1158,8 @@ static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_ return ret; } -static int uburma_cmd_modify_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_modify_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_modify_jetty arg; struct uburma_uobj *uobj; @@ -1000,6 +1175,7 @@ static int uburma_cmd_modify_jetty(struct ubcore_device *ubc_dev, struct uburma_ attr.mask = arg.in.mask; attr.rx_threshold = arg.in.rx_threshold; + attr.state = (enum ubcore_jetty_state)arg.in.state; fill_udata(&udata, file->ucontext, &arg.udata); uobj = uobj_get_write(UOBJ_CLASS_JETTY, arg.in.handle, file); @@ -1022,8 +1198,67 @@ static int uburma_cmd_modify_jetty(struct ubcore_device *ubc_dev, struct uburma_ return ret; } -static int uburma_cmd_delete_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_query_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_jetty arg; + struct ubcore_jetty_attr attr = {0}; + struct ubcore_jetty_cfg cfg = {0}; + struct uburma_uobj *uobj; + struct ubcore_jetty *jetty; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_query_jetty)); + if (ret != 0) + return ret; + + uobj = uobj_get_write(UOBJ_CLASS_JETTY, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jetty.\n"); + return -EINVAL; + } + + jetty = (struct 
ubcore_jetty *)uobj->object; + ret = ubcore_query_jetty(jetty, &cfg, &attr); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("query jetty failed, ret:%d.\n", ret); + return ret; + } + + arg.out.id = cfg.id; + arg.out.jetty_flag = cfg.flag.value; + + arg.out.jfs_depth = cfg.jfs_depth; + arg.out.jfs_flag = 0; // todo + arg.out.trans_mode = (uint32_t)cfg.trans_mode; + arg.out.priority = cfg.priority; + arg.out.max_send_sge = cfg.max_send_sge; + arg.out.max_send_rsge = cfg.max_send_rsge; + arg.out.max_inline_data = cfg.max_inline_data; + arg.out.rnr_retry = cfg.rnr_retry; + arg.out.err_timeout = cfg.err_timeout; + + if (cfg.flag.bs.share_jfr == 1) { + arg.out.jfr_depth = cfg.jfr_depth; + arg.out.jfr_flag = 0; // todo + arg.out.max_recv_sge = cfg.max_recv_sge; + arg.out.min_rnr_timer = cfg.min_rnr_timer; + arg.out.token = cfg.token_value.token; + arg.out.jfr_id = 0; // todo + } + + arg.out.rx_threshold = attr.rx_threshold; + arg.out.state = (uint32_t)attr.state; + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_query_jetty)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_delete_jetty arg; struct uburma_jetty_uobj *jetty_uobj; @@ -1058,8 +1293,8 @@ static int uburma_cmd_delete_jetty(struct ubcore_device *ubc_dev, struct uburma_ sizeof(struct uburma_cmd_delete_jetty)); } -static int uburma_cmd_create_jfce(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_create_jfce(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_create_jfce arg; struct uburma_jfce_uobj *jfce; @@ -1090,8 +1325,8 @@ static int uburma_cmd_create_jfce(struct ubcore_device *ubc_dev, struct uburma_f return ret; } -static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, struct 
uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_import_jfr arg; struct ubcore_tjetty_cfg cfg = { 0 }; @@ -1112,10 +1347,10 @@ static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, struct uburma_fi } (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); - cfg.id.uasid = arg.in.uasid; cfg.id.id = arg.in.id; - cfg.ukey.key = arg.in.key; + cfg.token_value.token = arg.in.token; cfg.trans_mode = arg.in.trans_mode; + cfg.eid_index = file->ucontext->eid_index; fill_udata(&udata, file->ucontext, &arg.udata); tjfr = ubcore_import_jfr(ubc_dev, &cfg, &udata); @@ -1127,12 +1362,12 @@ static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, struct uburma_fi uobj->object = tjfr; arg.out.handle = uobj->id; - if (tjfr->tp != NULL) { - arg.out.tp_type = 1; + if (tjfr->vtpn != NULL) + arg.out.tpn = tjfr->vtpn->vtpn; + else if (tjfr->tp != NULL) arg.out.tpn = tjfr->tp->tpn; - } else { + else arg.out.tpn = UBURMA_INVALID_TPN; - } ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, sizeof(struct uburma_cmd_import_jfr)); @@ -1145,8 +1380,8 @@ static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, struct uburma_fi return 0; } -static int uburma_cmd_unimport_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_unimport_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_unimport_jfr arg; struct uburma_uobj *uobj; @@ -1168,8 +1403,8 @@ static int uburma_cmd_unimport_jfr(struct ubcore_device *ubc_dev, struct uburma_ return ret; } -static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct 
uburma_cmd_import_jetty arg; struct ubcore_tjetty_cfg cfg = { 0 }; @@ -1190,10 +1425,12 @@ static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, struct uburma_ } (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); - cfg.id.uasid = arg.in.uasid; cfg.id.id = arg.in.id; - cfg.ukey.key = arg.in.key; + cfg.token_value.token = arg.in.token; cfg.trans_mode = (enum ubcore_transport_mode)arg.in.trans_mode; + cfg.policy = (enum ubcore_jetty_grp_policy)arg.in.policy; + cfg.type = (enum ubcore_target_type)arg.in.type; + cfg.eid_index = file->ucontext->eid_index; fill_udata(&udata, file->ucontext, &arg.udata); tjetty = ubcore_import_jetty(ubc_dev, &cfg, &udata); @@ -1205,12 +1442,12 @@ static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, struct uburma_ uobj->object = tjetty; arg.out.handle = uobj->id; - if (tjetty->tp != NULL) { - arg.out.tp_type = 1; + if (tjetty->vtpn != NULL) + arg.out.tpn = tjetty->vtpn->vtpn; + else if (tjetty->tp != NULL) arg.out.tpn = tjetty->tp->tpn; - } else { + else arg.out.tpn = UBURMA_INVALID_TPN; - } ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, sizeof(struct uburma_cmd_import_jetty)); @@ -1223,8 +1460,8 @@ static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, struct uburma_ return 0; } -static int uburma_cmd_unimport_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_unimport_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_unimport_jetty arg; struct uburma_uobj *uobj; @@ -1247,8 +1484,7 @@ static int uburma_cmd_unimport_jetty(struct ubcore_device *ubc_dev, struct uburm } static int uburma_get_jetty_tjetty_objs(struct uburma_file *file, uint64_t jetty_handle, - uint64_t tjetty_handle, struct uburma_uobj **jetty_uobj, - struct uburma_uobj **tjetty_uobj) + uint64_t tjetty_handle, struct uburma_uobj **jetty_uobj, struct uburma_uobj **tjetty_uobj) { 
*jetty_uobj = uobj_get_read(UOBJ_CLASS_JETTY, jetty_handle, file); if (IS_ERR(*jetty_uobj)) { @@ -1273,8 +1509,8 @@ static inline void uburma_put_jetty_tjetty_objs(struct uburma_uobj *jetty_uobj, } static int uburma_get_jfs_tjfr_objs(struct uburma_file *file, uint64_t jetty_handle, - uint64_t tjetty_handle, struct uburma_uobj **jetty_uobj, - struct uburma_uobj **tjetty_uobj) + uint64_t tjetty_handle, struct uburma_uobj **jetty_uobj, + struct uburma_uobj **tjetty_uobj) { *jetty_uobj = uobj_get_read(UOBJ_CLASS_JFS, jetty_handle, file); if (IS_ERR(*jetty_uobj)) { @@ -1378,8 +1614,8 @@ static int uburma_cmd_advise_jetty(struct ubcore_device *ubc_dev, struct uburma_ return ret; } -static int uburma_cmd_unadvise_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_unadvise_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_unadvise_jetty arg; struct uburma_uobj *tjetty_uobj; @@ -1403,17 +1639,18 @@ static int uburma_cmd_unadvise_jetty(struct ubcore_device *ubc_dev, struct uburm return ret; } -static int uburma_cmd_bind_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_bind_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { - struct uburma_cmd_advise_jetty arg; + struct uburma_cmd_bind_jetty arg; struct uburma_uobj *tjetty_uobj; struct uburma_uobj *jetty_uobj; + struct ubcore_tjetty *tjetty; struct ubcore_udata udata; int ret; - ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, - sizeof(struct uburma_cmd_advise_jetty)); + ret = uburma_copy_from_user(&arg, + (void __user *)(uintptr_t)hdr->args_addr, sizeof(struct uburma_cmd_bind_jetty)); if (ret != 0) return ret; @@ -1423,16 +1660,32 @@ static int uburma_cmd_bind_jetty(struct ubcore_device *ubc_dev, struct uburma_fi fill_udata(&udata, file->ucontext, &arg.udata); - 
ret = ubcore_bind_jetty(jetty_uobj->object, tjetty_uobj->object, &udata); - if (ret != 0) + tjetty = (struct ubcore_tjetty *)tjetty_uobj->object; + ret = ubcore_bind_jetty(jetty_uobj->object, tjetty, &udata); + if (ret != 0) { uburma_log_err("bind jetty failed.\n"); + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; + } + + if (tjetty->vtpn != NULL) + arg.out.tpn = tjetty->vtpn->vtpn; + else if (tjetty->tp != NULL) + arg.out.tpn = tjetty->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_bind_jetty)); + if (ret != 0) + (void)ubcore_unbind_jetty(jetty_uobj->object); uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); return ret; } -static int uburma_cmd_unbind_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_unbind_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct uburma_cmd_unadvise_jetty arg; struct uburma_uobj *tjetty_uobj; @@ -1448,7 +1701,7 @@ static int uburma_cmd_unbind_jetty(struct ubcore_device *ubc_dev, struct uburma_ &jetty_uobj, &tjetty_uobj)) return -EINVAL; - ret = ubcore_unbind_jetty(jetty_uobj->object, tjetty_uobj->object); + ret = ubcore_unbind_jetty(jetty_uobj->object); if (ret != 0) uburma_log_err("failed to unbind jetty, ret: %d.\n", ret); @@ -1456,9 +1709,107 @@ static int uburma_cmd_unbind_jetty(struct ubcore_device *ubc_dev, struct uburma_ return ret; } +static int uburma_cmd_create_jetty_grp(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jetty_grp arg; + struct uburma_jetty_grp_uobj *jetty_grp_uobj; + struct ubcore_jetty_grp_cfg cfg = {0}; + struct ubcore_udata udata; + struct ubcore_jetty_group *jetty_grp; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct 
uburma_cmd_create_jetty_grp)); + if (ret != 0) + return ret; + + (void)memcpy(cfg.name, arg.in.name, UBCORE_JETTY_GRP_MAX_NAME); + cfg.token_value.token = arg.in.token; + cfg.id = arg.in.id; + cfg.policy = (enum ubcore_jetty_grp_policy)arg.in.policy; + fill_udata(&udata, file->ucontext, &arg.udata); + + jetty_grp_uobj = (struct uburma_jetty_grp_uobj *)uobj_alloc(UOBJ_CLASS_JETTY_GRP, file); + if (IS_ERR(jetty_grp_uobj)) { + uburma_log_err("UOBJ_CLASS_JETTY_GRP alloc fail!\n"); + return -ENOMEM; + } + jetty_grp_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jetty_grp_uobj->async_event_list); + cfg.user_ctx = (uint64_t)jetty_grp_uobj; + + jetty_grp = ubcore_create_jetty_grp(ubc_dev, &cfg, uburma_jetty_grp_event_cb, &udata); + if (IS_ERR_OR_NULL(jetty_grp)) { + uburma_log_err("create jetty_grp failed.\n"); + ret = -EPERM; + goto err_alloc_abort; + } + jetty_grp_uobj->uobj.object = jetty_grp; + jetty_grp->urma_jetty_grp = arg.in.urma_jetty_grp; + + /* Do not release jfae fd until jetty_grp is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jetty_grp; + + arg.out.id = jetty_grp->id; + arg.out.handle = jetty_grp_uobj->uobj.id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_jetty_grp)); + if (ret != 0) + goto err_put_jfae; + + (void)uobj_alloc_commit(&jetty_grp_uobj->uobj); + return ret; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jetty_grp: + (void)ubcore_delete_jetty_grp(jetty_grp); +err_alloc_abort: + uobj_alloc_abort(&jetty_grp_uobj->uobj); + return ret; +} +static int uburma_cmd_delete_jetty_grp(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jetty_grp arg; + struct uburma_jetty_grp_uobj *jetty_grp_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_delete_jetty_grp)); + if (ret != 0) + return 
ret; + + uobj = uobj_get_del(UOBJ_CLASS_JETTY_GRP, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jetty group.\n"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. */ + uobj_get(uobj); + jetty_grp_uobj = container_of(uobj, struct uburma_jetty_grp_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jetty group failed, ret:%d.\n", ret); + uobj_put(uobj); + return ret; + } + + arg.out.async_events_reported = jetty_grp_uobj->async_events_reported; + uobj_put(uobj); + return uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_delete_jetty_grp)); +} + static int uburma_fill_user_ctl_info(struct ubcore_ucontext *ctx, - struct uburma_cmd_user_ctl *user_ctl, - struct ubcore_user_ctl *k_user_ctl) + struct uburma_cmd_user_ctl *user_ctl, struct ubcore_user_ctl *k_user_ctl) { if (ctx == NULL) { uburma_log_err("parameter invalid with ctx nullptr.\n"); @@ -1481,8 +1832,8 @@ static int uburma_fill_user_ctl_info(struct ubcore_ucontext *ctx, return 0; } -static int uburma_cmd_user_ctl(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr) +static int uburma_cmd_user_ctl(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr) { struct ubcore_user_ctl k_user_ctl = { 0 }; struct uburma_cmd_user_ctl user_ctl; @@ -1504,23 +1855,26 @@ static int uburma_cmd_user_ctl(struct ubcore_device *ubc_dev, struct uburma_file return 0; } -typedef int (*uburma_cmd_handler)(struct ubcore_device *ubc_dev, struct uburma_file *file, - struct uburma_cmd_hdr *hdr); +typedef int (*uburma_cmd_handler)(struct ubcore_device *ubc_dev, + struct uburma_file *file, struct uburma_cmd_hdr *hdr); static uburma_cmd_handler g_uburma_cmd_handlers[] = { [0] = NULL, [UBURMA_CMD_CREATE_CTX] = uburma_cmd_create_ctx, [UBURMA_CMD_DESTROY_CTX] = uburma_cmd_destroy_ctx, - [UBURMA_CMD_ALLOC_KEY_ID] = uburma_cmd_alloc_key_id, -
[UBURMA_CMD_FREE_KEY_ID] = uburma_cmd_free_key_id, + [UBURMA_CMD_ALLOC_TOKEN_ID] = uburma_cmd_alloc_token_id, + [UBURMA_CMD_FREE_TOKEN_ID] = uburma_cmd_free_token_id, [UBURMA_CMD_REGISTER_SEG] = uburma_cmd_register_seg, [UBURMA_CMD_UNREGISTER_SEG] = uburma_cmd_unregister_seg, [UBURMA_CMD_IMPORT_SEG] = uburma_cmd_import_seg, [UBURMA_CMD_UNIMPORT_SEG] = uburma_cmd_unimport_seg, [UBURMA_CMD_CREATE_JFR] = uburma_cmd_create_jfr, [UBURMA_CMD_MODIFY_JFR] = uburma_cmd_modify_jfr, + [UBURMA_CMD_QUERY_JFR] = uburma_cmd_query_jfr, [UBURMA_CMD_DELETE_JFR] = uburma_cmd_delete_jfr, [UBURMA_CMD_CREATE_JFS] = uburma_cmd_create_jfs, + [UBURMA_CMD_MODIFY_JFS] = uburma_cmd_modify_jfs, + [UBURMA_CMD_QUERY_JFS] = uburma_cmd_query_jfs, [UBURMA_CMD_DELETE_JFS] = uburma_cmd_delete_jfs, [UBURMA_CMD_CREATE_JFC] = uburma_cmd_create_jfc, [UBURMA_CMD_MODIFY_JFC] = uburma_cmd_modify_jfc, @@ -1530,6 +1884,7 @@ static uburma_cmd_handler g_uburma_cmd_handlers[] = { [UBURMA_CMD_UNIMPORT_JFR] = uburma_cmd_unimport_jfr, [UBURMA_CMD_CREATE_JETTY] = uburma_cmd_create_jetty, [UBURMA_CMD_MODIFY_JETTY] = uburma_cmd_modify_jetty, + [UBURMA_CMD_QUERY_JETTY] = uburma_cmd_query_jetty, [UBURMA_CMD_DELETE_JETTY] = uburma_cmd_delete_jetty, [UBURMA_CMD_IMPORT_JETTY] = uburma_cmd_import_jetty, [UBURMA_CMD_UNIMPORT_JETTY] = uburma_cmd_unimport_jetty, @@ -1539,6 +1894,8 @@ static uburma_cmd_handler g_uburma_cmd_handlers[] = { [UBURMA_CMD_UNADVISE_JETTY] = uburma_cmd_unadvise_jetty, [UBURMA_CMD_BIND_JETTY] = uburma_cmd_bind_jetty, [UBURMA_CMD_UNBIND_JETTY] = uburma_cmd_unbind_jetty, + [UBURMA_CMD_CREATE_JETTY_GRP] = uburma_cmd_create_jetty_grp, + [UBURMA_CMD_DESTROY_JETTY_GRP] = uburma_cmd_delete_jetty_grp, [UBURMA_CMD_USER_CTL] = uburma_cmd_user_ctl }; diff --git a/drivers/ub/urma/uburma/uburma_cmd.h b/drivers/ub/urma/uburma/uburma_cmd.h index 631ba2dd103d..f110800bd0db 100644 --- a/drivers/ub/urma/uburma/uburma_cmd.h +++ b/drivers/ub/urma/uburma/uburma_cmd.h @@ -40,16 +40,19 @@ struct uburma_cmd_hdr { enum 
uburma_cmd { UBURMA_CMD_CREATE_CTX = 1, UBURMA_CMD_DESTROY_CTX, - UBURMA_CMD_ALLOC_KEY_ID, - UBURMA_CMD_FREE_KEY_ID, + UBURMA_CMD_ALLOC_TOKEN_ID, + UBURMA_CMD_FREE_TOKEN_ID, UBURMA_CMD_REGISTER_SEG, UBURMA_CMD_UNREGISTER_SEG, UBURMA_CMD_IMPORT_SEG, UBURMA_CMD_UNIMPORT_SEG, UBURMA_CMD_CREATE_JFS, + UBURMA_CMD_MODIFY_JFS, + UBURMA_CMD_QUERY_JFS, UBURMA_CMD_DELETE_JFS, UBURMA_CMD_CREATE_JFR, UBURMA_CMD_MODIFY_JFR, + UBURMA_CMD_QUERY_JFR, UBURMA_CMD_DELETE_JFR, UBURMA_CMD_CREATE_JFC, UBURMA_CMD_MODIFY_JFC, @@ -59,6 +62,7 @@ enum uburma_cmd { UBURMA_CMD_UNIMPORT_JFR, UBURMA_CMD_CREATE_JETTY, UBURMA_CMD_MODIFY_JETTY, + UBURMA_CMD_QUERY_JETTY, UBURMA_CMD_DELETE_JETTY, UBURMA_CMD_IMPORT_JETTY, UBURMA_CMD_UNIMPORT_JETTY, @@ -68,6 +72,8 @@ enum uburma_cmd { UBURMA_CMD_UNADVISE_JETTY, UBURMA_CMD_BIND_JETTY, UBURMA_CMD_UNBIND_JETTY, + UBURMA_CMD_CREATE_JETTY_GRP, + UBURMA_CMD_DESTROY_JETTY_GRP, UBURMA_CMD_USER_CTL }; @@ -80,7 +86,8 @@ struct uburma_cmd_udrv_priv { struct uburma_cmd_create_ctx { struct { - uint32_t uasid; + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t eid_index; } in; struct { int async_fd; @@ -88,17 +95,18 @@ struct uburma_cmd_create_ctx { struct uburma_cmd_udrv_priv udata; }; -struct uburma_cmd_alloc_key_id { +struct uburma_cmd_alloc_token_id { struct { - uint32_t key_id; - uint64_t handle; /* handle of the allocated key_id obj in kernel */ + uint32_t token_id; + uint64_t handle; /* handle of the allocated token_id obj in kernel */ } out; struct uburma_cmd_udrv_priv udata; }; -struct uburma_cmd_free_key_id { +struct uburma_cmd_free_token_id { struct { - uint64_t handle; /* handle of the allocated key_id obj in kernel */ + uint64_t handle; /* handle of the allocated token_id obj in kernel */ + uint32_t token_id; } in; }; @@ -106,13 +114,13 @@ struct uburma_cmd_register_seg { struct { uint64_t va; uint64_t len; - uint32_t key_id; - uint64_t keyid_handle; - uint32_t key; + uint32_t token_id; + uint64_t token_id_handle; + uint32_t token; uint32_t flag; } in; struct { 
- uint32_t key_id; + uint32_t token_id; uint64_t handle; /* handle of the allocated seg obj in kernel */ } out; struct uburma_cmd_udrv_priv udata; @@ -127,12 +135,11 @@ struct uburma_cmd_unregister_seg { struct uburma_cmd_import_seg { struct { uint8_t eid[UBCORE_EID_SIZE]; - uint32_t uasid; uint64_t va; uint64_t len; uint32_t flag; - uint32_t key; - uint32_t key_id; + uint32_t token; + uint32_t token_id; uint64_t mva; } in; struct { @@ -149,14 +156,14 @@ struct uburma_cmd_unimport_seg { struct uburma_cmd_create_jfr { struct { - uint32_t depth; /* in terms of WQEBB */ + uint32_t depth; uint32_t flag; uint32_t trans_mode; uint8_t max_sge; uint8_t min_rnr_timer; uint32_t jfc_id; uint64_t jfc_handle; - uint32_t key; + uint32_t token; uint32_t id; uint64_t urma_jfr; /* urma jfr pointer */ } in; @@ -174,10 +181,29 @@ struct uburma_cmd_modify_jfr { uint64_t handle; /* handle of jfr, used to find jfr obj in kernel */ uint32_t mask; /* see urma_jfr_attr_mask_t */ uint32_t rx_threshold; + uint32_t state; } in; struct uburma_cmd_udrv_priv udata; }; +struct uburma_cmd_query_jfr { + struct { + uint64_t handle; /* handle of the allocated jfr obj in kernel */ + } in; + struct { + uint32_t depth; + uint32_t flag; + uint32_t trans_mode; + uint8_t max_sge; + uint8_t min_rnr_timer; + uint32_t token; + uint32_t id; + + uint32_t rx_threshold; + uint32_t state; + } out; +}; + struct uburma_cmd_delete_jfr { struct { uint64_t handle; /* handle of jfr, used to find jfr obj in kernel */ @@ -189,7 +215,7 @@ struct uburma_cmd_delete_jfr { struct uburma_cmd_create_jfs { struct { - uint32_t depth; /* in terms of WQEBB */ + uint32_t depth; uint32_t flag; uint32_t trans_mode; uint8_t priority; @@ -214,6 +240,35 @@ struct uburma_cmd_create_jfs { struct uburma_cmd_udrv_priv udata; }; +struct uburma_cmd_modify_jfs { + struct { + uint64_t handle; /* handle of jfs, used to find jfs obj in kernel */ + uint32_t mask; /* see urma_jfs_attr_mask_t */ + uint32_t state; + } in; + struct uburma_cmd_udrv_priv 
udata; +}; + +struct uburma_cmd_query_jfs { + struct { + uint64_t handle; /* handle of the allocated jfs obj in kernel */ + } in; + struct { + uint32_t depth; + uint32_t flag; + uint32_t trans_mode; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + + uint32_t state; + } out; +}; + struct uburma_cmd_delete_jfs { struct { uint64_t handle; /* handle of jfs, used to find jfs obj in kernel */ @@ -268,14 +323,12 @@ struct uburma_cmd_import_jfr { struct { /* correspond to urma_jfr_id */ uint8_t eid[UBCORE_EID_SIZE]; - uint32_t uasid; uint32_t id; - /* correspond to urma_key_t */ - uint32_t key; + /* correspond to urma_token_t */ + uint32_t token; uint32_t trans_mode; } in; struct { - uint8_t tp_type; /* TP or TPG */ uint32_t tpn; uint64_t handle; /* handle of the allocated tjfr obj in kernel */ } out; @@ -291,26 +344,35 @@ struct uburma_cmd_unimport_jfr { struct uburma_cmd_create_jetty { struct { uint32_t id; /* user may assign id */ + uint32_t jetty_flag; + uint32_t jfs_depth; - uint32_t jfr_depth; - uint32_t flag; + uint32_t jfs_flag; uint32_t trans_mode; - uint32_t send_jfc_id; - uint32_t recv_jfc_id; - uint32_t jfr_id; /* shared jfr */ + uint8_t priority; uint8_t max_send_sge; uint8_t max_send_rsge; - uint8_t max_recv_sge; uint32_t max_inline_data; - uint8_t priority; - uint8_t retry_cnt; uint8_t rnr_retry; uint8_t err_timeout; - uint8_t min_rnr_timer; - uint32_t key; + uint32_t send_jfc_id; uint64_t send_jfc_handle; /* handle of the related send jfc */ + + uint32_t jfr_depth; + uint32_t jfr_flag; + uint8_t max_recv_sge; + uint8_t min_rnr_timer; + + uint32_t recv_jfc_id; uint64_t recv_jfc_handle; /* handle of the related recv jfc */ + uint32_t token; + + uint32_t jfr_id; /* shared jfr */ uint64_t jfr_handle; /* handle of the shared jfr */ + + uint64_t jetty_grp_handle; /* handle of the jetty_grp */ + uint8_t is_jetty_grp; + uint64_t urma_jetty; /* urma jetty pointer 
*/ } in; struct { @@ -331,10 +393,41 @@ struct uburma_cmd_modify_jetty { uint64_t handle; /* handle of jetty, used to find jetty obj in kernel */ uint32_t mask; /* see urma_jetty_attr_mask_t */ uint32_t rx_threshold; + uint32_t state; } in; struct uburma_cmd_udrv_priv udata; }; +struct uburma_cmd_query_jetty { + struct { + uint64_t handle; /* handle of the allocated jetty obj in kernel */ + } in; + struct { + uint32_t id; /* user may assign id */ + uint32_t jetty_flag; + + uint32_t jfs_depth; + uint32_t jfr_depth; + uint32_t jfs_flag; + uint32_t jfr_flag; + uint32_t trans_mode; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint8_t max_recv_sge; + uint32_t max_inline_data; + uint8_t priority; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + uint8_t min_rnr_timer; + uint32_t jfr_id; + uint32_t token; + + uint32_t rx_threshold; + uint32_t state; + } out; +}; + struct uburma_cmd_delete_jetty { struct { uint64_t handle; /* handle of jetty, used to find jetty obj in kernel */ @@ -348,15 +441,15 @@ struct uburma_cmd_import_jetty { struct { /* correspond to urma_jetty_id */ uint8_t eid[UBCORE_EID_SIZE]; - uint32_t uasid; uint32_t id; uint32_t flag; - /* correspond to urma_key_t */ - uint32_t key; + /* correspond to urma_token_t */ + uint32_t token; uint32_t trans_mode; + uint32_t policy; + uint32_t type; } in; struct { - uint8_t tp_type; /* TP or TPG */ uint32_t tpn; uint64_t handle; /* handle of the allocated tjetty obj in kernel */ } out; @@ -384,6 +477,47 @@ struct uburma_cmd_unadvise_jetty { } in; }; +struct uburma_cmd_bind_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; + struct { + uint32_t tpn; + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unbind_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + } in; +}; + +struct 
uburma_cmd_create_jetty_grp { + struct { + char name[UBCORE_JETTY_GRP_MAX_NAME]; + uint32_t token; + uint32_t id; + uint32_t policy; + uint64_t urma_jetty_grp; /* urma jetty group pointer */ + } in; + struct { + uint32_t id; /* jetty group id allocated by ubcore */ + uint64_t handle; /* handle of the allocated jetty group obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jetty_grp { + struct { + uint64_t handle; /* handle of jetty group, used to find jetty group obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + struct uburma_cmd_user_ctl { struct { uint64_t addr; @@ -431,10 +565,12 @@ struct uburma_cmd_async_event { }; /* copy from user_space addr to kernel args */ -static inline int uburma_copy_from_user(void *args, const void *args_addr, unsigned long args_size) +static inline int uburma_copy_from_user(void *args, const void *args_addr, + unsigned long args_size) { - int ret = (int)copy_from_user(args, args_addr, args_size); + int ret; + ret = (int)copy_from_user(args, args_addr, args_size); if (ret != 0) { uburma_log_err("copy from user failed, ret:%d.\n", ret); return -EFAULT; @@ -443,10 +579,12 @@ static inline int uburma_copy_from_user(void *args, const void *args_addr, unsig } /* copy kernel args to user_space addr */ -static inline int uburma_copy_to_user(void *args_addr, const void *args, unsigned long args_size) +static inline int uburma_copy_to_user(void *args_addr, const void *args, + unsigned long args_size) { - int ret = (int)copy_to_user(args_addr, args, args_size); + int ret; + ret = (int)copy_to_user(args_addr, args, args_size); if (ret != 0) { uburma_log_err("copy to user failed ret:%d.\n", ret); return -EFAULT; diff --git a/drivers/ub/urma/uburma/uburma_dev_ops.c b/drivers/ub/urma/uburma/uburma_dev_ops.c index 625193d2a04a..c2c3c4e0adb0 100644 --- a/drivers/ub/urma/uburma/uburma_dev_ops.c +++ b/drivers/ub/urma/uburma/uburma_dev_ops.c @@ -96,7 +96,7 @@ int 
uburma_open(struct inode *inode, struct file *filp) srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); mutex_lock(&ubu_dev->lists_mutex); ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); - if (ubc_dev == NULL) { + if (ubc_dev == NULL || ubc_dev->dev_name == NULL) { uburma_log_err("can not find ubcore device.\n"); ret = EIO; goto err; diff --git a/drivers/ub/urma/uburma/uburma_event.c b/drivers/ub/urma/uburma/uburma_event.c index b8ef510968e8..15992c918840 100644 --- a/drivers/ub/urma/uburma/uburma_event.c +++ b/drivers/ub/urma/uburma/uburma_event.c @@ -144,10 +144,7 @@ static uint32_t uburma_read_jfe_event(struct uburma_jfe *jfe, uint32_t event_cnt uint32_t cnt = 0; spin_lock_irq(&jfe->lock); - if (jfe->deleting) { - spin_unlock_irq(&jfe->lock); - return 0; - } + list_for_each_safe(p, next, &jfe->event_list) { if (cnt == event_cnt) break; @@ -165,8 +162,7 @@ static uint32_t uburma_read_jfe_event(struct uburma_jfe *jfe, uint32_t event_cnt } static int uburma_wait_event_timeout(struct uburma_jfe *jfe, unsigned long max_timeout, - uint32_t max_event_cnt, uint32_t *event_cnt, - struct list_head *event_list) + uint32_t max_event_cnt, uint32_t *event_cnt, struct list_head *event_list) { long timeout = (long)max_timeout; @@ -201,16 +197,12 @@ static int uburma_wait_event(struct uburma_jfe *jfe, bool nonblock, uint32_t max int ret; *event_cnt = 0; - while (!jfe->deleting) { - asm volatile("" : : : "memory"); - *event_cnt = uburma_read_jfe_event(jfe, max_event_cnt, event_list); - /* Stop waiting once we have read at least one event */ - if (jfe->deleting) - return -EIO; - else if (nonblock && *event_cnt == 0) - return 0; - else if (*event_cnt > 0) - break; + spin_lock_irq(&jfe->lock); + while (list_empty(&jfe->event_list)) { + spin_unlock_irq(&jfe->lock); + if (nonblock) + return -EAGAIN; + /* The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. 
*/ @@ -218,7 +210,16 @@ static int uburma_wait_event(struct uburma_jfe *jfe, bool nonblock, uint32_t max (!list_empty(&jfe->event_list) || jfe->deleting)); if (ret != 0) return ret; + + spin_lock_irq(&jfe->lock); + if (list_empty(&jfe->event_list) && jfe->deleting) { + spin_unlock_irq(&jfe->lock); + return -EIO; + } } + spin_unlock_irq(&jfe->lock); + *event_cnt = uburma_read_jfe_event(jfe, max_event_cnt, event_list); + return 0; } @@ -364,7 +365,7 @@ static int uburma_get_async_event(struct uburma_jfae_uobj *jfae, struct file *fi { struct uburma_cmd_async_event async_event = { 0 }; struct list_head event_list; - struct uburma_jfe_event *event; + struct uburma_jfe_event *event = NULL; uint32_t event_cnt; int ret; @@ -377,6 +378,9 @@ static int uburma_get_async_event(struct uburma_jfae_uobj *jfae, struct file *fi return ret; event = list_first_entry(&event_list, struct uburma_jfe_event, node); + if (event == NULL) + return -EIO; + uburma_set_async_event(&async_event, event); list_del(&event->node); kfree(event); @@ -428,14 +432,12 @@ static void uburma_async_event_callback(struct ubcore_event *event, uburma_write_event(&jfae->jfe, event->element.port_id, event->event_type, NULL, NULL); } - static inline void uburma_init_jfae_handler(struct ubcore_event_handler *handler) { INIT_LIST_HEAD(&handler->node); handler->event_callback = uburma_async_event_callback; } - void uburma_init_jfae(struct uburma_jfae_uobj *jfae, struct ubcore_device *ubc_dev) { uburma_init_jfe(&jfae->jfe); diff --git a/drivers/ub/urma/uburma/uburma_main.c b/drivers/ub/urma/uburma/uburma_main.c index ce013da5e8ab..1aca6e120c8e 100644 --- a/drivers/ub/urma/uburma/uburma_main.c +++ b/drivers/ub/urma/uburma/uburma_main.c @@ -18,7 +18,6 @@ * History: 2021-08-03: Create file */ -#include #include #include #include @@ -47,7 +46,46 @@ static DECLARE_BITMAP(g_dev_bitmap, UBURMA_MAX_DEVICE); static dev_t g_dynamic_uburma_dev; -static struct class *g_uburma_class; + +static const void 
*uburma_net_namespace(struct device *dev) +{ + struct uburma_logic_device *ldev = dev_get_drvdata(dev); + struct uburma_device *ubu_dev; + struct ubcore_device *ubc_dev; + + if (ldev == NULL || ldev->ubu_dev == NULL || ldev->ubu_dev->ubc_dev == NULL) { + uburma_log_info("init net %p", ldev); + return &init_net; + } + + ubu_dev = ldev->ubu_dev; + ubc_dev = ubu_dev->ubc_dev; + + if (ubc_dev->transport_type == UBCORE_TRANSPORT_UB) { + return read_pnet(&ldev->net); + } else if (ubc_dev->transport_type == UBCORE_TRANSPORT_IP) { + if (ubc_dev->netdev) + return dev_net(ubc_dev->netdev); + else + return &init_net; + } else { /* URMA IB device not support namespace yet */ + return &init_net; + } +} +static char *uburma_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = UBURMA_DEVNODE_MODE; + + return kasprintf(GFP_KERNEL, "uburma/%s", dev_name(dev)); +} + +static struct class g_uburma_class = { + .name = UBURMA_MODULE_NAME, + .devnode = uburma_devnode, + .ns_type = &net_ns_type_operations, + .namespace = uburma_net_namespace +}; static const struct file_operations g_uburma_fops = { .owner = THIS_MODULE, @@ -60,6 +98,19 @@ static const struct file_operations g_uburma_fops = { .compat_ioctl = uburma_ioctl, }; +static LIST_HEAD(g_uburma_device_list); +static DECLARE_RWSEM(g_uburma_device_rwsem); + +static unsigned int g_uburma_net_id; +static LIST_HEAD(g_uburma_net_list); +static DEFINE_SPINLOCK(g_uburma_net_lock); +static DECLARE_RWSEM(g_uburma_net_rwsem); + +struct uburma_net { + possible_net_t net; + struct list_head node; +}; + static int uburma_add_device(struct ubcore_device *ubc_dev); static void uburma_remove_device(struct ubcore_device *ubc_dev, void *client_ctx); static struct ubcore_client g_urma_client = { @@ -83,8 +134,9 @@ static struct kobj_type uburma_dev_ktype = { static int uburma_get_devt(dev_t *devt) { - unsigned int devnum = (unsigned int)find_first_zero_bit(g_dev_bitmap, UBURMA_MAX_DEVICE); + unsigned int devnum; + devnum = (unsigned 
int)find_first_zero_bit(g_dev_bitmap, UBURMA_MAX_DEVICE); if (devnum >= UBURMA_MAX_DEVICE) { uburma_log_err("Invalid argument.\n"); return -ENOMEM; @@ -94,63 +146,263 @@ static int uburma_get_devt(dev_t *devt) return 0; } -static int uburma_device_create(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) +static int uburma_create_eid_table(struct uburma_logic_device *ldev, struct ubcore_device *ubc_dev) { - uint8_t i, j, k; + struct uburma_eid *eid_list; - /* create /dev/uburma/dev_name> */ - ubu_dev->dev = device_create(g_uburma_class, ubc_dev->dev.parent, ubu_dev->cdev.dev, - ubu_dev, "%s", ubc_dev->dev_name); - if (IS_ERR(ubu_dev->dev)) { - uburma_log_err("device create failed, device:%s.\n", ubc_dev->dev_name); + eid_list = kcalloc(1, ubc_dev->attr.max_eid_cnt * sizeof(struct uburma_eid), GFP_ATOMIC); + if (eid_list == NULL) return -ENOMEM; + + ldev->eid = eid_list; + return 0; +} + +static void uburma_destroy_eid_table(struct uburma_logic_device *ldev) +{ + if (ldev->eid != NULL) { + kfree(ldev->eid); + ldev->eid = NULL; } +} + +static int uburma_fill_logic_device_attr(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) +{ + uint32_t e1, e2; /* eid */ + uint16_t f1, f2; /* fe */ + uint8_t p1, p2; /* port */ - if (uburma_create_dev_attr_files(ubu_dev) != 0) { + if (uburma_create_dev_attr_files(ldev) != 0) { uburma_log_err("failed to fill attributes, device:%s.\n", ubc_dev->dev_name); - goto destroy_dev; + return -EPERM; } - /* create /dev/uburma/dev_name>/port* */ - for (i = 0; i < ubc_dev->attr.port_cnt; i++) { - if (uburma_create_port_attr_files(ubu_dev, i) != 0) + /* create /sys/class/uburma/dev_name>/port* */ + for (p1 = 0; p1 < ubc_dev->attr.port_cnt; p1++) { + if (uburma_create_port_attr_files(ldev, ubu_dev, p1) != 0) goto err_port_attr; } - /* create /dev/uburma/dev_name>/vf* */ - for (k = 0; k < ubc_dev->attr.vf_cnt; k++) { - if (uburma_create_vf_attr_files(ubu_dev, k) != 0) - goto err_vf_attr; + 
/* create /sys/class/uburma/dev_name>/fe* */ + for (f1 = 0; f1 < ubc_dev->attr.fe_cnt; f1++) { + if (uburma_create_fe_attr_files(ldev, ubu_dev, f1) != 0) + goto err_fe_attr; } + /* create /sys/class/uburma/dev_name>/eid* */ + if (uburma_create_eid_table(ldev, ubc_dev) != 0) + goto err_fe_attr; + + for (e1 = 0; e1 < ubc_dev->attr.max_eid_cnt; e1++) { + if (uburma_create_eid_attr_files(ldev, ubu_dev, e1) != 0) + goto err_eid_attr; + } return 0; -err_vf_attr: - for (j = 0; j < k; j++) - uburma_remove_vf_attr_files(ubu_dev, j); +err_eid_attr: + for (e2 = 0; e2 < e1; e2++) + uburma_remove_eid_attr_files(ldev, e2); + + uburma_destroy_eid_table(ldev); +err_fe_attr: + for (f2 = 0; f2 < f1; f2++) + uburma_remove_fe_attr_files(ldev, f2); err_port_attr: - for (j = 0; j < i; j++) - uburma_remove_port_attr_files(ubu_dev, j); + for (p2 = 0; p2 < p1; p2++) + uburma_remove_port_attr_files(ldev, p2); - uburma_remove_dev_attr_files(ubu_dev); -destroy_dev: - device_destroy(g_uburma_class, ubu_dev->cdev.dev); + uburma_remove_dev_attr_files(ldev); return -EPERM; } -static void uburma_device_destroy(struct uburma_device *ubu_dev, - const struct ubcore_device *ubc_dev) + +static void uburma_unfill_logic_device_attr(struct uburma_logic_device *ldev, + struct ubcore_device *ubc_dev) +{ + uint32_t e; + uint16_t f; + uint8_t p; + + for (e = 0; e < ubc_dev->attr.max_eid_cnt; e++) + uburma_remove_eid_attr_files(ldev, e); + + uburma_destroy_eid_table(ldev); + + for (f = 0; f < ubc_dev->attr.fe_cnt; f++) + uburma_remove_fe_attr_files(ldev, f); + + for (p = 0; p < ubc_dev->attr.port_cnt; p++) + uburma_remove_port_attr_files(ldev, p); + + uburma_remove_dev_attr_files(ldev); +} + +static int uburma_device_create(struct uburma_logic_device *ldev, + struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev, struct net *net) +{ + /* create /sys/class/uburma/dev_name> */ + write_pnet(&ldev->net, net); + ldev->ubu_dev = ubu_dev; + + /* Two devices have same char device devt will cause duplicate file 
name + * error in sysfs_create_link, although they are in different namespaces + */ + if (net_eq(net, &init_net)) + ldev->dev = device_create(&g_uburma_class, ubc_dev->dev.parent, + ubu_dev->cdev.dev, ldev, "%s", ubc_dev->dev_name); + else + ldev->dev = device_create(&g_uburma_class, ubc_dev->dev.parent, + MKDEV(0, 0), ldev, "%s", ubc_dev->dev_name); + if (IS_ERR(ldev->dev)) { + uburma_log_err("device create failed, device:%s.\n", ubc_dev->dev_name); + return -ENOMEM; + } + + if (uburma_fill_logic_device_attr(ldev, ubu_dev, ubc_dev) != 0) { + device_unregister(ldev->dev); + ldev->dev = NULL; + uburma_log_err("failed to fill attributes, device:%s.\n", ubc_dev->dev_name); + return -EPERM; + } + + return 0; +} + +static void uburma_device_destroy(struct uburma_logic_device *ldev, struct ubcore_device *ubc_dev) { - uint8_t i; + uburma_unfill_logic_device_attr(ldev, ubc_dev); + device_unregister(ldev->dev); + ldev->dev = NULL; +} + +static void uburma_remove_one_logic_device(struct uburma_device *ubu_dev, struct net *net) +{ + struct uburma_logic_device *ldev, *tmp; + struct ubcore_device *ubc_dev; + int srcu_idx; + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (!ubc_dev) { + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return; + } + + if (ubc_dev->transport_type != UBCORE_TRANSPORT_UB) { + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return; + } + + mutex_lock(&ubu_dev->ldev_mutex); + list_for_each_entry_safe(ldev, tmp, &ubu_dev->ldev_list, node) { + if (net_eq(read_pnet(&ldev->net), net)) { + uburma_device_destroy(ldev, ubc_dev); + list_del(&ldev->node); + kfree(ldev); + break; + } + } + mutex_unlock(&ubu_dev->ldev_mutex); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); +} - for (i = 0; i < ubc_dev->attr.vf_cnt; i++) - uburma_remove_vf_attr_files(ubu_dev, i); +static void uburma_remove_logic_devices(struct uburma_device *ubu_dev, + struct ubcore_device *ubc_dev) 
+{ + struct uburma_logic_device *ldev, *tmp; - for (i = 0; i < ubc_dev->attr.port_cnt; i++) - uburma_remove_port_attr_files(ubu_dev, i); + if (ubc_dev->transport_type != UBCORE_TRANSPORT_UB) + return; - uburma_remove_dev_attr_files(ubu_dev); - device_destroy(g_uburma_class, ubu_dev->cdev.dev); + mutex_lock(&ubu_dev->ldev_mutex); + list_for_each_entry_safe(ldev, tmp, &ubu_dev->ldev_list, node) { + uburma_device_destroy(ldev, ubc_dev); + list_del(&ldev->node); + kfree(ldev); + } + mutex_unlock(&ubu_dev->ldev_mutex); +} + +static int uburma_create_one_logic_device(struct uburma_device *ubu_dev, + struct ubcore_device *ubc_dev, struct net *net) +{ + struct uburma_logic_device *ldev; + int ret; + + mutex_lock(&ubu_dev->ldev_mutex); + list_for_each_entry(ldev, &ubu_dev->ldev_list, node) { + if (net_eq(read_pnet(&ubu_dev->ldev.net), net)) { + mutex_unlock(&ubu_dev->ldev_mutex); + return 0; + } + } + + ldev = kzalloc(sizeof(struct uburma_logic_device), GFP_KERNEL); + if (ldev == NULL) { + mutex_unlock(&ubu_dev->ldev_mutex); + return -ENOMEM; + } + + ret = uburma_device_create(ldev, ubu_dev, ubc_dev, net); + if (ret) { + kfree(ldev); + mutex_unlock(&ubu_dev->ldev_mutex); + uburma_log_err("add device failed %s in net %u", ubc_dev->dev_name, net->ns.inum); + return ret; + } + + list_add_tail(&ldev->node, &ubu_dev->ldev_list); + mutex_unlock(&ubu_dev->ldev_mutex); + uburma_log_info("add device %s in net %u", ubc_dev->dev_name, net->ns.inum); + return 0; +} + +static int uburma_add_one_logic_device(struct uburma_device *ubu_dev, struct net *net) +{ + struct ubcore_device *ubc_dev; + int srcu_idx; + int ret; + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (!ubc_dev) { + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return 0; + } + + if (ubc_dev->transport_type != UBCORE_TRANSPORT_UB) { + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return 0; + } + + ret = 
uburma_create_one_logic_device(ubu_dev, ubc_dev, net); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return ret; +} + +static int uburma_copy_logic_devices(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) +{ + struct uburma_net *unet; + int ret = 0; + + if (ubc_dev->transport_type != UBCORE_TRANSPORT_UB) + return 0; + + down_read(&g_uburma_net_rwsem); + list_for_each_entry(unet, &g_uburma_net_list, node) { + if (net_eq(read_pnet(&unet->net), read_pnet(&ubu_dev->ldev.net))) + continue; + ret = uburma_create_one_logic_device(ubu_dev, ubc_dev, read_pnet(&unet->net)); + if (ret != 0) + break; + } + up_read(&g_uburma_net_rwsem); + + if (ret) + uburma_remove_logic_devices(ubu_dev, ubc_dev); + + return ret; } static int uburma_cdev_create(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) @@ -169,16 +421,23 @@ static int uburma_cdev_create(struct uburma_device *ubu_dev, struct ubcore_devic ubu_dev->cdev.kobj.parent = &ubu_dev->kobj; (void)kobject_set_name(&ubu_dev->cdev.kobj, "%s", ubc_dev->dev_name); - /* create /sys/class/uburma/dev_name> */ + /* create /dev/uburma/dev_name> */ if (cdev_add(&ubu_dev->cdev, base, 1)) goto free_bit; - if (uburma_device_create(ubu_dev, ubc_dev) != 0) { + if (uburma_device_create(&ubu_dev->ldev, ubu_dev, ubc_dev, &init_net) != 0) { uburma_log_err("device create failed, device:%s.\n", ubc_dev->dev_name); goto del_cdev; } + + if (uburma_copy_logic_devices(ubu_dev, ubc_dev) != 0) { + uburma_log_err("copy logic device failed, device:%s.\n", ubc_dev->dev_name); + goto destroy_device; + } return 0; +destroy_device: + uburma_device_destroy(&ubu_dev->ldev, ubc_dev); del_cdev: cdev_del(&ubu_dev->cdev); free_bit: @@ -186,6 +445,20 @@ static int uburma_cdev_create(struct uburma_device *ubu_dev, struct ubcore_devic return -EPERM; } +static void uburma_list_add_device(struct uburma_device *ubu_dev) +{ + down_write(&g_uburma_device_rwsem); + list_add_tail(&ubu_dev->node, &g_uburma_device_list); + 
up_write(&g_uburma_device_rwsem); +} + +static void uburma_list_remove_device(struct uburma_device *ubu_dev) +{ + down_write(&g_uburma_device_rwsem); + list_del_init(&ubu_dev->node); + up_write(&g_uburma_device_rwsem); +} + static int uburma_add_device(struct ubcore_device *ubc_dev) { struct uburma_device *ubu_dev; @@ -212,8 +485,10 @@ static int uburma_add_device(struct ubcore_device *ubc_dev) mutex_init(&ubu_dev->lists_mutex); INIT_LIST_HEAD(&ubu_dev->uburma_file_list); + mutex_init(&ubu_dev->ldev_mutex); + INIT_LIST_HEAD(&ubu_dev->ldev_list); + rcu_assign_pointer(ubu_dev->ubc_dev, ubc_dev); - ubu_dev->num_comp_vectors = ubc_dev->num_comp_vectors; if (uburma_cdev_create(ubu_dev, ubc_dev) != 0) { uburma_log_err("can not create cdev.\n"); @@ -221,6 +496,7 @@ static int uburma_add_device(struct ubcore_device *ubc_dev) } ubcore_set_client_ctx_data(ubc_dev, &g_urma_client, ubu_dev); + uburma_list_add_device(ubu_dev); return 0; err: @@ -271,7 +547,9 @@ static void uburma_remove_device(struct ubcore_device *ubc_dev, void *client_ctx if (ubu_dev == NULL) return; - uburma_device_destroy(ubu_dev, ubc_dev); + uburma_list_remove_device(ubu_dev); + uburma_remove_logic_devices(ubu_dev, ubc_dev); + uburma_device_destroy(&ubu_dev->ldev, ubc_dev); cdev_del(&ubu_dev->cdev); clear_bit(ubu_dev->devnum, g_dev_bitmap); @@ -302,30 +580,6 @@ static void uburma_unregister_client(void) uburma_log_info("unregister client succeed.\n"); } -static char *uburma_devnode(struct device *dev, umode_t *mode) -{ - if (mode) - *mode = UBURMA_DEVNODE_MODE; - - return kasprintf(GFP_KERNEL, "uburma/%s", dev_name(dev)); -} - -static const void *uburma_net_namespace(struct device *dev) -{ - struct uburma_device *ubu_dev = dev_get_drvdata(dev); - struct ubcore_device *ubc_dev; - - if (ubu_dev == NULL) - return &init_net; - - ubc_dev = ubu_dev->ubc_dev; - - if (ubc_dev->netdev) - return dev_net(ubc_dev->netdev); - else - return &init_net; -} - static int uburma_class_create(void) { int ret; @@ -338,15 
+592,12 @@ static int uburma_class_create(void) } /* create /sys/class/uburma */ - g_uburma_class = class_create(THIS_MODULE, UBURMA_MODULE_NAME); - if (IS_ERR(g_uburma_class)) { - ret = (int)PTR_ERR(g_uburma_class); + ret = class_register(&g_uburma_class); + if (ret) { uburma_log_err("couldn't create class %s.\n", UBURMA_MODULE_NAME); goto out_chrdev; } - g_uburma_class->devnode = uburma_devnode; - g_uburma_class->ns_type = &net_ns_type_operations; - g_uburma_class->namespace = uburma_net_namespace; + /* * to do class_create_file */ @@ -360,10 +611,82 @@ static int uburma_class_create(void) static void uburma_class_destroy(void) { - class_destroy(g_uburma_class); + class_unregister(&g_uburma_class); unregister_chrdev_region(g_dynamic_uburma_dev, UBURMA_DYNAMIC_MINOR_NUM); } +static void uburma_net_exit(struct net *net) +{ + struct uburma_net *unet = net_generic(net, g_uburma_net_id); + struct uburma_device *ubu_dev; + unsigned long flags; + + if (unet == NULL) + return; + + uburma_log_info("net exit %u", net->ns.inum); + down_write(&g_uburma_net_rwsem); + spin_lock_irqsave(&g_uburma_net_lock, flags); + if (list_empty(&unet->node)) { + spin_unlock_irqrestore(&g_uburma_net_lock, flags); + up_write(&g_uburma_net_rwsem); + return; + } + list_del_init(&unet->node); + spin_unlock_irqrestore(&g_uburma_net_lock, flags); + up_write(&g_uburma_net_rwsem); + + down_read(&g_uburma_device_rwsem); + list_for_each_entry(ubu_dev, &g_uburma_device_list, node) { + uburma_remove_one_logic_device(ubu_dev, net); + } + up_read(&g_uburma_device_rwsem); +} + +static int uburma_net_init(struct net *net) +{ + struct uburma_net *unet = net_generic(net, g_uburma_net_id); + struct uburma_device *ubu_dev; + unsigned long flags; + int ret = 0; + + if (unet == NULL) + return 0; + + uburma_log_info("net init %u", net->ns.inum); + write_pnet(&unet->net, net); + if (net_eq(net, &init_net)) { + INIT_LIST_HEAD(&unet->node); + return 0; + } + + spin_lock_irqsave(&g_uburma_net_lock, flags); + 
list_add_tail(&unet->node, &g_uburma_net_list); + spin_unlock_irqrestore(&g_uburma_net_lock, flags); + + down_read(&g_uburma_device_rwsem); + list_for_each_entry(ubu_dev, &g_uburma_device_list, node) { + down_read(&g_uburma_net_rwsem); + ret = uburma_add_one_logic_device(ubu_dev, net); + up_read(&g_uburma_net_rwsem); + if (ret) + break; + } + up_read(&g_uburma_device_rwsem); + if (ret) + uburma_net_exit(net); + + /* return ret will cause error starting a container */ + return 0; +} + +static struct pernet_operations g_uburma_net_ops = { + .init = uburma_net_init, + .exit = uburma_net_exit, + .id = &g_uburma_net_id, + .size = sizeof(struct uburma_net) +}; + static int __init uburma_init(void) { int ret; @@ -375,12 +698,21 @@ static int __init uburma_init(void) } uburma_register_client(); + + ret = register_pernet_device(&g_uburma_net_ops); + if (ret != 0) { + uburma_unregister_client(); + uburma_class_destroy(); + uburma_log_err("register_pernet_device failed"); + return ret; + } uburma_log_info("uburma module init success.\n"); return 0; } static void __exit uburma_exit(void) { + unregister_pernet_device(&g_uburma_net_ops); uburma_unregister_client(); uburma_class_destroy(); uburma_log_info("uburma module exits.\n"); diff --git a/drivers/ub/urma/uburma/uburma_types.h b/drivers/ub/urma/uburma/uburma_types.h index 4cbaa5c16776..130100a4bae4 100644 --- a/drivers/ub/urma/uburma/uburma_types.h +++ b/drivers/ub/urma/uburma/uburma_types.h @@ -21,6 +21,8 @@ #ifndef UBURMA_TYPES_H #define UBURMA_TYPES_H +#include +#include #include #include #include @@ -65,29 +67,44 @@ struct uburma_port { uint8_t port_num; }; -struct uburma_vf { +struct uburma_fe { struct kobject kobj; struct uburma_device *ubu_dev; - uint32_t vf_idx; + uint16_t fe_idx; }; +struct uburma_eid { + struct kobject kobj; + struct uburma_device *ubu_dev; + uint32_t eid_idx; +}; + +struct uburma_logic_device { + struct device *dev; + struct uburma_port port[UBCORE_MAX_PORT_CNT]; + struct uburma_fe 
fe[UBCORE_MAX_FE_CNT]; + struct uburma_eid *eid; + struct list_head node; /* add to ldev list */ + possible_net_t net; + struct uburma_device *ubu_dev; +}; struct uburma_device { - atomic_t refcnt; + atomic_t refcnt; struct completion comp; /* When refcnt becomes 0, it will wake up */ atomic_t cmdcnt; /* number of unfinished ioctl and mmap cmds */ struct completion cmddone; /* When cmdcnt becomes 0, cmddone will wake up */ - int num_comp_vectors; unsigned int devnum; struct cdev cdev; - struct device *dev; - struct uburma_port port[UBCORE_MAX_PORT_CNT]; - struct uburma_vf vf[UBCORE_MAX_VF_CNT]; + struct uburma_logic_device ldev; struct ubcore_device *__rcu ubc_dev; - struct srcu_struct ubc_dev_srcu; /* protect ubc_dev */ - struct kobject kobj; /* when equal to 0 , free uburma_device. */ - struct mutex lists_mutex; /* protect lists */ + struct srcu_struct ubc_dev_srcu; /* protect ubc_dev */ + struct kobject kobj; /* when equal to 0 , free uburma_device. */ + struct mutex lists_mutex; /* protect lists */ struct list_head uburma_file_list; + struct list_head node; /* add to uburma_device_list */ + struct mutex ldev_mutex; + struct list_head ldev_list; }; #endif /* UBURMA_TYPES_H */ diff --git a/drivers/ub/urma/uburma/uburma_uobj.c b/drivers/ub/urma/uburma/uburma_uobj.c index 72bf6ead5d77..3536274f6d56 100644 --- a/drivers/ub/urma/uburma/uburma_uobj.c +++ b/drivers/ub/urma/uburma/uburma_uobj.c @@ -32,6 +32,8 @@ #include "uburma_event.h" #include "uburma_uobj.h" +static bool g_is_zero_fd; + static void uobj_free(struct kref *ref) { kfree_rcu(container_of(ref, struct uburma_uobj, ref), rcu); @@ -117,8 +119,8 @@ static int uobj_alloc_idr(struct uburma_uobj *uobj) spin_lock(&uobj->ufile->idr_lock); /* Alloc idr pointing to NULL. Will replace it once we commit. 
*/ - ret = idr_alloc(&uobj->ufile->idr, NULL, 0, min_t(unsigned long, U32_MAX - 1U, INT_MAX), - GFP_NOWAIT); + ret = idr_alloc(&uobj->ufile->idr, NULL, 1, + min_t(unsigned long, U32_MAX - 1U, INT_MAX), GFP_NOWAIT); if (ret >= 0) uobj->id = ret; @@ -277,6 +279,13 @@ static struct uburma_uobj *uobj_fd_alloc_begin(const struct uobj_type *type, if (new_fd < 0) return ERR_PTR(new_fd); + if (new_fd == 0) { + new_fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (new_fd < 0) + return ERR_PTR(new_fd); + g_is_zero_fd = true; + } + uobj = alloc_uobj(ufile, type); if (IS_ERR(uobj)) { put_unused_fd(new_fd); @@ -320,8 +329,7 @@ static void uobj_fd_alloc_abort(struct uburma_uobj *uobj) } static struct uburma_uobj *uobj_fd_lookup_get(const struct uobj_type *type, - struct uburma_file *ufile, int id, - enum uobj_access flag) + struct uburma_file *ufile, int id, enum uobj_access flag) { const struct uobj_fd_type *fd_type = container_of(type, struct uobj_fd_type, type); struct uburma_uobj *uobj; @@ -376,8 +384,8 @@ static int __must_check uobj_fd_remove_commit(struct uburma_uobj *uobj, return ret; } -struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, struct uburma_file *ufile, int id, - enum uobj_access flag) +struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, enum uobj_access flag) { struct ubcore_device *ubc_dev; struct uburma_uobj *uobj; @@ -452,6 +460,7 @@ int __must_check uobj_remove_commit(struct uburma_uobj *uobj) void uburma_init_uobj_context(struct uburma_file *ufile) { + g_is_zero_fd = false; ufile->cleanup_reason = 0; idr_init(&ufile->idr); spin_lock_init(&ufile->idr_lock); @@ -499,13 +508,16 @@ void uburma_cleanup_uobjs(struct uburma_file *ufile, enum uburma_remove_reason w mutex_unlock(&ufile->uobjects_lock); cur_order = next_order; } - + if (g_is_zero_fd == true) { + put_unused_fd(0); + g_is_zero_fd = false; + } up_write(&ufile->cleanup_rwsem); } -static int uburma_free_key(struct uburma_uobj *uobj, 
enum uburma_remove_reason why) +static int uburma_free_token_id(struct uburma_uobj *uobj, enum uburma_remove_reason why) { - return ubcore_free_key_id((struct ubcore_key_id *)uobj->object); + return ubcore_free_token_id((struct ubcore_token_id *)uobj->object); } static int uburma_free_seg(struct uburma_uobj *uobj, enum uburma_remove_reason why) @@ -518,8 +530,9 @@ static int uburma_free_jfc(struct uburma_uobj *uobj, enum uburma_remove_reason w struct uburma_jfc_uobj *jfc_uobj = container_of(uobj, struct uburma_jfc_uobj, uobj); struct ubcore_jfc *jfc = (struct ubcore_jfc *)uobj->object; struct uburma_jfce_uobj *jfce_uobj; - int ret = ubcore_delete_jfc(jfc); + int ret; + ret = ubcore_delete_jfc(jfc); if (ret) return ret; @@ -536,8 +549,9 @@ static int uburma_free_jfc(struct uburma_uobj *uobj, enum uburma_remove_reason w static int uburma_free_jfs(struct uburma_uobj *uobj, enum uburma_remove_reason why) { struct uburma_jfs_uobj *jfs_uobj = container_of(uobj, struct uburma_jfs_uobj, uobj); - int ret = ubcore_delete_jfs((struct ubcore_jfs *)uobj->object); + int ret; + ret = ubcore_delete_jfs((struct ubcore_jfs *)uobj->object); if (ret) return ret; @@ -548,8 +562,9 @@ static int uburma_free_jfs(struct uburma_uobj *uobj, enum uburma_remove_reason w static int uburma_free_jfr(struct uburma_uobj *uobj, enum uburma_remove_reason why) { struct uburma_jfr_uobj *jfr_uobj = container_of(uobj, struct uburma_jfr_uobj, uobj); - int ret = ubcore_delete_jfr((struct ubcore_jfr *)uobj->object); + int ret; + ret = ubcore_delete_jfr((struct ubcore_jfr *)uobj->object); if (ret) return ret; @@ -560,8 +575,9 @@ static int uburma_free_jfr(struct uburma_uobj *uobj, enum uburma_remove_reason w static int uburma_free_jetty(struct uburma_uobj *uobj, enum uburma_remove_reason why) { struct uburma_jetty_uobj *jetty_uobj = container_of(uobj, struct uburma_jetty_uobj, uobj); - int ret = ubcore_delete_jetty((struct ubcore_jetty *)uobj->object); + int ret; + ret = ubcore_delete_jetty((struct 
ubcore_jetty *)uobj->object); if (ret) return ret; @@ -569,6 +585,20 @@ static int uburma_free_jetty(struct uburma_uobj *uobj, enum uburma_remove_reason return ret; } +static int uburma_free_jetty_grp(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jetty_grp_uobj *jetty_grp_uobj = + container_of(uobj, struct uburma_jetty_grp_uobj, uobj); + int ret; + + ret = ubcore_delete_jetty_grp((struct ubcore_jetty_group *)uobj->object); + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jetty_grp_uobj->async_event_list); + return ret; +} + static int uburma_free_tjfr(struct uburma_uobj *uobj, enum uburma_remove_reason why) { return ubcore_unimport_jfr((struct ubcore_tjetty *)uobj->object); @@ -641,14 +671,16 @@ static int uburma_hot_unplug_jfae(struct uburma_uobj *uobj, enum uburma_remove_r spin_unlock_irq(&jfe->lock); return 0; } - jfe->deleting = true; spin_unlock_irq(&jfe->lock); - ubcore_unregister_event_handler(jfae->dev, &jfae->event_handler); - if (why == UBURMA_REMOVE_DRIVER_REMOVE) uburma_write_event(&jfae->jfe, 0, UBCORE_EVENT_DEV_FATAL, NULL, NULL); + spin_lock_irq(&jfe->lock); + jfe->deleting = true; + ubcore_unregister_event_handler(jfae->dev, &jfae->event_handler); + spin_unlock_irq(&jfe->lock); + return 0; } @@ -676,24 +708,26 @@ declare_uobj_class(UOBJ_CLASS_JFCE, &uburma_jfce_fops, "[jfce]", O_RDWR | O_CLOEXEC)); declare_uobj_class(UOBJ_CLASS_JFAE, - &uobj_type_alloc_fd(3, sizeof(struct uburma_jfae_uobj), uburma_hot_unplug_jfae, - &uburma_jfae_fops, "[jfae]", O_RDWR | O_CLOEXEC)); - -declare_uobj_class(UOBJ_CLASS_JFC, - &uobj_type_alloc_idr(sizeof(struct uburma_jfc_uobj), 2, uburma_free_jfc)); -declare_uobj_class(UOBJ_CLASS_KEY, - &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, uburma_free_key)); -declare_uobj_class(UOBJ_CLASS_SEG, - &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, uburma_free_seg)); -declare_uobj_class(UOBJ_CLASS_JFS, - &uobj_type_alloc_idr(sizeof(struct uburma_jfs_uobj), 1, 
uburma_free_jfs)); -declare_uobj_class(UOBJ_CLASS_JFR, - &uobj_type_alloc_idr(sizeof(struct uburma_jfr_uobj), 1, uburma_free_jfr)); -declare_uobj_class(UOBJ_CLASS_JETTY, - &uobj_type_alloc_idr(sizeof(struct uburma_jetty_uobj), 1, uburma_free_jetty)); -declare_uobj_class(UOBJ_CLASS_TARGET_JFR, - &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, uburma_free_tjfr)); -declare_uobj_class(UOBJ_CLASS_TARGET_JETTY, - &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, uburma_free_tjetty)); -declare_uobj_class(UOBJ_CLASS_TARGET_SEG, - &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, uburma_free_tseg)); + &uobj_type_alloc_fd(3, sizeof(struct uburma_jfae_uobj), uburma_hot_unplug_jfae, + &uburma_jfae_fops, "[jfae]", O_RDWR | O_CLOEXEC)); + +declare_uobj_class(UOBJ_CLASS_JFC, &uobj_type_alloc_idr(sizeof(struct uburma_jfc_uobj), 2, + uburma_free_jfc)); +declare_uobj_class(UOBJ_CLASS_TOKEN, &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, + uburma_free_token_id)); +declare_uobj_class(UOBJ_CLASS_SEG, &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, + uburma_free_seg)); +declare_uobj_class(UOBJ_CLASS_JFS, &uobj_type_alloc_idr(sizeof(struct uburma_jfs_uobj), 1, + uburma_free_jfs)); +declare_uobj_class(UOBJ_CLASS_JFR, &uobj_type_alloc_idr(sizeof(struct uburma_jfr_uobj), 1, + uburma_free_jfr)); +declare_uobj_class(UOBJ_CLASS_JETTY, &uobj_type_alloc_idr(sizeof(struct uburma_jetty_uobj), 1, + uburma_free_jetty)); +declare_uobj_class(UOBJ_CLASS_JETTY_GRP, &uobj_type_alloc_idr( + sizeof(struct uburma_jetty_grp_uobj), 1, uburma_free_jetty_grp)); +declare_uobj_class(UOBJ_CLASS_TARGET_JFR, &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, + uburma_free_tjfr)); +declare_uobj_class(UOBJ_CLASS_TARGET_JETTY, &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, + uburma_free_tjetty)); +declare_uobj_class(UOBJ_CLASS_TARGET_SEG, &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, + uburma_free_tseg)); diff --git a/drivers/ub/urma/uburma/uburma_uobj.h 
b/drivers/ub/urma/uburma/uburma_uobj.h index 5a92c0025f2c..6565489036f0 100644 --- a/drivers/ub/urma/uburma/uburma_uobj.h +++ b/drivers/ub/urma/uburma/uburma_uobj.h @@ -25,7 +25,7 @@ enum UOBJ_CLASS_ID { UOBJ_CLASS_ROOT, /* used by framework */ - UOBJ_CLASS_KEY, + UOBJ_CLASS_TOKEN, UOBJ_CLASS_SEG, UOBJ_CLASS_TARGET_SEG, UOBJ_CLASS_JFR, @@ -35,7 +35,8 @@ enum UOBJ_CLASS_ID { UOBJ_CLASS_JFAE, UOBJ_CLASS_TARGET_JFR, UOBJ_CLASS_JETTY, - UOBJ_CLASS_TARGET_JETTY + UOBJ_CLASS_TARGET_JETTY, + UOBJ_CLASS_JETTY_GRP }; enum uobj_access { @@ -94,7 +95,6 @@ struct uburma_jfe { spinlock_t lock; struct list_head event_list; wait_queue_head_t poll_wait; - bool deleting; }; @@ -130,6 +130,12 @@ struct uburma_jetty_uobj { uint32_t async_events_reported; }; +struct uburma_jetty_grp_uobj { + struct uburma_uobj uobj; /* base uobj struct */ + struct list_head async_event_list; + uint32_t async_events_reported; +}; + struct uburma_jfae_uobj { struct uburma_uobj uobj; struct uburma_jfe jfe; @@ -144,8 +150,8 @@ extern const struct uobj_type_class uobj_fd_type_class; struct uburma_uobj *uobj_alloc_begin(const struct uobj_type *type, struct uburma_file *ufile); int uobj_alloc_commit(struct uburma_uobj *uobj); void uobj_alloc_abort(struct uburma_uobj *uobj); -struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, struct uburma_file *ufile, int id, - enum uobj_access flag); +struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, enum uobj_access flag); void uobj_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag); int __must_check uobj_remove_commit(struct uburma_uobj *uobj); void uobj_get(struct uburma_uobj *uobj); @@ -161,17 +167,16 @@ void uburma_close_uobj_fd(struct file *f); #define uobj_get_type(class_id) uobj_class_name(class_id).type_attrs -#define _uobj_class_set(_id, _type_attrs) \ +#define _uobj_class_set(_id, _type_attrs) \ ((const struct uobj_class_def){ .id = (_id), .type_attrs = (_type_attrs) }) -#define 
_declare_uobj_class(_name, _id, _type_attrs) \ +#define _declare_uobj_class(_name, _id, _type_attrs) \ const struct uobj_class_def _name = _uobj_class_set(_id, _type_attrs) -#define declare_uobj_class(class_id, ...) \ +#define declare_uobj_class(class_id, ...) \ _declare_uobj_class(uobj_class_name(class_id), class_id, ##__VA_ARGS__) - -#define uobj_type_alloc_idr(_size, _order, _destroy_func) \ +#define uobj_type_alloc_idr(_size, _order, _destroy_func) \ ((&((const struct uobj_idr_type) { \ .type = { \ .type_class = &uobj_idr_type_class, \ @@ -181,17 +186,17 @@ void uburma_close_uobj_fd(struct file *f); .destroy_func = (_destroy_func), \ }))->type) -#define uobj_type_alloc_fd(_order, _obj_size, _context_closed, _fops, _name, _flags) \ - ((&((const struct uobj_fd_type) { \ - .type = { \ - .destroy_order = (_order), \ - .type_class = &uobj_fd_type_class, \ - .obj_size = (_obj_size), \ - }, \ - .context_closed = (_context_closed), \ - .fops = (_fops), \ - .name = (_name), \ - .flags = (_flags) \ +#define uobj_type_alloc_fd(_order, _obj_size, _context_closed, _fops, _name, _flags) \ + ((&((const struct uobj_fd_type) { \ + .type = { \ + .destroy_order = (_order), \ + .type_class = &uobj_fd_type_class, \ + .obj_size = (_obj_size), \ + }, \ + .context_closed = (_context_closed), \ + .fops = (_fops), \ + .name = (_name), \ + .flags = (_flags) \ }))->type) static inline bool uobj_type_is_fd(const struct uburma_uobj *uobj) @@ -201,21 +206,21 @@ static inline bool uobj_type_is_fd(const struct uburma_uobj *uobj) #define uobj_alloc(class_id, ufile) uobj_alloc_begin(uobj_get_type(class_id), ufile) -#define uobj_get_read(class_id, _id, ufile) \ +#define uobj_get_read(class_id, _id, ufile) \ uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_READ) #define uobj_put_read(uobj) uobj_lookup_put(uobj, UOBJ_ACCESS_READ) -#define uobj_get_write(class_id, _id, ufile) \ +#define uobj_get_write(class_id, _id, ufile) \ uobj_lookup_get(uobj_get_type(class_id), ufile, _id, 
UOBJ_ACCESS_WRITE) #define uobj_put_write(uobj) uobj_lookup_put(uobj, UOBJ_ACCESS_WRITE) /* Do not lock uobj without cleanup_rwsem locked */ -#define uobj_get_del(class_id, _id, ufile) \ +#define uobj_get_del(class_id, _id, ufile) \ uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_NOLOCK) -extern const struct uobj_class_def uobj_class_UOBJ_CLASS_KEY; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TOKEN; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_SEG; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFCE; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFAE; @@ -223,6 +228,7 @@ extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFC; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFR; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFS; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JETTY; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JETTY_GRP; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_JFR; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_SEG; extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_JETTY; diff --git a/include/urma/ubcore_api.h b/include/urma/ubcore_api.h index 3273fbb6cebb..3132c05e47fa 100644 --- a/include/urma/ubcore_api.h +++ b/include/urma/ubcore_api.h @@ -21,7 +21,7 @@ #ifndef UBCORE_API_H #define UBCORE_API_H -#include +#include "ubcore_types.h" /** * Register a device to ubcore @@ -65,7 +65,7 @@ void ubcore_umem_release(struct ubcore_umem *umem); * @return: tp pointer on success, NULL on error */ struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, - const union ubcore_eid *remote_eid, + union ubcore_eid *remote_eid, enum ubcore_transport_mode trans_mode, struct ubcore_udata *udata); @@ -84,4 +84,61 @@ int ubcore_destroy_vtp(struct ubcore_tp *vtp); */ enum ubcore_mtu ubcore_get_mtu(int mtu); +/** + * Invoke create virtual tp on a PF device, called only by driver + * @param[in] dev: the 
ubcore device; + * @param[in] msg: received msg + * @return: 0 on success, other value on error + */ +int ubcore_recv_msg(struct ubcore_device *dev, struct ubcore_msg *msg); + +/** + * Invoke ndev bind port_id, called only by driver + * @param[in] dev: the ubcore device; + * @param[in] ndev: The netdev corresponding to the initial port + * @param[in] port_id: The physical port_id is the same as the port_id presented in the sysfs file, + * and port_id is configured in TP during link establishment. + * @return: 0 on success, other value on error + */ +int ubcore_set_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + unsigned int port_id); + +/** + * Invoke ndev unbind port_id, called only by driver + * @param[in] dev: the ubcore device; + * @return: void + */ +void ubcore_put_port_netdev(struct ubcore_device *dev); + +/** + * Invoke The management system calls ubcore interface through uvs_admin to set the device name + * and add sip information used for link establishment. + * @param[in] sip: Specify the sip information used to establish the link, including device name, + * sip, mac, vlan, physical port list. + * @return: 0 on success, other value on error + */ +int ubcore_add_sip(struct ubcore_sip_info *sip); + +/** + * Invoke The management system calls ubcore interface through UVS to delete the sip information. + * @param[in] sip: Specify the sip information used to establish the link, including device name, + * sip, mac, vlan, physical port list. 
+ * @return: 0 on success, other value on error + */ +int ubcore_delete_sip(struct ubcore_sip_info *sip); + +/** + * Invoke get eid list + * @param[in] dev: the ubcore device; + * @param[out] cnt: eid cnt + * @return: eid info on success, NULL on error + */ +struct ubcore_eid_info *ubcore_get_eid_list(struct ubcore_device *dev, uint32_t *cnt); + +/** + * Release umem allocated + * @param[in] eid_list: the eid list to be freed + */ +void ubcore_free_eid_list(struct ubcore_eid_info *eid_list); + #endif diff --git a/include/urma/ubcore_opcode.h b/include/urma/ubcore_opcode.h index 515c7710755d..85e3490ef67b 100644 --- a/include/urma/ubcore_opcode.h +++ b/include/urma/ubcore_opcode.h @@ -21,22 +21,65 @@ #ifndef UBCORE_OPCODE_H #define UBCORE_OPCODE_H +/* Indicates the verification policy of the key. */ +#define UBCORE_TOKEN_NONE 0 +#define UBCORE_TOKEN_PLAIN_TEXT 1 +#define UBCORE_TOKEN_SIGNED 2 +#define UBCORE_TOKEN_ALL_ENCRYPTED 3 +#define UBCORE_TOKEN_RESERVED 4 + +#define UBCORE_TOKEN_ID_INVALID 0 +#define UBCORE_TOKEN_ID_VALID 1 + +/* Indicates whether the segment can be cached by multiple hosts. */ +#define UBCORE_NON_CACHEABLE 0 +#define UBCORE_CACHEABLE 1 + +/* Indicates that the current process has mapped this segment */ +#define UBCORE_SEG_NOMAP 0 +#define UBCORE_SEG_MAPPED 1 + +/* Notify the source after the task is completed. */ +#define UBCORE_COMPLETE_ENABLE 1 +/* Do not notify the source after the task is complete. */ +#define UBCORE_COMPLETE_DISABLE 0 + +/* There is no interruption when notifying through JFC. */ +#define UBCORE_SOLICITED_DISABLE 0 +/* Interrupt occurred while notifying via JFC. */ +#define UBCORE_SOLICITED_ENABLE 1 + +/* There is no fence. */ +#define UBCORE_FENCE_DISABLE 0 +/* Fence with previous WRs. */ +#define UBCORE_FENCE_ENABLE 1 + +/* The data is generated by source_address assignment. */ +#define UBCORE_INLINE_DISABLE 0 +/* The data is carried in the command. 
*/ +#define UBCORE_INLINE_ENABLE 1 + +#define UBCORE_NO_SHARE_JFR 0 +#define UBCORE_SHARE_JFR 1 + /* opcode definition */ /* Must be consistent with urma_opcode_t */ enum ubcore_opcode { - UBCORE_OPC_WRITE = 0x00, - UBCORE_OPC_WRITE_IMM = 0x01, - UBCORE_OPC_WRITE_NOTIFY = 0x02, // not support result - // will return for UBCORE_OPC_WRITE_NOTIFY - UBCORE_OPC_READ = 0x10, - UBCORE_OPC_CAS = 0x20, - UBCORE_OPC_FAA = 0x21, - UBCORE_OPC_CAS_WITH_MASK = 0x24, - UBCORE_OPC_FAA_WITH_MASK = 0x25, - UBCORE_OPC_SEND = 0x40, // remote JFR/jetty ID - UBCORE_OPC_SEND_IMM = 0x41, // remote JFR/jetty ID - UBCORE_OPC_SEND_INVALIDATE = 0x42, // remote JFR/jetty ID and seg token id - UBCORE_OPC_NOP = 0x51, + UBCORE_OPC_WRITE = 0x00, + UBCORE_OPC_WRITE_IMM = 0x01, + UBCORE_OPC_WRITE_NOTIFY = 0x02, + UBCORE_OPC_READ = 0x10, + UBCORE_OPC_CAS = 0x20, + UBCORE_OPC_SWAP = 0x21, + UBCORE_OPC_FADD = 0x22, + UBCORE_OPC_FSUB = 0x23, + UBCORE_OPC_FAND = 0x24, + UBCORE_OPC_FOR = 0x25, + UBCORE_OPC_FXOR = 0x26, + UBCORE_OPC_SEND = 0x40, // remote JFR/jetty ID + UBCORE_OPC_SEND_IMM = 0x41, // remote JFR/jetty ID + UBCORE_OPC_SEND_INVALIDATE = 0x42, // remote JFR/jetty ID and seg token id + UBCORE_OPC_NOP = 0x51, UBCORE_OPC_LAST }; @@ -44,21 +87,22 @@ enum ubcore_opcode { /* Must be consistent with urma_cr_status_t */ enum ubcore_cr_status { // completion record status UBCORE_CR_SUCCESS = 0, - UBCORE_CR_LOC_LEN_ERR, // Local data too long error - UBCORE_CR_LOC_OPERATION_ERR, // Local operation err - UBCORE_CR_LOC_PROTECTION_ERR, // Local memory protection error - UBCORE_CR_LOC_ACCESS_ERR, // Access to local memory error when WRITE_WITH_IMM - UBCORE_CR_REM_INVALID_REQ_ERR, - UBCORE_CR_REM_ACCESS_ERR, // Memory access protection error occurred in the remote node + UBCORE_CR_UNSUPPORTED_OPCODE_ERR, + UBCORE_CR_LOC_LEN_ERR, // Local data too long error + UBCORE_CR_LOC_OPERATION_ERR, // Local operation err + UBCORE_CR_LOC_ACCESS_ERR, // Access to local memory error when WRITE_WITH_IMM + 
UBCORE_CR_REM_RESP_LEN_ERR, + UBCORE_CR_REM_UNSUPPORTED_REQ_ERR, UBCORE_CR_REM_OPERATION_ERR, - UBCORE_CR_RETRY_CNT_EXC_ERR, // Retransmission exceeds the maximum number of times - UBCORE_CR_RNR_RETRY_CNT_EXC_ERR, // RNR retries exceeded the maximum number: - // remote jfr has no buffer - UBCORE_CR_FATAL_ERR, - UBCORE_CR_WR_FLUSH_ERR, - UBCORE_CR_RESP_TIMEOUT_ERR, - UBCORE_CR_MORE_TO_POLL_ERR, - UBCORE_CR_GENERAL_ERR + /* Memory access protection error occurred in the remote node */ + UBCORE_CR_REM_ACCESS_ABORT_ERR, + UBCORE_CR_ACK_TIMEOUT_ERR, + /* RNR retries exceeded the maximum number: remote jfr has no buffer */ + UBCORE_CR_RNR_RETRY_CNT_EXC_ERR, + UBCORE_CR_FLUSH_ERR, + UBCORE_CR_WR_SUSPEND_DONE, + UBCORE_CR_WR_FLUSH_ERR_DONE, + UBCORE_CR_WR_UNHANDLED }; /* Must be consistent with urma_cr_opcode_t */ @@ -69,4 +113,11 @@ enum ubcore_cr_opcode { UBCORE_CR_OPC_WRITE_WITH_IMM }; +enum ubcore_slice { + UBCORE_SLICE_32K = 1 << 15, + UBCORE_SLICE_64K = 1 << 16, + UBCORE_SLICE_128K = 1 << 17, + UBCORE_SLICE_256K = 1 << 18 +}; + #endif diff --git a/include/urma/ubcore_types.h b/include/urma/ubcore_types.h index c80a072680bf..d17e41e33183 100644 --- a/include/urma/ubcore_types.h +++ b/include/urma/ubcore_types.h @@ -28,21 +28,24 @@ #include #include #include +#include #include -#include +#include "ubcore_opcode.h" -#define UBCORE_MAX_PORT_CNT 8 -#define UBCORE_MAX_VF_CNT 1024 -#define UBCORE_SEG_MAPPED 1 +#define UBCORE_GET_VERSION(a, b) (((a) << 16) + ((b) > 65535 ? 
65535 : (b))) +#define UBCORE_API_VERSION ((0 << 16) + 9) // Current Version: 0.9 + +#define UBCORE_MAX_PORT_CNT 16 +#define UBCORE_MAX_FE_CNT 1024 #define UBCORE_MAX_DEV_NAME 64 #define UBCORE_MAX_DRIVER_NAME 64 -#define UBCORE_HASH_TABLE_SIZE 64 +#define UBCORE_HASH_TABLE_SIZE 10240 #define UBCORE_NET_ADDR_BYTES (16) #define UBCORE_MAC_BYTES 6 #define UBCORE_MAX_ATTR_GROUP 3 #define UBCORE_EID_SIZE (16) #define UBCORE_EID_STR_LEN (39) -#define EID_FMT \ +#define EID_FMT \ "%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x" #define EID_UNPACK(...) __VA_ARGS__ #define EID_RAW_ARGS(eid) EID_UNPACK(eid[0], eid[1], eid[2], eid[3], eid[4], eid[5], eid[6], \ @@ -50,7 +53,12 @@ #define EID_ARGS(eid) EID_RAW_ARGS((eid).raw) #define UBCORE_MAX_UPI_CNT 1000 -#define UBCORE_OWN_VF_ID (0xffff) +#define UBCORE_OWN_FE_IDX (0xffff) +#define UBCORE_JETTY_GRP_MAX_NAME 64 +#define UBCORE_MAX_TP_CNT_IN_GRP 32 +/* support 8 priorities and 8 algorithms */ +/* same as URMA_CC_IDX_TABLE_SIZE */ +#define UBCORE_CC_IDX_TABLE_SIZE 64 enum ubcore_transport_type { UBCORE_TRANSPORT_INVALID = -1, @@ -60,16 +68,18 @@ enum ubcore_transport_type { UBCORE_TRANSPORT_MAX }; -#define UBCORE_ACCESS_LOCAL_WRITE (0x1 << 0) -#define UBCORE_ACCESS_REMOTE_READ (0x1 << 1) -#define UBCORE_ACCESS_REMOTE_WRITE (0x1 << 2) +#define UBCORE_ACCESS_LOCAL_WRITE 0x1 +#define UBCORE_ACCESS_REMOTE_READ (0x1 << 1) +#define UBCORE_ACCESS_REMOTE_WRITE (0x1 << 2) #define UBCORE_ACCESS_REMOTE_ATOMIC (0x1 << 3) #define UBCORE_ACCESS_REMOTE_INVALIDATE (0x1 << 4) +#define UBCORE_SEG_TOKEN_ID_INVALID UINT_MAX + union ubcore_eid { uint8_t raw[UBCORE_EID_SIZE]; struct { - uint64_t resv; + uint64_t reserved; uint32_t prefix; uint32_t addr; } in4; @@ -79,20 +89,24 @@ union ubcore_eid { } in6; }; +struct ubcore_eid_info { + union ubcore_eid eid; + uint32_t eid_index; +}; + struct ubcore_ueid_cfg { union ubcore_eid eid; uint32_t upi; + uint32_t eid_index; }; struct ubcore_jetty_id { union 
ubcore_eid eid; - uint32_t uasid; uint32_t id; }; struct ubcore_ubva { union ubcore_eid eid; - uint32_t uasid; uint64_t va; } __packed; @@ -108,6 +122,10 @@ struct ubcore_ht_param { struct ubcore_hash_table { struct ubcore_ht_param p; struct hlist_head *head; + /* Prevent the same jetty + * from being bound by different tjetty + */ + struct ubcore_jetty_id rc_tjetty_id; spinlock_t lock; struct kref kref; }; @@ -123,25 +141,23 @@ union ubcore_jfc_flag { union ubcore_jfs_flag { struct { - /* 0: IDC_MODE. - * 1: DC_MODE. - * 2: LS_MODE - */ - uint32_t mode : 2; - uint32_t lock_free : 1; - uint32_t reserved : 29; + uint32_t lock_free : 1; + uint32_t error_suspend : 1; + uint32_t outorder_comp : 1; + uint32_t reserved : 29; } bs; uint32_t value; }; union ubcore_jfr_flag { struct { - uint32_t key_policy : 3; /* 0: UBCORE_KEY_NONE - * 1: UBCORE_KEY_PLAIN_TEXT - * 2: UBCORE_KEY_SIGNED - * 3: UBCORE_KEY_ALL_ENCRYPTED - * 4: UBCORE_KEY_RESERVED - */ + /* 0: UBCORE_TOKEN_NONE + * 1: UBCORE_TOKEN_PLAIN_TEXT + * 2: UBCORE_TOKEN_SIGNED + * 3: UBCORE_TOKEN_ALL_ENCRYPTED + * 4: UBCORE_TOKEN_RESERVED + */ + uint32_t token_policy : 3; uint32_t tag_matching : 1; uint32_t lock_free : 1; uint32_t reserved : 27; @@ -173,19 +189,36 @@ enum ubcore_jetty_state { UBCORE_JETTY_STATE_ERROR }; +enum ubcore_jfr_state { + UBCORE_JFR_STATE_RESET = 0, + UBCORE_JFR_STATE_READY, + UBCORE_JFR_STATE_ERROR +}; + +enum ubcore_jfs_attr_mask { + UBCORE_JFS_STATE = 0x1 +}; + struct ubcore_jfs_attr { - uint32_t mask; /* mask value refer to ubcore_jfs_attr_mask_t */ + uint32_t mask; /* mask value refer to ubcore_jfs_attr_mask_t */ enum ubcore_jetty_state state; }; -enum ubcore_jfr_attr_mask { UBCORE_JFR_RX_THRESHOLD = 0x1 }; +enum ubcore_jfr_attr_mask { + UBCORE_JFR_RX_THRESHOLD = 0x1, + UBCORE_JFR_STATE = 0x1 << 1 +}; struct ubcore_jfr_attr { uint32_t mask; /* mask value refer to enum ubcore_jfr_attr_mask */ uint32_t rx_threshold; + enum ubcore_jfr_state state; }; -enum ubcore_jetty_attr_mask { 
UBCORE_JETTY_RX_THRESHOLD = 0x1 }; +enum ubcore_jetty_attr_mask { + UBCORE_JETTY_RX_THRESHOLD = 0x1, + UBCORE_JETTY_STATE = 0x1 << 1 +}; struct ubcore_jetty_attr { uint32_t mask; /* mask value refer to enum ubcore_jetty_attr_mask */ @@ -205,27 +238,29 @@ union ubcore_import_seg_flag { union ubcore_reg_seg_flag { struct { - uint32_t key_policy : 3; - uint32_t cacheable : 1; - uint32_t dsva : 1; - uint32_t access : 6; - uint32_t non_pin : 1; - uint32_t user_iova : 1; - uint32_t reserved : 19; + uint32_t token_policy : 3; + uint32_t cacheable : 1; + uint32_t dsva : 1; + uint32_t access : 6; + uint32_t non_pin : 1; + uint32_t user_iova : 1; + uint32_t token_id_valid : 1; + uint32_t reserved : 18; } bs; uint32_t value; }; struct ubcore_udrv_priv { - uintptr_t in_addr; + uint64_t in_addr; uint32_t in_len; - uintptr_t out_addr; + uint64_t out_addr; uint32_t out_len; }; struct ubcore_ucontext { struct ubcore_device *ub_dev; - uint32_t uasid; + union ubcore_eid eid; + uint32_t eid_index; void *jfae; /* jfae uobj */ atomic_t use_cnt; }; @@ -235,61 +270,29 @@ struct ubcore_udata { struct ubcore_udrv_priv *udrv_data; }; -struct ubcore_jfc; -typedef void (*ubcore_comp_callback_t)(struct ubcore_jfc *jfc); +struct ubcore_token { + uint32_t token; +}; enum ubcore_event_type { UBCORE_EVENT_JFC_ERR, - UBCORE_EVENT_JFS_FATAL, - UBCORE_EVENT_JFS_ACCESS_ERR, - UBCORE_EVENT_JFR_FATAL, - UBCORE_EVENT_JFR_ACCESS_ERR, - UBCORE_EVENT_JETTY_FATAL, - UBCORE_EVENT_JETTY_ACCESS_ERR, + UBCORE_EVENT_JFS_ERR, + UBCORE_EVENT_JFR_ERR, + UBCORE_EVENT_JFR_LIMIT_REACHED, + UBCORE_EVENT_JETTY_ERR, + UBCORE_EVENT_JETTY_LIMIT_REACHED, + UBCORE_EVENT_JETTY_GRP_ERR, UBCORE_EVENT_PORT_ACTIVE, - UBCORE_EVENT_PORT_ERR, + UBCORE_EVENT_PORT_DOWN, UBCORE_EVENT_DEV_FATAL, - UBCORE_EVENT_ID_CHANGE, - UBCORE_EVENT_TP_ERR -}; - -struct ubcore_event { - struct ubcore_device *ub_dev; - union { - struct ubcore_jfc *jfc; - struct ubcore_jfs *jfs; - struct ubcore_jfr *jfr; - struct ubcore_jetty *jetty; - struct ubcore_tp 
*tp; - uint32_t port_id; - } element; - enum ubcore_event_type event_type; -}; - -typedef void (*ubcore_event_callback_t)(struct ubcore_event *event, struct ubcore_ucontext *ctx); - -struct ubcore_event_handler { - void (*event_callback)(struct ubcore_event *event, struct ubcore_event_handler *handler); - struct list_head node; -}; - -struct ubcore_jfc_cfg { - uint32_t depth; - union ubcore_jfc_flag flag; - void *jfc_context; - uint32_t eq_id; -}; - -struct ubcore_jfc { - struct ubcore_device *ub_dev; - struct ubcore_ucontext *uctx; - struct ubcore_jfc_cfg jfc_cfg; - uint32_t id; /* allocated by driver */ - ubcore_comp_callback_t jfce_handler; - ubcore_event_callback_t jfae_handler; - uint64_t urma_jfc; /* user space jfc pointer */ - struct hlist_node hnode; - atomic_t use_cnt; + UBCORE_EVENT_EID_CHANGE, + UBCORE_EVENT_TP_ERR, + UBCORE_EVENT_TP_SUSPEND, + UBCORE_EVENT_TP_FLUSH_DONE, + UBCORE_EVENT_ELR_ERR, + UBCORE_EVENT_ELR_DONE, + UBCORE_EVENT_MIGRATE_VTP_SWITCH, + UBCORE_EVENT_MIGRATE_VTP_ROLLBACK }; /* transport mode */ @@ -299,149 +302,45 @@ enum ubcore_transport_mode { UBCORE_TP_UM = 0x1 << 2 /* Unreliable message */ }; -struct ubcore_jfs_cfg { - uint32_t depth; - union ubcore_jfs_flag flag; - uint8_t priority; - uint8_t max_sge; - uint8_t max_rsge; - uint32_t max_inline_data; - uint8_t retry_cnt; - uint8_t rnr_retry; - uint8_t err_timeout; - void *jfs_context; - struct ubcore_jfc *jfc; - enum ubcore_transport_mode trans_mode; -}; - -struct ubcore_jfs { - struct ubcore_device *ub_dev; - struct ubcore_ucontext *uctx; - struct ubcore_jfs_cfg jfs_cfg; - uint32_t id; /* allocted by driver */ - ubcore_event_callback_t jfae_handler; - uint64_t urma_jfs; /* user space jfs pointer */ - struct hlist_node hnode; - atomic_t use_cnt; - struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ -}; - -struct ubcore_key { - uint32_t key; +enum ubcore_jetty_grp_policy { + UBCORE_JETTY_GRP_POLICY_RR = 0, + UBCORE_JETTY_GRP_POLICY_HASH_HINT = 1 
}; -struct ubcore_jfr_cfg { - uint32_t id; /* user may assign id */ - uint32_t depth; - union ubcore_jfr_flag flag; - uint8_t max_sge; - uint8_t min_rnr_timer; - enum ubcore_transport_mode trans_mode; - struct ubcore_jfc *jfc; - struct ubcore_key ukey; - void *jfr_context; +enum ubcore_target_type { + UBCORE_JFR = 0, + UBCORE_JETTY, + UBCORE_JETTY_GROUP }; -struct ubcore_jfr { +struct ubcore_token_id { struct ubcore_device *ub_dev; struct ubcore_ucontext *uctx; - struct ubcore_jfr_cfg jfr_cfg; - uint32_t id; /* allocted by driver */ - ubcore_event_callback_t jfae_handler; - uint64_t urma_jfr; /* user space jfr pointer */ - struct hlist_node hnode; - atomic_t use_cnt; - struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ -}; - -union ubcore_jetty_flag { - struct { - uint32_t share_jfr : 1; /* 0: URMA_NO_SHARE_JFR. - * 1: URMA_SHARE_JFR. - */ - uint32_t reserved : 31; - } bs; - uint32_t value; -}; - -struct ubcore_jetty_cfg { - uint32_t id; /* user may assign id */ - uint32_t jfs_depth; - uint32_t jfr_depth; - union ubcore_jetty_flag flag; - struct ubcore_jfc *send_jfc; - struct ubcore_jfc *recv_jfc; - struct ubcore_jfr *jfr; /* shared jfr */ - uint8_t max_send_sge; - uint8_t max_send_rsge; - uint8_t max_recv_sge; - uint32_t max_inline_data; - uint8_t priority; - uint8_t retry_cnt; - uint8_t rnr_retry; - uint8_t err_timeout; - uint8_t min_rnr_timer; - enum ubcore_transport_mode trans_mode; - struct ubcore_key ukey; - void *jetty_context; -}; - -struct ubcore_tjetty_cfg { - struct ubcore_jetty_id id; /* jfr, jetty or jetty group id to be imported */ - enum ubcore_transport_mode trans_mode; - struct ubcore_key ukey; /* jfr, jetty or jetty group ukey value to be imported */ -}; - -enum ubcore_target_type { UBCORE_JFR = 0, UBCORE_JETTY, UBCORE_JFR_GROUP, UBCORE_JETTY_GROUP }; - -struct ubcore_tjetty { - struct ubcore_device *ub_dev; - struct ubcore_ucontext *uctx; - enum ubcore_target_type type; - struct ubcore_tjetty_cfg cfg; - 
struct ubcore_tp *tp; /* for UB transport device */ - atomic_t use_cnt; - struct mutex lock; -}; - -struct ubcore_jetty { - struct ubcore_device *ub_dev; - struct ubcore_ucontext *uctx; - struct ubcore_jetty_cfg jetty_cfg; - uint32_t id; /* allocted by driver */ - struct ubcore_tjetty *remote_jetty; // bind to remote jetty - ubcore_event_callback_t jfae_handler; - uint64_t urma_jetty; /* user space jetty pointer */ - struct hlist_node hnode; - atomic_t use_cnt; - struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ -}; - -struct ubcore_key_id { - struct ubcore_device *ub_dev; - struct ubcore_ucontext *uctx; - uint32_t key_id; + uint32_t token_id; atomic_t use_cnt; }; struct ubcore_seg_cfg { uint64_t va; uint64_t len; - struct ubcore_key_id *keyid; - struct ubcore_key ukey; + uint32_t eid_index; + struct ubcore_token_id *token_id; + struct ubcore_token token_value; union ubcore_reg_seg_flag flag; + uint64_t user_ctx; uint64_t iova; }; union ubcore_seg_attr { struct { - uint32_t key_policy : 3; - uint32_t cacheable : 1; - uint32_t dsva : 1; - uint32_t access : 6; - uint32_t non_pin : 1; - uint32_t user_iova : 1; - uint32_t reserved : 19; + uint32_t token_policy : 3; + uint32_t cacheable : 1; + uint32_t dsva : 1; + uint32_t access : 6; + uint32_t non_pin : 1; + uint32_t user_iova : 1; + uint32_t user_token_id : 1; + uint32_t reserved : 18; } bs; uint32_t value; }; @@ -450,14 +349,14 @@ struct ubcore_seg { struct ubcore_ubva ubva; uint64_t len; union ubcore_seg_attr attr; - uint32_t key_id; + uint32_t token_id; }; struct ubcore_target_seg_cfg { struct ubcore_seg seg; union ubcore_import_seg_flag flag; uint64_t mva; /* optional */ - struct ubcore_key ukey; + struct ubcore_token token_value; }; struct ubcore_target_seg { @@ -465,7 +364,7 @@ struct ubcore_target_seg { struct ubcore_ucontext *uctx; struct ubcore_seg seg; uint64_t mva; - struct ubcore_key_id *keyid; + struct ubcore_token_id *token_id; atomic_t use_cnt; }; @@ -479,18 
+378,19 @@ enum ubcore_mtu { }; enum ubcore_tp_cc_alg { - UBCORE_TP_CC_PFC = 0, + UBCORE_TP_CC_NONE = 0, UBCORE_TP_CC_DCQCN, UBCORE_TP_CC_DCQCN_AND_NETWORK_CC, UBCORE_TP_CC_LDCP, UBCORE_TP_CC_LDCP_AND_CAQM, UBCORE_TP_CC_LDCP_AND_OPEN_CC, UBCORE_TP_CC_HC3, - UBCORE_TP_CC_DIP + UBCORE_TP_CC_DIP, + UBCORE_TP_CC_NUM }; enum ubcore_congestion_ctrl_alg { - UBCORE_CC_PFC = 0x1 << UBCORE_TP_CC_PFC, + UBCORE_CC_PFC = 0x1 << UBCORE_TP_CC_NONE, UBCORE_CC_DCQCN = 0x1 << UBCORE_TP_CC_DCQCN, UBCORE_CC_DCQCN_AND_NETWORK_CC = 0x1 << UBCORE_TP_CC_DCQCN_AND_NETWORK_CC, UBCORE_CC_LDCP = 0x1 << UBCORE_TP_CC_LDCP, @@ -546,7 +446,28 @@ union ubcore_device_feat { uint32_t jfc_inline : 1; uint32_t spray_en : 1; uint32_t selective_retrans : 1; - uint32_t reserved : 23; + uint32_t live_migrate : 1; + uint32_t dca : 1; + uint32_t jetty_grp : 1; + uint32_t err_suspend : 1; + uint32_t outorder_comp : 1; + uint32_t mn : 1; + uint32_t clan : 1; + uint32_t reserved : 16; + } bs; + uint32_t value; +}; + +union ubcore_atomic_feat { + struct { + uint32_t cas : 1; + uint32_t swap : 1; + uint32_t fetch_and_add : 1; + uint32_t fetch_and_sub : 1; + uint32_t fetch_and_and : 1; + uint32_t fetch_and_or : 1; + uint32_t fetch_and_xor : 1; + uint32_t reserved : 25; } bs; uint32_t value; }; @@ -572,38 +493,62 @@ struct ubcore_device_cap { uint32_t max_jfs; uint32_t max_jfr; uint32_t max_jetty; + uint32_t max_jetty_grp; + uint32_t max_jetty_in_jetty_grp; + uint32_t max_rc; /* max rc queues */ uint32_t max_jfc_depth; uint32_t max_jfs_depth; uint32_t max_jfr_depth; + uint32_t max_rc_depth; /* max depth of each rc queue */ uint32_t max_jfs_inline_size; uint32_t max_jfs_sge; uint32_t max_jfs_rsge; uint32_t max_jfr_sge; uint64_t max_msg_size; - uint64_t max_rc_outstd_cnt; /* max read command outstanding count in the function entity */ - uint16_t trans_mode; /* one or more from enum ubcore_transport_mode */ - uint16_t congestion_ctrl_alg; /* one or more mode from enum ubcore_congestion_ctrl_alg */ - uint16_t 
comp_vector_cnt; /* completion vector count */ + /* max read command outstanding count in the function entity */ + uint64_t max_rc_outstd_cnt; + uint32_t max_atomic_size; /* in terms of bytes, e.g. 8 or 64 */ + union ubcore_atomic_feat atomic_feat; + uint32_t max_sip_cnt_per_fe; + uint32_t max_dip_cnt_per_fe; + uint32_t max_seid_cnt_per_fe; + uint16_t trans_mode; /* one or more from ubcore_transport_mode_t */ + uint16_t congestion_ctrl_alg; /* one or more mode from ubcore_congestion_ctrl_alg_t */ + uint16_t ceq_cnt; /* completion vector count */ uint32_t utp_cnt; + uint32_t max_oor_cnt; /* max OOR window size by packet */ + uint32_t min_slice; /* 32K (1823), 64K (1650) */ + uint32_t max_slice; /* 256K (1823), 64K (1650) */ }; struct ubcore_device_attr { - union ubcore_eid eid; // RW - uint32_t max_eid_cnt; uint64_t guid; + uint16_t fe_idx; + uint32_t max_eid_cnt; uint32_t max_upi_cnt; - uint32_t upi[UBCORE_MAX_UPI_CNT]; // VF or PF own UPIs struct ubcore_device_cap dev_cap; - uint8_t port_cnt; + uint16_t fe_cnt; /* PF: greater than or equal to 0; FE: must be 0 */ struct ubcore_port_attr port_attr[UBCORE_MAX_PORT_CNT]; + uint8_t port_cnt; bool virtualization; /* In VM or not, must set by driver when register device */ - uint16_t vf_cnt; /* PF: greater than or equal to 0; VF: must be 0 */ + bool tp_maintainer; /* device used to maintain TP resource */ + uint32_t max_netaddr_cnt; }; union ubcore_device_cfg_mask { struct { - uint32_t port_ets : 1; - uint32_t port_fec : 1; + uint32_t rc_cnt : 1; + uint32_t rc_depth : 1; + uint32_t slice : 1; + uint32_t pattern : 1; + uint32_t virtualization : 1; + uint32_t suspend_period : 1; + uint32_t suspend_cnt : 1; + uint32_t min_jetty_cnt : 1; + uint32_t max_jetty_cnt : 1; + uint32_t min_jfr_cnt : 1; + uint32_t max_jfr_cnt : 1; + uint32_t reserved : 21; } bs; uint32_t value; }; @@ -612,18 +557,24 @@ struct ubcore_congestion_control { uint32_t data; }; -struct ubcore_port_ets { - uint32_t data; -}; - -struct ubcore_port_fec { - 
uint32_t data; +struct ubcore_rc_cfg { + uint32_t rc_cnt; /* rc queue count */ + uint32_t depth; }; struct ubcore_device_cfg { + uint16_t fe_idx; /* vf id or pf id. e.g: bdf id */ union ubcore_device_cfg_mask mask; - struct ubcore_port_fec fec; - struct ubcore_port_ets ets; + struct ubcore_rc_cfg rc_cfg; + uint32_t slice; /* TA slice size byte */ + uint8_t pattern; /* 0: pattern1; 1: pattern3 */ + bool virtualization; + uint32_t suspend_period; /* us */ + uint32_t suspend_cnt; /* TP resend cnt */ + uint32_t min_jetty_cnt; + uint32_t max_jetty_cnt; + uint32_t min_jfr_cnt; + uint32_t max_jfr_cnt; }; /* struct [struct ubcore_user_ctl_in] should be consistent with [urma_user_ctl_in_t] */ @@ -637,7 +588,7 @@ struct ubcore_user_ctl_in { struct ubcore_user_ctl_out { uint64_t addr; uint32_t len; - uint32_t rsv; + uint32_t reserved; }; struct ubcore_user_ctl { @@ -647,12 +598,18 @@ struct ubcore_user_ctl { struct ubcore_udrv_priv udrv_data; }; +enum ubcore_net_addr_type { + UBCORE_NET_ADDR_TYPE_IPV4 = 0, + UBCORE_NET_ADDR_TYPE_IPV6 +}; + struct ubcore_net_addr { + enum ubcore_net_addr_type type; union { uint8_t raw[UBCORE_NET_ADDR_BYTES]; struct { - uint64_t resv1; - uint32_t resv2; + uint64_t reserved1; + uint32_t reserved2; uint32_t addr; } in4; struct { @@ -666,24 +623,37 @@ struct ubcore_net_addr { union ubcore_tp_cfg_flag { struct { - uint32_t target : 1; /* 0: initiator, 1: target */ - uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ - uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ - uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t target : 1; /* 0: initiator, 1: target */ + /* todo: delete start */ + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* ubcore_tp_cc_alg_t */ uint32_t 
spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ - uint32_t reserved : 27; + /* todo: delete end */ + uint32_t loopback : 1; + uint32_t ack_resp : 1; + uint32_t dca_enable : 1; + /* for the bonding case, the hardware selects the port + * ignoring the port of the tp context and + * selects the port based on the hash value + * along with the information in the bonding group table. + */ + uint32_t bonding : 1; + uint32_t reserved : 19; } bs; uint32_t value; }; union ubcore_tp_mod_flag { struct { - uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ - uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ - uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ - uint32_t cc_alg : 4; /* The value is enum ubcore_tp_cc_alg */ - uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ - uint32_t reserved : 24; + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* The value is ubcore_tp_cc_alg_t */ + uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ + uint32_t dca_enable : 1; /* admin dynamic connection, * 0: disable 1: enable */ + uint32_t reserved : 23; } bs; uint32_t value; }; @@ -691,13 +661,17 @@ union ubcore_tp_mod_flag { /* The first bits must be consistent with union ubcore_tp_cfg_flag */ union ubcore_tp_flag { struct { - uint32_t target : 1; /* 0: initiator, 1: target */ - uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ - uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ - uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ - uint32_t cc_alg : 4; /* The value is enum ubcore_tp_cc_alg */ + uint32_t target : 1; /* 0: initiator, 1: target */ + uint32_t oor_en : 1; /* out of order receive, 
0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* The value is ubcore_tp_cc_alg_t */ uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ - uint32_t reserved : 23; + uint32_t loopback : 1; + uint32_t ack_resp : 1; + uint32_t dca_enable : 1; + uint32_t bonding : 1; + uint32_t reserved : 19; } bs; uint32_t value; }; @@ -706,7 +680,8 @@ enum ubcore_tp_state { UBCORE_TP_STATE_RESET = 0, UBCORE_TP_STATE_RTR, UBCORE_TP_STATE_RTS, - UBCORE_TP_STATE_ERROR + UBCORE_TP_STATE_SUSPENDED, + UBCORE_TP_STATE_ERR }; enum ubcore_ta_type { @@ -726,26 +701,39 @@ struct ubcore_ta { struct ubcore_jetty_id tjetty_id; /* peer jetty id */ }; +struct ubcore_tpg; struct ubcore_tp_cfg { - struct ubcore_ta *ta; /* NULL for UB device */ - union ubcore_tp_cfg_flag flag; /* indicate initiator or target, etc */ - struct ubcore_net_addr local_net_addr; - struct ubcore_net_addr peer_net_addr; - union ubcore_eid local_eid; - union ubcore_eid peer_eid; + union ubcore_tp_cfg_flag flag; /* flag of initial tp */ + /* transaction layer attributes */ + struct ubcore_net_addr local_net_addr; /* todo: delete */ + struct ubcore_net_addr peer_net_addr; /* todo: delete */ + union { + union ubcore_eid local_eid; + struct ubcore_jetty_id local_jetty; + }; + uint16_t fe_idx; /* rc mode only */ + union { + union ubcore_eid peer_eid; + struct ubcore_jetty_id peer_jetty; + }; + /* tranport layer attributes */ enum ubcore_transport_mode trans_mode; - uint32_t rx_psn; - enum ubcore_mtu mtu; - uint16_t data_udp_start; /* src udp port start, for multipath data */ - uint16_t ack_udp_start; /* src udp port start, for multipath ack */ - uint8_t udp_range; /* src udp port range, for both multipath data and ack */ + uint16_t data_udp_start; /* todo: delete */ + uint16_t ack_udp_start; /* todo: delete */ + uint8_t udp_range; /* todo: delete */ + 
uint32_t rx_psn; /* todo: delete */ + uint32_t tx_psn; /* todo: delete */ + enum ubcore_mtu mtu; /* todo: delete */ uint8_t retry_num; + uint8_t retry_factor; /* for calculate the time slot to retry */ uint8_t ack_timeout; - uint8_t tc; /* traffic class */ + uint8_t dscp; /* priority */ + uint32_t oor_cnt; /* OOR window size: by packet */ + struct ubcore_tpg *tpg; /* NULL if no tpg, eg.UM mode */ }; struct ubcore_tp_ext { - uintptr_t addr; + uint64_t addr; uint32_t len; }; @@ -759,72 +747,515 @@ union ubcore_tp_attr_mask { uint32_t mtu : 1; uint32_t cc_pattern_idx : 1; uint32_t peer_ext : 1; - uint32_t reserved : 24; + uint32_t oos_cnt : 1; + uint32_t local_net_addr_idx : 1; + uint32_t peer_net_addr : 1; + uint32_t data_udp_start : 1; + uint32_t ack_udp_start : 1; + uint32_t udp_range : 1; + uint32_t hop_limit : 1; + uint32_t flow_label : 1; + uint32_t port_id : 1; + uint32_t mn : 1; + uint32_t reserved : 14; + } bs; + uint32_t value; +}; + +struct ubcore_tp_attr { + union ubcore_tp_mod_flag flag; + uint32_t peer_tpn; + enum ubcore_tp_state state; + uint32_t tx_psn; + uint32_t rx_psn; + enum ubcore_mtu mtu; + uint8_t cc_pattern_idx; + struct ubcore_tp_ext peer_ext; + uint32_t oos_cnt; /* out of standing packet cnt */ + uint32_t local_net_addr_idx; + struct ubcore_net_addr peer_net_addr; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint8_t udp_range; + uint8_t hop_limit; + uint32_t flow_label; + uint8_t port_id; + uint8_t mn; /* 0~15, a packet contains only one msg if mn is set as 0 */ +}; + +struct ubcore_tp { + uint32_t tpn; /* driver assgined in creating tp */ + uint32_t peer_tpn; + struct ubcore_device *ub_dev; + union ubcore_tp_flag flag; /* indicate initiator or target, etc */ + uint32_t local_net_addr_idx; + struct ubcore_net_addr local_net_addr; /* todo: delete */ + struct ubcore_net_addr peer_net_addr; + union { + union ubcore_eid local_eid; + struct ubcore_jetty_id local_jetty; + }; + union { + union ubcore_eid peer_eid; + struct ubcore_jetty_id 
peer_jetty; + }; + enum ubcore_transport_mode trans_mode; + enum ubcore_tp_state state; + uint32_t rx_psn; + uint32_t tx_psn; + enum ubcore_mtu mtu; + uint16_t data_udp_start; /* src udp port start, for multipath data */ + uint16_t ack_udp_start; /* src udp port start, for multipath ack */ + uint8_t udp_range; /* src udp port range, for both multipath data and ack */ + uint8_t retry_num; + uint8_t retry_factor; + uint8_t ack_timeout; + uint8_t dscp; + uint8_t cc_pattern_idx; + uint8_t hop_limit; + struct ubcore_tpg *tpg; /* NULL if no tpg, eg. UM mode */ + uint32_t oor_cnt; /* out of order window size for recv: packet cnt */ + uint32_t oos_cnt; /* out of order window size for send: packet cnt */ + struct ubcore_tp_ext tp_ext; /* driver fill in creating tp */ + struct ubcore_tp_ext peer_ext; /* ubcore fill before modifying tp */ + atomic_t use_cnt; + struct hlist_node hnode; /* driver inaccessible */ + void *priv; /* ubcore private data for tp management */ +}; + +struct ubcore_tpg_cfg { + /* transaction layer attributes */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + + /* tranport layer attributes */ + enum ubcore_transport_mode trans_mode; + uint8_t dscp; + enum ubcore_tp_cc_alg cc_alg; + uint8_t cc_pattern_idx; + uint32_t tp_cnt; +}; + +struct ubcore_tpg_ext { + uint64_t addr; + uint32_t len; +}; + +struct ubcore_tpg { + uint32_t tpgn; + struct ubcore_device *ub_dev; + struct ubcore_tpg_cfg tpg_cfg; /* filled by ubcore when creating tp */ + struct ubcore_tpg_ext tpg_ext; /* filled by ubn driver when creating tp */ + struct ubcore_tpg_ext peer_ext; /* filled by ubcore before modifying tp */ + struct ubcore_tp *tp_list[UBCORE_MAX_TP_CNT_IN_GRP]; // UBCORE_MAX_TP_CNT_IN_GRP=32 + atomic_t use_cnt; + struct hlist_node hnode; /* driver inaccessible */ +}; + +struct ubcore_cc_entry { + enum ubcore_tp_cc_alg alg; + uint8_t cc_pattern_idx; + uint8_t cc_priority; +} __packed; + +union ubcore_utp_cfg_flag { + struct { + uint32_t loopback : 1; + uint32_t 
spray_en : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubcore_utp_cfg { + /* transaction layer attributes */ + union ubcore_utp_cfg_flag flag; + uint16_t udp_start; // src udp port start + uint8_t udp_range; // src udp port range + uint32_t local_net_addr_idx; + struct ubcore_net_addr peer_net_addr; + uint32_t flow_label; + uint8_t dscp; + uint8_t hop_limit; + uint32_t port_id; + enum ubcore_mtu mtu; +}; + +struct ubcore_utp { + uint32_t utpn; /* driver fills */ + struct ubcore_device *ub_dev; + struct ubcore_utp_cfg utp_cfg; /* filled by ubcore when createing utp. */ + atomic_t use_cnt; + struct hlist_node hnode; +}; + +struct ubcore_ctp_cfg { + struct ubcore_net_addr peer_net_addr; + uint32_t cna_len; +}; + +struct ubcore_ctp { + uint32_t ctpn; /* driver fills */ + struct ubcore_device *ub_dev; + struct ubcore_ctp_cfg ctp_cfg; /* filled by ubcore when createing cp. */ + atomic_t use_cnt; + struct hlist_node hnode; +}; + +enum ubcore_vtp_state { + UBCORE_VTPS_CREATING = 0, + UBCORE_VTPS_READY, + UBCORE_VTPS_DELETING, + UBCORE_VTPS_DELETED +}; + +struct ubcore_vtpn { + uint32_t vtpn; /* driver fills */ + struct ubcore_device *ub_dev; + /* ubcore private, inaccessible to driver */ + enum ubcore_transport_mode trans_mode; + /* vtpn key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + /* vtpn key end */ + uint32_t eid_index; + uint32_t local_jetty; + uint32_t peer_jetty; + atomic_t state; + struct hlist_node hnode; + atomic_t use_cnt; +}; + +union ubcore_vtp_cfg_flag { + struct { + uint32_t clan_tp : 1; + uint32_t migrate : 1; + uint32_t reserve : 30; + } bs; + uint32_t value; +}; + +struct ubcore_vtp_cfg { + uint16_t fe_idx; // vfid or pfid + uint32_t vtpn; + uint32_t local_jetty; + /* key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + /* key end */ + union ubcore_vtp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union { + struct ubcore_tpg *tpg; + struct ubcore_tp 
*tp; + struct ubcore_utp *utp; // idx of dip + struct ubcore_ctp *ctp; /* valid when clan is true */ + }; +}; + +struct ubcore_vtp { + struct ubcore_device *ub_dev; + struct ubcore_vtp_cfg cfg; /* driver fills */ + struct hlist_node hnode; /* driver inaccessible */ +}; + +struct ubcore_vtp_attr { + union { + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + struct ubcore_utp *utp; // idx of dip + struct ubcore_ctp *ctp; /* clan domain */ + } tp; +}; + +union ubcore_vtp_attr_mask { + struct { + uint32_t tp : 1; + uint32_t reserved : 31; + } bs; + uint32_t value; +}; + +enum ubcore_msg_opcode { + UBCORE_MSG_CREATE_VTP = 0, + UBCORE_MSG_DESTROY_VTP, + UBCORE_MSG_CONFIG_DEVICE, + UBCORE_MSG_ALLOC_EID, + UBCORE_MSG_DEALLOC_EID, + UBCORE_MSG_STOP_PROC_VTP_MSG = 0x10, /* should be all migrate op after this opcode */ + UBCORE_MSG_QUERY_VTP_MIG_STATUS, + UBCORE_MSG_FLOW_STOPPED, + UBCORE_MSG_MIG_ROLLBACK, + UBCORE_MSG_MIG_VM_START +}; + +enum ubcore_msg_type { + UBCORE_MSG_TYPE_FE2TPF = 0, // for create/delete vtp + UBCORE_MSG_TYPE_MPF2TPF, // for live migration + UBCORE_MSG_TYPE_TPF2FE, // for create/delete vtp + UBCORE_MSG_TYPE_TPF2MPF // for live migration +}; + +enum ubcore_pattern { + UBCORE_PATTERN_1 = 0, + UBCORE_PATTERN_3 +}; + +union ubcore_msg_ep { + uint16_t src_function_id; + uint16_t dst_function_id; +}; + +struct ubcore_msg_hdr { + enum ubcore_msg_type type; + union ubcore_msg_ep ep; + uint32_t len; // data len + uint32_t msg_id; + enum ubcore_msg_opcode opcode; +}; + +struct ubcore_msg { + struct ubcore_msg_hdr hdr; + uint8_t data[0]; +}; + +struct ubcore_event { + struct ubcore_device *ub_dev; + union { + struct ubcore_jfc *jfc; + struct ubcore_jfs *jfs; + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; + struct ubcore_jetty_group *jetty_grp; + struct ubcore_tp *tp; + struct ubcore_vtp *vtp; + uint32_t port_id; + uint32_t eid_idx; + } element; + enum ubcore_event_type event_type; +}; + +typedef void (*ubcore_event_callback_t)(struct ubcore_event 
*event, struct ubcore_ucontext *ctx); + +struct ubcore_event_handler { + void (*event_callback)(struct ubcore_event *event, struct ubcore_event_handler *handler); + struct list_head node; +}; + +typedef void (*ubcore_comp_callback_t)(struct ubcore_jfc *jfc); + +struct ubcore_jfc_cfg { + uint32_t depth; + union ubcore_jfc_flag flag; + void *jfc_context; + uint32_t ceqn; +}; + +struct ubcore_jfc { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfc_cfg jfc_cfg; + uint32_t id; /* allocated by driver */ + ubcore_comp_callback_t jfce_handler; + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfc; /* user space jfc pointer */ + struct hlist_node hnode; + atomic_t use_cnt; +}; + +struct ubcore_jfs_cfg { + uint32_t depth; + union ubcore_jfs_flag flag; + uint32_t eid_index; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t rnr_retry; + uint8_t err_timeout; + void *jfs_context; + struct ubcore_jfc *jfc; + enum ubcore_transport_mode trans_mode; +}; + +struct ubcore_jfs { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfs_cfg jfs_cfg; + uint32_t id; /* allocted by driver */ + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfs; /* user space jfs pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ +}; + +struct ubcore_jfr_cfg { + uint32_t id; /* user may assign id */ + uint32_t depth; + uint32_t eid_index; + union ubcore_jfr_flag flag; + uint8_t max_sge; + uint8_t min_rnr_timer; + enum ubcore_transport_mode trans_mode; + struct ubcore_jfc *jfc; + struct ubcore_token token_value; + void *jfr_context; +}; + +struct ubcore_jfr { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfr_cfg jfr_cfg; + uint32_t id; /* allocted by driver */ + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfr; /* user space jfr pointer */ + struct 
hlist_node hnode; + atomic_t use_cnt; + struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ +}; + +union ubcore_jetty_flag { + struct { + uint32_t share_jfr : 1; /* 0: URMA_NO_SHARE_JFR. 1: URMA_SHARE_JFR. */ + uint32_t lock_free : 1; + uint32_t error_suspend : 1; + uint32_t outorder_comp : 1; + uint32_t reserved : 28; + } bs; + uint32_t value; +}; + +struct ubcore_jetty_cfg { + uint32_t id; /* user may assign id */ + union ubcore_jetty_flag flag; + enum ubcore_transport_mode trans_mode; + uint32_t eid_index; + uint32_t jfs_depth; + uint8_t priority; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint32_t max_inline_data; + uint8_t rnr_retry; + uint8_t err_timeout; + uint32_t jfr_depth; /* deprecated */ + uint8_t min_rnr_timer; /* deprecated */ + uint8_t max_recv_sge; /* deprecated */ + struct ubcore_token token_value; /* deprecated */ + struct ubcore_jfc *send_jfc; + struct ubcore_jfc *recv_jfc; /* must set */ + struct ubcore_jfr *jfr; /* must set, shared jfr */ + struct ubcore_jetty_group *jetty_grp; /* [Optional] user specified jetty group */ + void *jetty_context; +}; + +union ubcore_import_jetty_flag { + struct { + uint32_t token_policy : 3; + uint32_t reserved : 29; } bs; uint32_t value; }; -struct ubcore_tp_attr { - union ubcore_tp_mod_flag flag; - uint32_t peer_tpn; - enum ubcore_tp_state state; - uint32_t tx_psn; - uint32_t rx_psn; /* modify both rx psn and tx psn when restore tp */ - enum ubcore_mtu mtu; - uint8_t cc_pattern_idx; - struct ubcore_tp_ext peer_ext; +struct ubcore_tjetty_cfg { + struct ubcore_jetty_id id; /* jfr, jetty or jetty group id to be imported */ + uint32_t eid_index; + enum ubcore_transport_mode trans_mode; + enum ubcore_jetty_grp_policy policy; + enum ubcore_target_type type; + union ubcore_import_jetty_flag flag; + struct ubcore_token token_value; /* jfr, jetty or jetty group token_value to be imported */ }; -struct ubcore_tp { - uint32_t tpn; /* driver assgined in creating tp */ - uint32_t 
peer_tpn; +struct ubcore_tjetty { struct ubcore_device *ub_dev; - union ubcore_tp_flag flag; /* indicate initiator or target, etc */ - struct ubcore_net_addr local_net_addr; - struct ubcore_net_addr peer_net_addr; - union ubcore_eid local_eid; - union ubcore_eid peer_eid; - enum ubcore_transport_mode trans_mode; - enum ubcore_tp_state state; - uint32_t rx_psn; - uint32_t tx_psn; - enum ubcore_mtu mtu; - uint16_t data_udp_start; /* src udp port start, for multipath data */ - uint16_t ack_udp_start; /* src udp port start, for multipath ack */ - uint8_t udp_range; /* src udp port range, for both multipath data and ack */ - uint8_t retry_num; - uint8_t ack_timeout; - uint8_t tc; /* traffic class */ - uint8_t cc_pattern_idx; - struct ubcore_tp_ext tp_ext; /* driver fill in creating tp */ - struct ubcore_tp_ext peer_ext; /* ubcore fill before modifying tp */ + struct ubcore_ucontext *uctx; + enum ubcore_target_type type; + struct ubcore_tjetty_cfg cfg; + struct ubcore_tp *tp; + struct ubcore_vtpn *vtpn; + atomic_t use_cnt; + struct mutex lock; +}; + +struct ubcore_jetty { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jetty_cfg jetty_cfg; + uint32_t id; /* allocted by driver */ + struct ubcore_tjetty *remote_jetty; // bind to remote jetty + ubcore_event_callback_t jfae_handler; + uint64_t urma_jetty; /* user space jetty pointer */ + struct hlist_node hnode; atomic_t use_cnt; - void *priv; /* ubcore private data for tp management */ + struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ +}; + +struct ubcore_jetty_grp_cfg { + char name[UBCORE_JETTY_GRP_MAX_NAME]; + struct ubcore_token token_value; + uint32_t id; + enum ubcore_jetty_grp_policy policy; + uint64_t user_ctx; +}; + +struct ubcore_jetty_group { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jetty_grp_cfg jetty_grp_cfg; + uint32_t id; /* allocated by driver */ + uint32_t jetty_cnt; /* current jetty cnt in the 
jetty group */ + struct ubcore_jetty **jetty; + ubcore_event_callback_t jfae_handler; + uint64_t urma_jetty_grp; /* user space jetty_grp pointer */ + struct mutex lock; /* Protect jetty array */ }; enum ubcore_res_key_type { - UBCORE_RES_KEY_UPI = 1, // key id: UPI ID - UBCORE_RES_KEY_TP, // key id: TPN - UBCORE_RES_KEY_TPG, // key id: TPGN, currently not supported - UBCORE_RES_KEY_UTP, // key id: UTP ID - UBCORE_RES_KEY_JFS, // key id: JFS ID - UBCORE_RES_KEY_JFR, // key id: JFR ID - UBCORE_RES_KEY_JETTY, // key id: JETTY ID - UBCORE_RES_KEY_JETTY_GROUP, // key id: JETTY GROUP ID, currently not supported - UBCORE_RES_KEY_JFC, // key id: JFC ID - UBCORE_RES_KEY_SEG, // key id: UKEY ID - UBCORE_RES_KEY_URMA_DEV // key id: EID + UBCORE_RES_KEY_UPI = 1, // key id: UPI ID + UBCORE_RES_KEY_VTP, // key id: VTPN + UBCORE_RES_KEY_TP, // key id: TPN + UBCORE_RES_KEY_TPG, // key id: TPGN + UBCORE_RES_KEY_UTP, // key id: UTP ID + UBCORE_RES_KEY_JFS, // key id: JFS ID + UBCORE_RES_KEY_JFR, // key id: JFR ID + UBCORE_RES_KEY_JETTY, // key id: JETTY ID + UBCORE_RES_KEY_JETTY_GROUP, // key id: JETTY GROUP ID + UBCORE_RES_KEY_JFC, // key id: JFC ID + UBCORE_RES_KEY_RC, // key id: RC ID + UBCORE_RES_KEY_SEG, // key id: UKEY ID + UBCORE_RES_KEY_URMA_DEV // key id: EID }; struct ubcore_res_upi_val { uint32_t upi; }; +struct ubcore_res_vtp_val { + uint16_t fe_idx; + uint32_t vtpn; + union ubcore_eid local_eid; + uint32_t local_jetty; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + union ubcore_vtp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union { + uint32_t tpgn; + uint32_t tpn; + uint32_t utpn; + uint32_t ctpn; + }; +}; + struct ubcore_res_tp_val { uint32_t tpn; - uint32_t psn; - uint8_t pri; - uint8_t oor; + uint32_t tx_psn; + uint32_t rx_psn; + uint8_t dscp; + uint8_t oor_en; + uint8_t selective_retrans_en; uint8_t state; uint16_t data_udp_start; uint16_t ack_udp_start; @@ -834,7 +1265,7 @@ struct ubcore_res_tp_val { struct ubcore_res_tpg_val { uint32_t 
tp_cnt; - uint8_t pri; + uint8_t dscp; uint32_t *tp_list; }; @@ -867,13 +1298,12 @@ struct ubcore_res_jetty_val { uint32_t recv_jfc_id; uint32_t jfr_id; uint32_t jfs_depth; - uint32_t jfr_depth; uint8_t state; uint8_t pri; }; struct ubcore_res_jetty_group_val { - uint16_t jetty_cnt; + uint32_t jetty_cnt; uint32_t *jetty_list; }; @@ -883,22 +1313,29 @@ struct ubcore_res_jfc_val { uint32_t depth; }; +struct ubcore_res_rc_val { + uint32_t type; // type of rc; read, ta-ack/ta-nak or atomic etc. + uint32_t rc_id; + uint16_t depth; + uint8_t state; +}; + struct ubcore_res_seg_val { struct ubcore_ubva ubva; uint64_t len; - uint32_t key_id; - struct ubcore_key ukey; + uint32_t token_id; + struct ubcore_token token_value; }; struct ubcore_seg_info { struct ubcore_ubva ubva; uint64_t len; - uint32_t key_id; + uint32_t token_id; }; struct ubcore_res_dev_val { uint32_t seg_cnt; - struct ubcore_seg_info *seg_list; // SEG key_id list + struct ubcore_seg_info *seg_list; // SEG token_id list uint32_t jfs_cnt; uint32_t *jfs_list; // JFS ID list uint32_t jfr_cnt; @@ -908,7 +1345,11 @@ struct ubcore_res_dev_val { uint32_t jetty_cnt; uint32_t *jetty_list; // Jetty ID list uint32_t jetty_group_cnt; - uint32_t *jetty_group_list; // Jetty group ID list + uint32_t *jetty_group_list; // Jetty group ID list + uint32_t rc_cnt; + uint32_t *rc_list; + uint32_t vtp_cnt; + uint32_t *vtp_list; uint32_t tp_cnt; uint32_t *tp_list; // RC uint32_t tpg_cnt; @@ -918,44 +1359,52 @@ struct ubcore_res_dev_val { }; struct ubcore_res_key { - uint8_t type; /* refer to enum struct ubcore_res_key_type */ - uint32_t key; /* as UPI, key is vf_id */ + uint8_t type; /* refer to ubcore_res_key_type_t */ + uint32_t key; /* as UPI, key is fe_idx */ + uint32_t key_ext; /* only for vtp */ + uint32_t key_cnt; /* only for rc */ }; struct ubcore_res_val { - uintptr_t addr; /* allocated and free by ubcore */ - uint32_t len; /* in&out. 
As a input parameter, - * it indicates the length allocated by the ubcore - * As a output parameter, it indicates the actual data length. - */ + uint64_t addr; /* allocated and free by ubcore */ + /* in&out. As a input parameter, + * it indicates the length allocated by the ubcore + * As a output parameter, it indicates the actual data length. + */ + uint32_t len; }; union ubcore_jfs_wr_flag { struct { - uint32_t place_order : 2; /* 0: There is no order with other WR. - * 1: relax order. - * 2: strong order. - * 3: reserve. - */ - uint32_t comp_order : 1; /* 0: There is no completion order with other WR - * 1: Completion order with previous WR. - */ - - uint32_t fence : 1; /* 0: There is no fence. - * 1: Fence with previous read and atomic WR - */ - uint32_t solicited_enable : 1; /* 0: not solicited. - * 1: solicited. It will trigger an event - * on remote side - */ - uint32_t complete_enable : 1; /* 0: Do not notify local process - * after the task is complete. - * 1: Notify local process - * after the task is completed. - */ - uint32_t inline_flag : 1; /* 0: No inline. - * 1: Inline data. - */ + /* 0: There is no order with other WR. + * 1: relax order. + * 2: strong order. + * 3: reserve. + */ + uint32_t place_order : 2; + /* 0: There is no completion order with other WR + * 1: Completion order with previous WR. + */ + uint32_t comp_order : 1; + /* 0: There is no fence. + * 1: Fence with previous read and atomic WR + */ + uint32_t fence : 1; + /* 0: not solicited. + * 1: solicited. It will trigger an event + * on remote side + */ + uint32_t solicited_enable : 1; + /* 0: Do not notify local process + * after the task is complete. + * 1: Notify local process + * after the task is completed. + */ + uint32_t complete_enable : 1; + /* 0: No inline. + * 1: Inline data. 
+ */ + uint32_t inline_flag : 1; uint32_t reserved : 25; } bs; uint32_t value; @@ -975,88 +1424,93 @@ struct ubcore_sg { struct ubcore_rw_wr { struct ubcore_sg src; struct ubcore_sg dst; - struct ubcore_tjetty *tjetty; /* For write imm */ - uint64_t notify_data; /* notify data or immeditate data in host byte order */ + uint8_t target_hint; /* hint of jetty in a target jetty group */ + uint64_t notify_data; /* notify data or immeditate data in host byte order */ }; struct ubcore_send_wr { struct ubcore_sg src; - struct ubcore_tjetty *tjetty; uint8_t target_hint; /* hint of jetty in a target jetty group */ - uint64_t imm_data; /* immeditate data in host byte order */ + uint64_t imm_data; /* immeditate data in host byte order */ struct ubcore_target_seg *tseg; /* Used only when send with invalidate */ }; struct ubcore_cas_wr { - struct ubcore_sge *dst; /* len must be less or equal to 8 Bytes */ - struct ubcore_sge *src; /* Local address for destination original value written back */ - uint64_t cmp_data; /* Value compared with destination value */ - uint64_t swap_data; /* If destination value is the same as cmp_data, - * destination value will be change to swap_data - */ -}; - -struct ubcore_cas_mask_wr { - struct ubcore_cas_wr cas; - uint64_t cmp_msk; - uint64_t swap_msk; + struct ubcore_sge *dst; /* len is the data length of CAS operation, 8/16/32/64B */ + struct ubcore_sge *src; /* Local address for destination original value written back */ + union { + uint64_t cmp_data; /* When the len is 8B, it indicates the CMP value. */ + uint64_t cmp_addr; /* When the len is 16/32/64B, it indicates the data address. */ + }; + union { + /* If destination value is the same as cmp_data, + * destination value will be change to swap_data. 
+ */ + uint64_t swap_data; + uint64_t swap_addr; + }; }; struct ubcore_faa_wr { struct ubcore_sge *dst; /* len in the sge is the length of faa at remote side */ struct ubcore_sge *src; /* Local address for destination original value written back */ - uint64_t operand; /* Addend */ -}; - -struct ubcore_faa_mask_wr { - struct ubcore_faa_wr faa; - uint64_t msk; + union { + uint64_t operand; /* Addend */ + uint64_t operand_addr; + }; }; struct ubcore_jfs_wr { enum ubcore_opcode opcode; union ubcore_jfs_wr_flag flag; - uintptr_t user_ctx; + uint64_t user_ctx; + struct ubcore_tjetty *tjetty; union { struct ubcore_rw_wr rw; struct ubcore_send_wr send; struct ubcore_cas_wr cas; - struct ubcore_cas_mask_wr cas_mask; struct ubcore_faa_wr faa; - struct ubcore_faa_mask_wr faa_mask; }; struct ubcore_jfs_wr *next; }; struct ubcore_jfr_wr { struct ubcore_sg src; - uintptr_t user_ctx; + uint64_t user_ctx; struct ubcore_jfr_wr *next; }; union ubcore_cr_flag { struct { - uint8_t inline_flag : 1; /* Indicate CR contains inline data or not */ - uint8_t s_r : 1; /* Indicate CR stands for sending or receiving */ - uint8_t jetty : 1; /* Indicate local_id or remote_id - * in the CR stands for jetty or JFS/JFR - */ + uint8_t s_r : 1; /* Indicate CR stands for sending or receiving */ + uint8_t jetty : 1; /* Indicate id in the CR stands for jetty or JFS/JFR */ + uint8_t suspend_done : 1; + uint8_t flush_err_done : 1; + uint8_t reserved : 4; } bs; uint8_t value; }; +struct ubcore_cr_token { + uint32_t token_id; + struct ubcore_token token_value; +}; + struct ubcore_cr { enum ubcore_cr_status status; - uintptr_t user_ctx; + uint64_t user_ctx; enum ubcore_cr_opcode opcode; union ubcore_cr_flag flag; uint32_t completion_len; /* The number of bytes transferred */ uint32_t local_id; /* Local jetty ID, or JFS ID, or JFR ID, depending on flag */ - struct ubcore_jetty_id remote_id; /* Valid only for receiving CR. 
- * The remote jetty where received msg comes from, - * may be jetty ID or JFS ID, depending on flag - */ - uint64_t imm_data; /* Valid only for received CR */ + /* Valid only for receiving CR. The remote jetty where received msg + * comes from, may be jetty ID or JFS ID, depending on flag. + */ + struct ubcore_jetty_id remote_id; + union { + uint64_t imm_data; /* Valid only for received CR */ + struct ubcore_cr_token invalid_token; + }; uint32_t tpn; uintptr_t user_data; /* Use as pointer to local jetty struct */ }; @@ -1085,20 +1539,21 @@ struct ubcore_stats_com_val { }; struct ubcore_stats_val { - uint64_t addr; /* this addr is alloc and free by ubcore, - * refer to struct ubcore_stats_com_val - */ - - uint32_t len; /* [in/out] real length filled when success - * to query and buffer length enough; - * expected length filled and return failure when buffer length not enough - */ + /* this addr is alloc and free by ubcore, + * refer to struct ubcore_stats_com_val + */ + uint64_t addr; + /* [in/out] real length filled when success + * to query and buffer length enough; + * expected length filled and return failure when buffer length not enough + */ + uint32_t len; }; union ubcore_utp_mod_flag { struct { - uint32_t spray_en : 1; // Whether to enable end-side port number hashing, - // 0 : disabled, 1 : enabled + uint32_t spray_en : 1; // Whether to enable end-side port number hashing, + // 0 : disabled, 1 : enabled uint32_t reserved : 31; } bs; uint32_t value; @@ -1120,6 +1575,20 @@ union ubcore_utp_attr_mask { uint32_t value; }; +/* live migration struct */ +enum ubcore_mig_state { + UBCORE_MIG_STATE_START, + UBCORE_MIG_STATE_ROLLBACK, + UBCORE_MIG_STATE_FINISH +}; + +enum ubcore_mig_resp_status { + UBCORE_MIG_MSG_PROC_SUCCESS, + UBCORE_MIG_MSG_PROC_FAILURE, + UBCORE_VTP_MIG_COMPLETE, + UBCORE_VTP_MIG_UNCOMPLETE +}; + struct ubcore_ops { struct module *owner; /* kernel driver module */ char driver_name[UBCORE_MAX_DRIVER_NAME]; /* user space driver name */ @@ -1131,45 
+1600,35 @@ struct ubcore_ops { * @return: 0 on success, other value on error */ int (*set_eid)(struct ubcore_device *dev, union ubcore_eid eid); + /** * set upi * @param[in] dev: the ub device handle; - * @param[in] vf_id: vf_id; - * @param[in] idx: idx of upi in vf; - * @param[in] upi: upi of vf to set + * @param[in] fe_idx: fe_idx; + * @param[in] idx: idx of upi in fe; + * @param[in] upi: upi of fe to set * @return: 0 on success, other value on error */ - int (*set_upi)(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx, uint32_t upi); - /** - * add a function entity id (eid) to ub device - * @param[in] dev: the ubcore_device handle; - * @param[in] eid: function entity id (eid) to be added; - * @return: the index of eid, less than 0 indicating error - */ - int (*add_eid)(struct ubcore_device *dev, const union ubcore_eid *eid); - /** - * remove a function entity id (eid) specified by idx from ub device - * @param[in] dev: the ubcore_device handle; - * @param[in] idx: the idx of function entity id (eid) to be deleted; - * @return: 0 on success, other value on error - */ - int (*delete_eid_by_idx)(struct ubcore_device *dev, uint16_t idx); + int (*set_upi)(struct ubcore_device *dev, uint16_t fe_idx, uint16_t idx, uint32_t upi); + /** * add a function entity id (eid) to ub device (for uvs) * @param[in] dev: the ubcore_device handle; - * @param[in] vf_id: vf_id; - * @param[in] cfg: eid and the upi of vf to which the eid belongs can be specified; + * @param[in] fe_idx: fe_idx; + * @param[in] cfg: eid and the upi of fe to which the eid belongs can be specified; * @return: the index of eid/upi, less than 0 indicating error */ - int (*add_ueid)(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg); + int (*add_ueid)(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg); + /** - * remove a function entity id (eid) specified by idx from ub device (for uvs) + * delete a function entity id (eid) to ub device (for uvs) * 
@param[in] dev: the ubcore_device handle; - * @param[in] vf_id: vf_id; - * @param[in] idx: the idx of function entity id (eid) to be deleted; + * @param[in] fe_idx: fe_idx; + * @param[in] cfg: eid and the upi of fe to which the eid belongs can be specified; * @return: 0 on success, other value on error */ - int (*delete_ueid_by_idx)(struct ubcore_device *dev, uint16_t vf_id, uint16_t idx); + int (*delete_ueid)(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg); + /** * query device attributes * @param[in] dev: the ub device handle; @@ -1177,14 +1636,15 @@ struct ubcore_ops { * @return: 0 on success, other value on error */ int (*query_device_attr)(struct ubcore_device *dev, struct ubcore_device_attr *attr); + /** * query device status * @param[in] dev: the ub device handle; * @param[out] status: status for the driver to fill in * @return: 0 on success, other value on error */ - int (*query_device_status)(const struct ubcore_device *dev, - struct ubcore_device_status *status); + int (*query_device_status)(struct ubcore_device *dev, struct ubcore_device_status *status); + /** * query resource * @param[in] dev: the ub device handle; @@ -1192,44 +1652,52 @@ struct ubcore_ops { * @param[in/out] val: addr and len of value * @return: 0 on success, other value on error */ - int (*query_res)(const struct ubcore_device *dev, struct ubcore_res_key *key, - struct ubcore_res_val *val); + int (*query_res)(struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val); + /** * config device * @param[in] dev: the ub device handle; * @param[in] cfg: device configuration * @return: 0 on success, other value on error */ - int (*config_device)(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg); + int (*config_device)(struct ubcore_device *dev, struct ubcore_device_cfg *cfg); + /** * set ub network address * @param[in] dev: the ub device handle; * @param[in] net_addr: net_addr to set + * @param[in] index: index by sip table * 
@return: 0 on success, other value on error */ - int (*set_net_addr)(struct ubcore_device *dev, const struct ubcore_net_addr *net_addr); + int (*add_net_addr)(struct ubcore_device *dev, struct ubcore_net_addr *net_addr, + uint32_t index); + /** * unset ub network address * @param[in] dev: the ub device handle; - * @param[in] net_addr: net_addr to unset + * @param[in] idx: net_addr idx by sip table entry * @return: 0 on success, other value on error */ - int (*unset_net_addr)(struct ubcore_device *dev, const struct ubcore_net_addr *net_addr); + int (*delete_net_addr)(struct ubcore_device *dev, uint32_t idx); + /** * allocate a context from ubep for a user process * @param[in] dev: the ub device handle; - * @param[in] uasid: uasid for the context to be allocated + * @param[in] eid: function entity id (eid) index to set; * @param[in] udrv_data: user space driver data * @return: pointer to user context on success, null or error, */ - struct ubcore_ucontext *(*alloc_ucontext)(struct ubcore_device *dev, uint32_t uasid, - struct ubcore_udrv_priv *udrv_data); + struct ubcore_ucontext *(*alloc_ucontext)(struct ubcore_device *dev, + uint32_t eid_index, struct ubcore_udrv_priv *udrv_data); + /** * free a context to ubep * @param[in] uctx: the user context created before; * @return: 0 on success, other value on error */ int (*free_ucontext)(struct ubcore_ucontext *uctx); + /** * mmap doorbell or jetty buffer, etc * @param[in] uctx: the user context created before; @@ -1239,19 +1707,19 @@ struct ubcore_ops { int (*mmap)(struct ubcore_ucontext *ctx, struct vm_area_struct *vma); /* segment part */ - /** alloc key id to ubep + /** alloc token id to ubep * @param[in] dev: the ub device handle; * @param[in] udata: ucontext and user space driver data - * @return: key id pointer on success, NULL on error + * @return: token id pointer on success, NULL on error */ - struct ubcore_key_id *(*alloc_key_id)(struct ubcore_device *dev, - struct ubcore_udata *udata); + struct ubcore_token_id 
*(*alloc_token_id)(struct ubcore_device *dev, + struct ubcore_udata *udata); /** free key id from ubep - * @param[in] key_id: the key id alloced before; + * @param[in] token_id: the token id alloced before; * @return: 0 on success, other value on error */ - int (*free_key_id)(struct ubcore_key_id *key_id); + int (*free_token_id)(struct ubcore_token_id *token_id); /** register segment to ubep * @param[in] dev: the ub device handle; @@ -1260,8 +1728,8 @@ struct ubcore_ops { * @return: target segment pointer on success, NULL on error */ struct ubcore_target_seg *(*register_seg)(struct ubcore_device *dev, - const struct ubcore_seg_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); /** unregister segment from ubep * @param[in] tseg: the segment registered before; @@ -1276,7 +1744,7 @@ struct ubcore_ops { * @return: target segment handle on success, NULL on error */ struct ubcore_target_seg *(*import_seg)(struct ubcore_device *dev, - const struct ubcore_target_seg_cfg *cfg, + struct ubcore_target_seg_cfg *cfg, struct ubcore_udata *udata); /** unimport seg from ubep @@ -1285,6 +1753,14 @@ struct ubcore_ops { */ int (*unimport_seg)(struct ubcore_target_seg *tseg); + /** add port for bound device + * @param[in] dev: the ub device handle; + * @param[in] port_cnt: port count + * @param[in] port_list: port list + * @return: target segment handle on success, NULL on error + */ + int (*add_port)(struct ubcore_device *dev, uint32_t port_cnt, uint32_t *port_list); + /* jetty part */ /** * create jfc with ubep. 
@@ -1294,7 +1770,7 @@ struct ubcore_ops { * @return: jfc pointer on success, NULL on error */ struct ubcore_jfc *(*create_jfc)(struct ubcore_device *dev, - const struct ubcore_jfc_cfg *cfg, + struct ubcore_jfc_cfg *cfg, struct ubcore_udata *udata); /** @@ -1304,7 +1780,7 @@ struct ubcore_ops { * @param[in] udata: ucontext and user space driver data * @return: 0 on success, other value on error */ - int (*modify_jfc)(struct ubcore_jfc *jfc, const struct ubcore_jfc_attr *attr, + int (*modify_jfc)(struct ubcore_jfc *jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); /** @@ -1330,7 +1806,7 @@ struct ubcore_ops { * @return: jfs pointer on success, NULL on error */ struct ubcore_jfs *(*create_jfs)(struct ubcore_device *dev, - const struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); /** * modify jfs from ubep. @@ -1339,7 +1815,7 @@ struct ubcore_ops { * @param[in] udata: ucontext and user space driver data * @return: 0 on success, other value on error */ - int (*modify_jfs)(struct ubcore_jfs *jfs, const struct ubcore_jfs_attr *attr, + int (*modify_jfs)(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata); /** * query jfs from ubep. @@ -1350,21 +1826,20 @@ struct ubcore_ops { */ int (*query_jfs)(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, struct ubcore_jfs_attr *attr); - /** - * destroy jfs from ubep. - * @param[in] jfs: the jfs created before; - * @return: 0 on success, other value on error - */ - int (*destroy_jfs)(struct ubcore_jfs *jfs); /** * flush jfs from ubep. 
* @param[in] jfs: the jfs created before; * @param[in] cr_cnt: the maximum number of CRs expected to be returned; * @param[out] cr: the addr of returned CRs; - * @return: the number of completion record returned, - * 0 means no completion record returned, -1 on error + * @return: the number of CR returned, 0 means no completion record returned, -1 on error */ int (*flush_jfs)(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); + /** + * destroy jfs from ubep. + * @param[in] jfs: the jfs created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfs)(struct ubcore_jfs *jfs); /** * create jfr with ubep. @@ -1374,7 +1849,7 @@ struct ubcore_ops { * @return: jfr pointer on success, NULL on error */ struct ubcore_jfr *(*create_jfr)(struct ubcore_device *dev, - const struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_cfg *cfg, struct ubcore_udata *udata); /** * modify jfr from ubep. @@ -1383,7 +1858,7 @@ struct ubcore_ops { * @param[in] udata: ucontext and user space driver data * @return: 0 on success, other value on error */ - int (*modify_jfr)(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, + int (*modify_jfr)(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, struct ubcore_udata *udata); /** * query jfr from ubep. @@ -1409,7 +1884,7 @@ struct ubcore_ops { * @return: target jfr pointer on success, NULL on error */ struct ubcore_tjetty *(*import_jfr)(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata); /** * unimport jfr from ubep. @@ -1426,7 +1901,7 @@ struct ubcore_ops { * @return: jetty pointer on success, NULL on error */ struct ubcore_jetty *(*create_jetty)(struct ubcore_device *dev, - const struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); /** * modify jetty from ubep. 
@@ -1435,7 +1910,7 @@ struct ubcore_ops { * @param[in] udata: ucontext and user space driver data * @return: 0 on success, other value on error */ - int (*modify_jetty)(struct ubcore_jetty *jetty, const struct ubcore_jetty_attr *attr, + int (*modify_jetty)(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct ubcore_udata *udata); /** * query jetty from ubep. @@ -1446,22 +1921,20 @@ struct ubcore_ops { */ int (*query_jetty)(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, struct ubcore_jetty_attr *attr); - /** - * destroy jetty from ubep. - * @param[in] jetty: the jetty created before; - * @return: 0 on success, other value on error - */ - int (*destroy_jetty)(struct ubcore_jetty *jetty); - /** * flush jetty from ubep. * @param[in] jetty: the jetty created before; * @param[in] cr_cnt: the maximum number of CRs expected to be returned; * @param[out] cr: the addr of returned CRs; - * @return: the number of completion record returned, - * 0 means no completion record returned, -1 on error + * @return: the number of CR returned, 0 means no completion record returned, -1 on error */ int (*flush_jetty)(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr); + /** + * destroy jetty from ubep. + * @param[in] jetty: the jetty created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jetty)(struct ubcore_jetty *jetty); /** * import jetty to ubep. @@ -1471,7 +1944,7 @@ struct ubcore_ops { * @return: target jetty pointer on success, NULL on error */ struct ubcore_tjetty *(*import_jetty)(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata); /** * unimport jetty from ubep. @@ -1479,6 +1952,54 @@ struct ubcore_ops { * @return: 0 on success, other value on error */ int (*unimport_jetty)(struct ubcore_tjetty *tjetty); + /** + * bind jetty from ubep. 
+ * @param[in] jetty: the jetty created before; + * @param[in] tjetty: the target jetty imported before; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*bind_jetty)(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); + /** + * unbind jetty from ubep. + * @param[in] jetty: the jetty binded before; + * @return: 0 on success, other value on error + */ + int (*unbind_jetty)(struct ubcore_jetty *jetty); + + /** + * create jetty group to ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: pointer of the jetty group config; + * @param[in] udata: ucontext and user space driver data + * @return: jetty group pointer on success, NULL on error + */ + struct ubcore_jetty_group *(*create_jetty_grp)(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); + /** + * destroy jetty group to ubep. + * @param[in] jetty_grp: the jetty group created before; + * @return: 0 on success, other value on error + */ + int (*delete_jetty_grp)(struct ubcore_jetty_group *jetty_grp); + + /** + * create tpg. + * @param[in] dev: the ub device handle; + * @param[in] cfg: tpg init attributes + * @param[in] udata: ucontext and user space driver data + * @return: tp pointer on success, NULL on error + */ + struct ubcore_tpg *(*create_tpg)(struct ubcore_device *dev, + struct ubcore_tpg_cfg *cfg, struct ubcore_udata *udata); + /** + * destroy tpg. + * @param[in] tp: tp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_tpg)(struct ubcore_tpg *tpg); + /** * create tp. 
* @param[in] dev: the ub device handle; @@ -1486,8 +2007,8 @@ struct ubcore_ops { * @param[in] udata: ucontext and user space driver data * @return: tp pointer on success, NULL on error */ - struct ubcore_tp *(*create_tp)(struct ubcore_device *dev, const struct ubcore_tp_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_tp *(*create_tp)(struct ubcore_device *dev, + struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata); /** * modify tp. * @param[in] tp: tp pointer created before @@ -1495,14 +2016,123 @@ struct ubcore_ops { * @param[in] mask: attr mask indicating the attributes to be modified * @return: 0 on success, other value on error */ - int (*modify_tp)(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr, - union ubcore_tp_attr_mask mask); + int (*modify_tp)(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask); /** * destroy tp. * @param[in] tp: tp pointer created before * @return: 0 on success, other value on error */ int (*destroy_tp)(struct ubcore_tp *tp); + + /** + * create multi tp. + * @param[in] dev: the ub device handle; + * @param[in] cnt: the number of tp, must be less than or equal to 32; + * @param[in] cfg: array of tp init attributes + * @param[in] udata: array of ucontext and user space driver data + * @param[out] tp: pointer array of tp + * @return: created tp cnt, 0 on error + */ + int (*create_multi_tp)(struct ubcore_device *dev, uint32_t cnt, struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata, struct ubcore_tp **tp); + /** + * modify multi tp. 
+ * @param[in] cnt: the number of tp; + * @param[in] tp: pointer array of tp created before + * @param[in] attr: array of tp attributes + * @param[in] mask: array of attr mask indicating the attributes to be modified + * @param[in] fail_tp: pointer of tp failed to modify + * @return: modified successfully tp cnt, 0 on error + */ + int (*modify_multi_tp)(uint32_t cnt, struct ubcore_tp **tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, struct ubcore_tp **fail_tp); + /** + * destroy multi tp. + * @param[in] cnt: the number of tp; + * @param[in] tp: pointer array of tp created before + * @return: destroyed tp cnt, 0 on error + */ + int (*destroy_multi_tp)(uint32_t cnt, struct ubcore_tp **tp); + + /** + * allocate vtp. + * @param[in] dev: the ub device handle; + * @return: vtpn pointer on success, NULL on error + */ + struct ubcore_vtpn *(*alloc_vtpn)(struct ubcore_device *dev); + + /** + * free vtpn. + * @param[in] vtpn: vtpn pointer allocated before + * @return: 0 on success, other value on error + */ + int (*free_vtpn)(struct ubcore_vtpn *vtpn); + + /** + * create vtp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: vtp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: vtp pointer on success, NULL on error + */ + struct ubcore_vtp *(*create_vtp)(struct ubcore_device *dev, + struct ubcore_vtp_cfg *cfg, struct ubcore_udata *udata); + /** + * destroy vtp. + * @param[in] vtp: vtp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_vtp)(struct ubcore_vtp *vtp); + + /** + * create utp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: utp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: utp pointer on success, NULL on error + */ + struct ubcore_utp *(*create_utp)(struct ubcore_device *dev, + struct ubcore_utp_cfg *cfg, struct ubcore_udata *udata); + /** + * destroy utp. 
+ * @param[in] utp: utp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_utp)(struct ubcore_utp *utp); + + /** + * create ctp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: ctp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: ctp pointer on success, NULL on error + */ + struct ubcore_ctp *(*create_ctp)(struct ubcore_device *dev, + struct ubcore_ctp_cfg *cfg, struct ubcore_udata *udata); + /** + * destroy ctp. + * @param[in] ctp: ctp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_ctp)(struct ubcore_ctp *ctp); + + /** + * send msg to ubep device. + * @param[in] dev: the ub device handle; + * @param[in] msg: msg to send; + * @return: 0 on success, other value on error + */ + int (*send_msg)(struct ubcore_device *dev, struct ubcore_msg *msg); + + /** + * query cc table to get cc pattern idx + * @param[in] dev: the ub device handle; + * @param[in] cc_entry_cnt: cc entry cnt; + * @return: return NULL on fail, otherwise, return cc entry array + */ + struct ubcore_cc_entry *(*query_cc)(struct ubcore_device *dev, uint32_t *cc_entry_cnt); + /** * operation of user ioctl cmd. * @param[in] user_ctl: kdrv user control command pointer; @@ -1518,7 +2148,7 @@ struct ubcore_ops { * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ - int (*post_jfs_wr)(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, + int (*post_jfs_wr)(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); /** * post jfr wr. @@ -1527,7 +2157,7 @@ struct ubcore_ops { * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ - int (*post_jfr_wr)(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, + int (*post_jfr_wr)(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr); /** * post jetty send wr. 
@@ -1536,7 +2166,7 @@ struct ubcore_ops { * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ - int (*post_jetty_send_wr)(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr, + int (*post_jetty_send_wr)(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); /** * post jetty receive wr. @@ -1545,7 +2175,7 @@ struct ubcore_ops { * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ - int (*post_jetty_recv_wr)(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr, + int (*post_jetty_recv_wr)(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr); /** * poll jfc. @@ -1554,8 +2184,9 @@ struct ubcore_ops { * @return: 0 on success, other value on error */ int (*poll_jfc)(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); - int (*config_utp)(struct ubcore_device *dev, uint32_t utp_id, - const struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask mask); + + int (*config_utp)(struct ubcore_device *dev, uint32_t utp_id, struct ubcore_utp_attr *attr, + union ubcore_utp_attr_mask mask); /** * query_stats. success to query and buffer length is enough * @param[in] dev: the ub device handle; @@ -1563,8 +2194,29 @@ struct ubcore_ops { * @param[in/out] val: address and buffer length of query results * @return: 0 on success, other value on error */ - int (*query_stats)(const struct ubcore_device *dev, struct ubcore_stats_key *key, - struct ubcore_stats_val *val); + int (*query_stats)(struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val); + /** + * config function migrate state. 
+ * @param[in] dev: the ub device handle; + * @param[in] fe_idx: fe id; + * @param[in] cnt: config count; + * @param[in] cfg: eid and the upi of fe to which the eid belongs can be specified; + * @param[in] state: config state (start, rollback and finish) + * @return: config success count, -1 on error + */ + int (*config_function_migrate_state)( + struct ubcore_device *dev, uint16_t fe_idx, uint32_t cnt, + struct ubcore_ueid_cfg *cfg, enum ubcore_mig_state state); + /** + * modify vtp. + * @param[in] vtp: vtp pointer to be modified; + * @param[in] attr: vtp attr, tp that we want to change; + * @param[in] mask: attr mask; + * @return: 0 on success, other value on error + */ + int (*modify_vtp)(struct ubcore_vtp *vtp, struct ubcore_vtp_attr *attr, + union ubcore_vtp_attr_mask *mask); }; struct ubcore_bitmap { @@ -1578,10 +2230,29 @@ enum ubcore_hash_table_type { UBCORE_HT_JFR, /* jfr hash table */ UBCORE_HT_JFC, /* jfc hash table */ UBCORE_HT_JETTY, /* jetty hash table */ - UBCORE_HT_TP, /* tp table */ + UBCORE_HT_TP, /* tp table */ + UBCORE_HT_TPG, /* tpg table */ + UBCORE_HT_RM_VTP, /* rm vtp table */ + UBCORE_HT_RC_VTP, /* rc vtp table */ + UBCORE_HT_UM_VTP, /* um vtp table */ + UBCORE_HT_VTPN, /* vtpn table */ + UBCORE_HT_UTP, /* utp table */ + UBCORE_HT_CTP, /* ctp table */ UBCORE_HT_NUM }; +struct ubcore_eid_entry { + union ubcore_eid eid; + uint32_t eid_index; + bool valid; +}; + +struct ubcore_eid_table { + uint32_t max_valid_pos; + struct ubcore_eid_entry *eid_entries; + spinlock_t lock; +}; + struct ubcore_device { struct list_head list_node; /* add to device list */ @@ -1593,16 +2264,15 @@ struct ubcore_device { struct net_device *netdev; struct ubcore_ops *ops; enum ubcore_transport_type transport_type; - int num_comp_vectors; /* Number of completion interrupt vectors for the device */ struct ubcore_device_attr attr; struct attribute_group *group[UBCORE_MAX_ATTR_GROUP]; /* driver may fill group [1] */ /* driver fills end */ - + struct ubcore_eid_table 
eid_table; struct ubcore_device_cfg cfg; /* port management */ struct kobject *ports_parent; /* kobject parent of the ports in the port list */ - struct list_head port_list; + struct list_head port_list; /* add to port list */ /* For ubcore client */ spinlock_t client_ctx_lock; @@ -1614,12 +2284,13 @@ struct ubcore_device { /* protect from unregister device */ atomic_t use_cnt; struct completion comp; + bool dynamic_eid; /* Assign eid dynamically with netdev notifier */ }; struct ubcore_port { struct kobject kobj; /* add to port list */ struct ubcore_device *ub_dev; - uint32_t port_no; + uint32_t port_id; struct ubcore_net_addr net_addr; }; @@ -1638,12 +2309,8 @@ struct ubcore_client_ctx { union ubcore_umem_flag { struct { - uint32_t non_pin : 1; /* 0: pinned to physical memory. - * 1: non pin. - */ - uint32_t writable : 1; /* 0: read-only. - * 1: writable. - */ + uint32_t non_pin : 1; /* 0: pinned to physical memory. 1: non pin. */ + uint32_t writable : 1; /* 0: read-only. 1: writable. 
*/ uint32_t reserved : 30; } bs; uint32_t value; @@ -1659,4 +2326,32 @@ struct ubcore_umem { uint32_t nmap; }; +struct ubcore_sip_info { + char dev_name[UBCORE_MAX_DEV_NAME]; + struct ubcore_net_addr addr; + uint32_t prefix_len; + uint8_t port_cnt; + uint8_t port_id[UBCORE_MAX_PORT_CNT]; + uint32_t mtu; +}; + +union ubcore_global_cfg_mask { + struct { + uint32_t mtu : 1; + uint32_t slice : 1; + uint32_t suspend_period : 1; + uint32_t suspend_cnt : 1; + uint32_t reserved : 28; + } bs; + uint32_t value; +}; + +struct ubcore_global_cfg { + union ubcore_global_cfg_mask mask; + enum ubcore_mtu mtu; + uint32_t slice; + uint32_t suspend_period; + uint32_t suspend_cnt; +}; + #endif diff --git a/include/urma/ubcore_uapi.h b/include/urma/ubcore_uapi.h index 8241775399a6..7f04345d08f7 100644 --- a/include/urma/ubcore_uapi.h +++ b/include/urma/ubcore_uapi.h @@ -23,18 +23,18 @@ #ifndef UBCORE_UAPI_H #define UBCORE_UAPI_H -#include +#include "ubcore_types.h" /** * Application specifies the device to allocate an context. * @param[in] dev: ubcore_device found by add ops in the client. - * @param[in] uasid: (deprecated) + * @param[in] eid_index: function entity id (eid) index to set; * @param[in] udrv_data (optional): ucontext and user space driver data * @return: ubcore_ucontext pointer on success, NULL on fail. * Note: this API is called only by uburma representing user-space application, - * not by other kernel modules + * not by other kernel modules */ -struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t uasid, - struct ubcore_udrv_priv *udrv_data); +struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data); /** * Free the allocated context. * @param[in] dev: device to free context. 
@@ -42,55 +42,32 @@ struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_ * Note: this API is called only by uburma representing user-space application, * not by other kernel modules */ -void ubcore_free_ucontext(const struct ubcore_device *dev, struct ubcore_ucontext *ucontext); -/** - * set function entity id for ub device. must be called before alloc context - * @param[in] dev: the ubcore_device handle; - * @param[in] eid: function entity id (eid) to set; - * @return: 0 on success, other value on error - */ -int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid); +void ubcore_free_ucontext(struct ubcore_device *dev, struct ubcore_ucontext *ucontext); /** * set upi * @param[in] dev: the ubcore_device handle; - * @param[in] vf_id: vf_id; - * @param[in] idx: idx of upi in vf; - * @param[in] upi: upi of vf to set + * @param[in] fe_idx: fe_idx; + * @param[in] idx: idx of upi in fe; + * @param[in] upi: upi of fe to set * @return: 0 on success, other value on error */ -int ubcore_set_upi(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx, uint32_t upi); -/** - * add a function entity id (eid) to ub device, the upi of vf to which the eid belongs - * can be specified - * @param[in] dev: the ubcore_device handle; - * @param[in] eid: function entity id (eid) to be added; - * @param[in] upi: upi of vf; - * @return: the index of eid/upi, less than 0 indicating error - */ -int ubcore_add_eid(struct ubcore_device *dev, union ubcore_eid *eid); -/** - * remove a function entity id (eid) specified by idx from ub device - * @param[in] dev: the ubcore_device handle; - * @param[in] idx: the idx of function entity id (eid) to be deleted; - * @return: 0 on success, other value on error - */ -int ubcore_delete_eid(struct ubcore_device *dev, uint16_t idx); +int ubcore_set_upi(struct ubcore_device *dev, uint16_t fe_idx, uint16_t idx, uint32_t upi); /** * add a function entity id (eid) to ub device (for uvs) * @param[in] dev: the 
ubcore_device handle; - * @param[in] vf_id: vf_id; - * @param[in] cfg: eid and the upi of vf to which the eid belongs can be specified; + * @param[in] fe_idx: fe_idx; + * @param[in] cfg: eid and the upi of fe to which the eid belongs can be specified; * @return: the index of eid/upi, less than 0 indicating error */ -int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg); +int ubcore_add_ueid(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg); /** * remove a function entity id (eid) specified by idx from ub device (for uvs) * @param[in] dev: the ubcore_device handle; - * @param[in] vf_id: vf_id; - * @param[in] idx: the idx of function entity id (eid) to be deleted; + * @param[in] fe_idx: fe_idx; + * @param[in] cfg: eid and the upi of fe to which the eid belongs can be specified; * @return: 0 on success, other value on error */ -int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t vf_id, uint16_t idx); +int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t fe_idx, struct ubcore_ueid_cfg *cfg); /** * query device attributes * @param[in] dev: the ubcore_device handle; @@ -104,7 +81,7 @@ int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_att * @param[out] status: status returned to client * @return: 0 on success, other value on error */ -int ubcore_query_device_status(const struct ubcore_device *dev, +int ubcore_query_device_status(struct ubcore_device *dev, struct ubcore_device_status *status); /** * query stats @@ -113,7 +90,7 @@ int ubcore_query_device_status(const struct ubcore_device *dev, * @param[in/out] val: addr and len of value * @return: 0 on success, other value on error */ -int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, +int ubcore_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, struct ubcore_stats_val *val); /** * query resource @@ -122,7 +99,7 @@ int ubcore_query_stats(const struct ubcore_device *dev, 
struct ubcore_stats_key * @param[in/out] val: addr and len of value * @return: 0 on success, other value on error */ -int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key *key, +int ubcore_query_resource(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_val *val); /** * config device @@ -130,7 +107,7 @@ int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key * @param[in] cfg: device configuration * @return: 0 on success, other value on error */ -int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg); +int ubcore_config_device(struct ubcore_device *dev, struct ubcore_device_cfg *cfg); /** * set ctx data of a client @@ -139,7 +116,7 @@ int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_c * @param[in] data: client private data to be set * @return: 0 on success, other value on error */ -void ubcore_set_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client, +void ubcore_set_client_ctx_data(struct ubcore_device *dev, struct ubcore_client *client, void *data); /** * get ctx data of a client @@ -147,7 +124,7 @@ void ubcore_set_client_ctx_data(struct ubcore_device *dev, const struct ubcore_c * @param[in] client: ubcore client pointer * @return: client private data set before */ -void *ubcore_get_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client); +void *ubcore_get_client_ctx_data(struct ubcore_device *dev, struct ubcore_client *client); /** * Register a new client to ubcore * @param[in] dev: the ubcore_device handle; @@ -161,18 +138,20 @@ int ubcore_register_client(struct ubcore_client *new_client); */ void ubcore_unregister_client(struct ubcore_client *rm_client); /** - * alloc key to ubcore device + * alloc token to ubcore device * @param[in] dev: the ubcore device handle; * @param[in] udata (optional): ucontext and user space driver data - * @return: key id pointer on success, NULL on 
error + * @return: token id pointer on success, NULL on error */ -struct ubcore_key_id *ubcore_alloc_key_id(struct ubcore_device *dev, struct ubcore_udata *udata); +struct ubcore_token_id *ubcore_alloc_token_id(struct ubcore_device *dev, + struct ubcore_udata *udata); /** - * free key id from ubcore device - * @param[in] key: the key id alloced before; + * free token id from ubcore device + * @param[in] token: the token id alloced before; * @return: 0 on success, other value on error */ -int ubcore_free_key_id(struct ubcore_key_id *key); +int ubcore_free_token_id(struct ubcore_token_id *key); + /** * register segment to ubcore device * @param[in] dev: the ubcore device handle; @@ -181,7 +160,7 @@ int ubcore_free_key_id(struct ubcore_key_id *key); * @return: target segment pointer on success, NULL on error */ struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, - const struct ubcore_seg_cfg *cfg, + struct ubcore_seg_cfg *cfg, struct ubcore_udata *udata); /** * unregister segment from ubcore device @@ -197,7 +176,7 @@ int ubcore_unregister_seg(struct ubcore_target_seg *tseg); * @return: target segment handle on success, NULL on error */ struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, - const struct ubcore_target_seg_cfg *cfg, + struct ubcore_target_seg_cfg *cfg, struct ubcore_udata *udata); /** * unimport seg from ubcore device @@ -214,10 +193,9 @@ int ubcore_unimport_seg(struct ubcore_target_seg *tseg); * @param[in] udata (optional): ucontext and user space driver data * @return: jfc pointer on success, NULL on error */ -struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubcore_jfc_cfg *cfg, - ubcore_comp_callback_t jfce_handler, - ubcore_event_callback_t jfae_handler, - struct ubcore_udata *udata); +struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, struct ubcore_jfc_cfg *cfg, + ubcore_comp_callback_t jfce_handler, ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); 
/** * modify jfc from ubcore device. * @param[in] jfc: the jfc created before; @@ -225,7 +203,7 @@ struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubc * @param[in] udata (optional): ucontext and user space driver data * @return: 0 on success, other value on error */ -int ubcore_modify_jfc(struct ubcore_jfc *jfc, const struct ubcore_jfc_attr *attr, +int ubcore_modify_jfc(struct ubcore_jfc *jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); /** * destroy jfc from ubcore device. @@ -248,9 +226,8 @@ int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); * @param[in] udata (optional): ucontext and user space driver data * @return: jfs pointer on success, NULL on error */ -struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubcore_jfs_cfg *cfg, - ubcore_event_callback_t jfae_handler, - struct ubcore_udata *udata); +struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, struct ubcore_jfs_cfg *cfg, + ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata); /** * modify jfs from ubcore device. * @param[in] jfs: the jfs created before; @@ -258,7 +235,7 @@ struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubc * @param[in] udata (optional): ucontext and user space driver data * @return: 0 on success, other value on error */ -int ubcore_modify_jfs(struct ubcore_jfs *jfs, const struct ubcore_jfs_attr *attr, +int ubcore_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata); /** * query jfs from ubcore device. 
@@ -292,9 +269,8 @@ int ubcore_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); * @param[in] udata (optional): ucontext and user space driver data * @return: jfr pointer on success, NULL on error */ -struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, const struct ubcore_jfr_cfg *cfg, - ubcore_event_callback_t jfae_handler, - struct ubcore_udata *udata); +struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, + ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata); /** * modify jfr from ubcore device. * @param[in] jfr: the jfr created before; @@ -302,7 +278,7 @@ struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, const struct ubc * @param[in] udata (optional): ucontext and user space driver data * @return: 0 on success, other value on error */ -int ubcore_modify_jfr(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, +int ubcore_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, struct ubcore_udata *udata); /** * query jfr from ubcore device. @@ -328,7 +304,7 @@ int ubcore_delete_jfr(struct ubcore_jfr *jfr); * @return: jetty pointer on success, NULL on error */ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, - const struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_cfg *cfg, ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata); /** @@ -338,7 +314,7 @@ struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, * @param[in] udata (optional): ucontext and user space driver data * @return: 0 on success, other value on error */ -int ubcore_modify_jetty(struct ubcore_jetty *jetty, const struct ubcore_jetty_attr *attr, +int ubcore_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct ubcore_udata *udata); /** * query jetty from ubcore device. 
@@ -372,8 +348,7 @@ int ubcore_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr * @return: target jfr pointer on success, NULL on error */ struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata); /** * unimport jfr from ubcore device. * @param[in] tjfr: the target jfr imported before; @@ -388,8 +363,7 @@ int ubcore_unimport_jfr(struct ubcore_tjetty *tjfr); * @return: target jetty pointer on success, NULL on error */ struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, - const struct ubcore_tjetty_cfg *cfg, - struct ubcore_udata *udata); + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata); /** * unimport jetty from ubcore device. * @param[in] tjetty: the target jetty imported before; @@ -443,10 +417,26 @@ int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, * Unbind jetty: Unbind local jetty with remote jetty, * and tear down the transport channel between them. * @param[in] jetty: local jetty to unbind; - * @param[in] tjetty: target jetty advised before; * @return: 0 on success, other value on error */ -int ubcore_unbind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty); +int ubcore_unbind_jetty(struct ubcore_jetty *jetty); +/** + * create jetty group with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jetty group configurations + * @param[in] jfae_handler (optional): jetty async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jetty group pointer on success, NULL on error + */ +struct ubcore_jetty_group *ubcore_create_jetty_grp(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * destroy jetty group from ubcore device. 
+ * @param[in] jetty_grp: the jetty group created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); /** * operation of user ioctl cmd. * @param[in] k_user_ctl: kdrv user control command pointer; @@ -460,7 +450,8 @@ int ubcore_user_control(struct ubcore_user_ctl *k_user_ctl); * Note: the handler will be called when driver reports an async_event with * ubcore_dispatch_async_event */ -void ubcore_register_event_handler(struct ubcore_device *dev, struct ubcore_event_handler *handler); +void ubcore_register_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler); /** * Client unregister async_event handler from ubcore * @param[in] dev: the ubcore device handle; @@ -477,7 +468,7 @@ void ubcore_unregister_event_handler(struct ubcore_device *dev, * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ -int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); /** * post jfr wr. @@ -486,7 +477,7 @@ int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ -int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr); /** * post jetty send wr. @@ -495,7 +486,7 @@ int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ -int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr, +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); /** * post jetty receive wr. 
@@ -504,7 +495,7 @@ int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jf * @param[out] bad_wr: the first failed wr; * @return: 0 on success, other value on error */ -int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr, +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr); /** * poll jfc. -- Gitee