From e78c9c70711b306b1aea472566daf2cc344054f6 Mon Sep 17 00:00:00 2001
From: chenyutao
Date: Thu, 27 Nov 2025 16:06:38 +0800
Subject: [PATCH] urma: Reuse RC RTP in both initiator and target

urma inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX

---------------------------------------------

The implementation includes:
1. Reuse one tpid pair in both the initiator and the target. This
   applies only to RC transport mode with RTP connections.

Signed-off-by: chenyutao
---
 .../ub/urma/ubcore/ubcore_connect_adapter.c | 316 +++++++++++++++++-
 .../ub/urma/ubcore/ubcore_connect_adapter.h |  35 ++
 drivers/ub/urma/ubcore/ubcore_device.c      |   9 +
 3 files changed, 355 insertions(+), 5 deletions(-)

diff --git a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c
index b4a0f0b96c85..83797d289d99 100644
--- a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c
+++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c
@@ -10,6 +10,7 @@
  */
 #include <...>
+#include <linux/jhash.h>
 #include <...>
 #include "ubcore_log.h"
 #include "net/ubcore_net.h"
@@ -35,6 +36,8 @@ struct msg_create_conn_req {
 	struct ubcore_get_tp_cfg get_tp_cfg;
 	uint64_t tp_handle;
 	uint32_t tx_psn;
+	uint32_t local_jetty_id;
+	uint32_t peer_jetty_id;
 };
 
 struct msg_create_conn_resp {
@@ -327,6 +330,198 @@ int ubcore_exchange_tp_info(struct ubcore_device *dev,
 }
 EXPORT_SYMBOL(ubcore_exchange_tp_info);
 
+/* Only for RC + RTP */
+int ubcore_exchange_tpid_info(struct ubcore_device *dev,
+	struct ubcore_get_tp_cfg *cfg, struct ubcore_ex_tpid_info *info,
+	struct ubcore_udata *udata)
+{
+	struct session_data_create_conn *session_data;
+	struct msg_create_conn_req req = { 0 };
+	struct ubcore_session *session;
+	int ret;
+
+	if (!dev || !cfg || !info) {
+		ubcore_log_err("Invalid parameter.\n");
+		return -EINVAL;
+	}
+
+	if (ubcore_is_loopback(dev, &cfg->peer_eid)) {
+		info->peer_tp_handle = info->tp_handle;
+		info->rx_psn = info->tx_psn;
+		ubcore_log_info("Finished handling loopback tp: %llu.\n",
+			info->tp_handle);
+		return 0;
+	}
+
+	session = create_session_for_create_connection(dev);
+	if (!session) {
+		ubcore_free_local_tpid(dev, info->tp_handle, info->tx_psn, udata);
+		return -ENOMEM;
+	}
+
+	req.get_tp_cfg = *cfg;
+	req.tp_handle = info->tp_handle;
+	req.tx_psn = info->tx_psn;
+	req.local_jetty_id = info->local_jetty_id;
+	req.peer_jetty_id = info->peer_jetty_id;
+	ret = send_create_req(dev, ubcore_session_get_id(session), &req);
+	if (ret != 0) {
+		ubcore_log_err("Failed to send create req message.\n");
+		ubcore_session_complete(session);
+		ubcore_session_ref_release(session);
+		ubcore_free_local_tpid(dev, info->tp_handle, info->tx_psn, udata);
+		return ret;
+	}
+
+	ubcore_session_wait(session);
+	session_data =
+		(struct session_data_create_conn *)ubcore_session_get_data(
+			session);
+	ret = session_data->ret;
+	if (ret != 0) {
+		ubcore_log_err("Create connection request failed, ret: %d.\n",
+			ret);
+		ubcore_session_ref_release(session);
+		ubcore_free_local_tpid(dev, info->tp_handle, info->tx_psn, udata);
+		return ret;
+	}
+	info->peer_tp_handle = session_data->peer_tp_handle;
+	info->rx_psn = session_data->rx_psn;
+	ubcore_session_ref_release(session);
+
+	ret = ubcore_add_ex_tp_info(dev, info->tp_handle);
+	ubcore_log_info("exchange tp_handle is %llu\n",
+		(unsigned long long)info->tp_handle);
+	/* An ubcore_add_ex_tp_info failure does not affect the exchange result */
+	if (ret != 0)
+		ubcore_log_err("Failed to add ex tp info, ret: %d.\n", ret);
+	return 0;
+}
+
+static inline uint32_t ubcore_get_tpid_hash(struct ubcore_tpid_key *key)
+{
+	return jhash(key, sizeof(struct ubcore_tpid_key), 0);
+}
+
+static struct ubcore_tpid_ctx *ubcore_fget_tpid_ctx(
+	struct ubcore_device *dev, struct ubcore_tpid_key *key)
+{
+	struct ubcore_tpid_ctx *ctx = NULL;
+	uint32_t hash;
+
+	hash = ubcore_get_tpid_hash(key);
+	ctx = ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_RC_TP_ID],
+		hash, key);
+	ubcore_log_info("Find tpid_ctx result: %d, hash: %u.\n",
+		(int)(ctx != NULL), hash);
+
+	return ctx;
+}
+
+void ubcore_tpid_get(void *obj)
+{
+	struct ubcore_tpid_ctx *ctx = obj;
+
+	kref_get(&ctx->ref);
+}
+
+static void ubcore_tpid_ctx_free(struct kref *ref)
+{
+	struct ubcore_tpid_ctx *ctx = container_of(ref,
+		struct ubcore_tpid_ctx, ref);
+
+	kfree(ctx);
+}
+
+static void ubcore_tpid_put(struct ubcore_tpid_ctx *ctx)
+{
+	kref_put(&ctx->ref, ubcore_tpid_ctx_free);
+}
+
+static void ubcore_reuse_target_rtp_tpid(struct ubcore_device *dev,
+	struct ubcore_tpid_ctx *ctx, struct ubcore_get_tp_cfg *cfg,
+	struct ubcore_net_msg *msg, void *conn)
+{
+	struct msg_create_conn_resp resp = { 0 };
+	int ret;
+
+	if (cfg->flag.bs.rtp != 1 || cfg->trans_mode != UBCORE_TP_RC) {
+		ubcore_log_err("Invalid operation.\n");
+		ubcore_tpid_put(ctx);
+		ret = -EINVAL;
+		goto send_resp;
+	}
+
+	if (!ctx->is_init) {
+		ubcore_log_err("Duplicate operation.\n");
+		ubcore_tpid_put(ctx);
+		ret = -EINVAL;
+		goto send_resp;
+	}
+
+	/* Reuse */
+	ubcore_log_info("Reuse tpid: %llu.\n", ctx->tp_handle);
+	resp.tp_handle = ctx->tp_handle;
+	resp.tx_psn = ctx->tx_psn;
+	ret = CREATE_CONN_SUCCESS;
+
+send_resp:
+	resp.result = ret;
+	if (send_create_resp(dev, conn, msg->session_id, &resp) != 0)
+		ubcore_log_err("Failed to send create resp message.\n");
+}
+
+static inline void fill_tpid_ctx(struct ubcore_tpid_ctx *ctx,
+	struct ubcore_tpid_key *key, struct ubcore_active_tp_cfg *cfg,
+	bool is_init)
+{
+	ctx->tp_handle = cfg->tp_handle.value;
+	ctx->peer_tp_handle = cfg->peer_tp_handle.value;
+	kref_init(&ctx->ref);
+	ctx->trans_mode = UBCORE_TP_RC;
+	ctx->key = *key;
+	ctx->tp_type = UBCORE_RTP;
+	ctx->is_init = is_init;
+	ctx->tx_psn = cfg->tp_attr.tx_psn;
+	ctx->rx_psn = cfg->tp_attr.rx_psn;
+}
+
+static void ubcore_fadd_target_tpid_ctx(struct ubcore_device *dev,
+	struct ubcore_tpid_key *key, struct ubcore_active_tp_cfg *cfg)
+{
+	struct ubcore_tpid_ctx *add_ctx = NULL;
+	struct ubcore_tpid_ctx *ctx = NULL;
+	struct ubcore_hash_table *ht;
+	uint32_t hash;
+
+	hash = ubcore_get_tpid_hash(key);
+	ht = &dev->ht[UBCORE_HT_RC_TP_ID];
+
+	add_ctx = kzalloc(sizeof(struct ubcore_tpid_ctx), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(add_ctx))
+		return;
+
+	spin_lock(&ht->lock);
+	ctx = ubcore_hash_table_lookup_nolock(ht, hash, key);
+	if (ctx && ctx->is_init) {
+		spin_unlock(&ht->lock);
+		ubcore_log_info("Find tpid in initiator, hash: %u.\n", hash);
+		(void)ubcore_deactive_tp(dev, cfg->tp_handle, NULL);
+		kfree(add_ctx);
+		return;
+	}
+	if (ctx && !ctx->is_init) {
+		spin_unlock(&ht->lock);
+		ubcore_log_info("Find tpid in target, hash: %u.\n", hash);
+		kfree(add_ctx);
+		return;
+	}
+
+	fill_tpid_ctx(add_ctx, key, cfg, false);
+	ubcore_hash_table_add_nolock(ht, &add_ctx->hnode, hash);
+	spin_unlock(&ht->lock);
+}
+
 static void handle_create_req(struct ubcore_device *dev,
 	struct ubcore_net_msg *msg, void *conn)
 {
@@ -336,13 +531,27 @@ static void handle_create_req(struct ubcore_device *dev,
 	struct ubcore_active_tp_cfg active_cfg = { 0 };
 	struct msg_create_conn_resp resp = { 0 };
 	struct ubcore_tp_info tp_info = { 0 };
+	struct ubcore_tpid_key key = { 0 };
+	struct ubcore_tpid_ctx *ctx = NULL;
 	uint32_t tp_cnt = 1;
 	uint64_t tp_handle;
 	uint32_t tx_psn;
 	int ret;
 
+	key.local_eid = req->get_tp_cfg.peer_eid;
+	key.peer_eid = req->get_tp_cfg.local_eid;
+	key.local_jetty_id = req->peer_jetty_id;
+	key.peer_jetty_id = req->local_jetty_id;
+
 	get_tp_cfg.local_eid = req->get_tp_cfg.peer_eid;
 	get_tp_cfg.peer_eid = req->get_tp_cfg.local_eid;
+
+	ctx = ubcore_fget_tpid_ctx(dev, &key);
+	if (ctx) {
+		ubcore_reuse_target_rtp_tpid(dev, ctx, &get_tp_cfg, msg, conn);
+		return;
+	}
+
 	ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_info, NULL);
 	if (ret != 0 || tp_cnt != 1) {
 		ubcore_log_err("Failed to get tp list, local eid " EID_FMT
@@ -373,6 +582,9 @@ static void handle_create_req(struct ubcore_device *dev,
 		goto send_resp;
 	}
 
+	if (get_tp_cfg.trans_mode == UBCORE_TP_RC)
+		ubcore_fadd_target_tpid_ctx(dev, &key, &active_cfg);
+
 	resp.tp_handle = tp_handle;
 	resp.tx_psn = tx_psn;
 	ret = CREATE_CONN_SUCCESS;
@@ -634,6 +846,83 @@ struct ubcore_tjetty *ubcore_import_jetty_compat(struct ubcore_device *dev,
 	return tjetty;
 }
 
+static inline void ubcore_update_vtpn(struct ubcore_vtpn *vtpn,
+	struct ubcore_tpid_ctx *ctx)
+{
+	union ubcore_tp_handle tp_handle;
+
+	tp_handle.value = ctx->tp_handle;
+	vtpn->vtpn = (uint32_t)tp_handle.bs.tpid;
+	vtpn->tp_handle = ctx->tp_handle;
+	vtpn->peer_tp_handle = ctx->peer_tp_handle;
+}
+
+static void ubcore_fadd_init_tpid_ctx(struct ubcore_device *dev,
+	struct ubcore_tpid_key *key, struct ubcore_active_tp_cfg *cfg,
+	struct ubcore_vtpn *vtpn)
+{
+	struct ubcore_tpid_ctx *add_ctx = NULL;
+	struct ubcore_tpid_ctx *ctx = NULL;
+	struct ubcore_hash_table *ht;
+	uint32_t hash;
+
+	hash = ubcore_get_tpid_hash(key);
+	ht = &dev->ht[UBCORE_HT_RC_TP_ID];
+
+	add_ctx = kzalloc(sizeof(struct ubcore_tpid_ctx), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(add_ctx))
+		return;
+
+	spin_lock(&ht->lock);
+	ctx = ubcore_hash_table_lookup_nolock(ht, hash, key);
+	if (ctx && ctx->is_init) {
+		spin_unlock(&ht->lock);
+		ubcore_log_warn("Find tpid in initiator, hash: %u.\n", hash);
+		kfree(add_ctx);
+		return;
+	}
+	if (ctx && !ctx->is_init) {
+		ubcore_update_vtpn(vtpn, ctx);
+		spin_unlock(&ht->lock);
+		ubcore_log_info("Find tpid in target, hash: %u.\n", hash);
+		(void)ubcore_deactive_tp(dev, cfg->tp_handle, NULL);
+		kfree(add_ctx);
+		return;
+	}
+
+	fill_tpid_ctx(add_ctx, key, cfg, true);
+	ubcore_hash_table_add_nolock(ht, &add_ctx->hnode, hash);
+	spin_unlock(&ht->lock);
+}
+
+static int ubcore_reuse_init_rtp_tpid(struct ubcore_jetty *jetty,
+	struct ubcore_tjetty *tjetty, struct ubcore_tpid_ctx *ctx,
+	struct ubcore_udata *udata)
+{
+	struct ubcore_active_tp_cfg active_tp_cfg = { 0 };
+	int ret;
+
+	if (ctx->is_init) {
+		ubcore_log_err("Invalid operation, tp_handle: %llu.\n",
+			ctx->tp_handle);
+		ubcore_tpid_put(ctx);
+		return -EINVAL;
+	}
+
+	/* Reuse */
+	active_tp_cfg.tp_handle.value = ctx->tp_handle;
+	active_tp_cfg.peer_tp_handle.value = ctx->peer_tp_handle;
+	active_tp_cfg.tp_attr.tx_psn = ctx->tx_psn;
+	active_tp_cfg.tp_attr.rx_psn = ctx->rx_psn;
+
+	ret = ubcore_bind_jetty_ex(jetty, tjetty, &active_tp_cfg, udata);
+	if (ret != 0) {
+		ubcore_log_err("Failed to bind jetty with reused tpid, ret: %d.\n", ret);
+		ubcore_tpid_put(ctx);
+	}
+	return ret;
+}
+
 int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty,
 	struct ubcore_tjetty *tjetty,
 	struct ubcore_udata *udata)
@@ -641,10 +930,21 @@ int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty,
 	struct ubcore_active_tp_cfg active_tp_cfg = { 0 };
 	struct ubcore_get_tp_cfg get_tp_cfg = { 0 };
 	struct ubcore_device *dev = jetty->ub_dev;
+	struct ubcore_ex_tpid_info info = { 0 };
 	struct ubcore_tp_info tp_list = { 0 };
+	struct ubcore_tpid_key key = { 0 };
+	struct ubcore_tpid_ctx *ctx;
 	uint32_t tp_cnt = 1;
 	int ret;
 
+	key.local_eid = jetty->jetty_id.eid;
+	key.peer_eid = tjetty->cfg.id.eid;
+	key.local_jetty_id = jetty->jetty_id.id;
+	key.peer_jetty_id = tjetty->cfg.id.id;
+	ctx = ubcore_fget_tpid_ctx(dev, &key);
+	if (ctx)
+		return ubcore_reuse_init_rtp_tpid(jetty, tjetty, ctx, udata);
+
 	ret = ubcore_fill_get_tp_cfg(dev, &get_tp_cfg, &tjetty->cfg);
 	if (ret != 0)
 		return ret;
@@ -660,11 +960,12 @@ int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty,
 	active_tp_cfg.tp_attr.tx_psn = get_random_u32();
 
 	if (tjetty->cfg.tp_type == UBCORE_RTP) {
-		ret = ubcore_exchange_tp_info(dev, &get_tp_cfg,
-			tp_list.tp_handle.value,
-			active_tp_cfg.tp_attr.tx_psn,
-			&active_tp_cfg.peer_tp_handle.value,
-			&active_tp_cfg.tp_attr.rx_psn, udata);
+		info.tp_handle = tp_list.tp_handle.value;
+		info.tx_psn = active_tp_cfg.tp_attr.tx_psn;
+		info.local_jetty_id = jetty->jetty_id.id;
+		info.peer_jetty_id = tjetty->cfg.id.id;
+		ret = ubcore_exchange_tpid_info(dev, &get_tp_cfg,
+			&info, udata);
 		if (ret != 0) {
 			ubcore_log_err("Failed to exchange tp info, ret: %d.\n", ret);
 			return ret;
@@ -673,12 +974,17 @@ int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty,
 			", peer eid " EID_FMT ".\n",
 			EID_ARGS(get_tp_cfg.local_eid),
 			EID_ARGS(get_tp_cfg.peer_eid));
+		active_tp_cfg.peer_tp_handle.value = info.peer_tp_handle;
+		active_tp_cfg.tp_attr.rx_psn = info.rx_psn;
 	}
 
 	ret = ubcore_bind_jetty_ex(jetty, tjetty, &active_tp_cfg, udata);
 	if (ret != 0)
 		ubcore_log_err("Failed to bind jetty ex, ret: %d.\n", ret);
 
+	if (ret == 0 && tjetty->cfg.tp_type == UBCORE_RTP)
+		ubcore_fadd_init_tpid_ctx(dev, &key, &active_tp_cfg, tjetty->vtpn);
+
 	return ret;
 }
 
diff --git a/drivers/ub/urma/ubcore/ubcore_connect_adapter.h b/drivers/ub/urma/ubcore/ubcore_connect_adapter.h
index 66df8cc1d272..f8ff94a3dff5 100644
--- a/drivers/ub/urma/ubcore/ubcore_connect_adapter.h
+++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.h
@@ -20,6 +20,39 @@ struct ubcore_ex_tp_info {
 	struct kref ref_cnt;
 };
 
+struct ubcore_tpid_key {
+	union ubcore_eid local_eid;
+	union ubcore_eid peer_eid;
+	uint64_t tag;
+	uint32_t local_jetty_id;
+	uint32_t peer_jetty_id;
+};
+
+/* The tpid is added into the UBCORE_HT_RC_TP_ID hash table
+ * after the TP is activated */
+struct ubcore_tpid_ctx {
+	struct hlist_node hnode;
+	uint64_t tp_handle;
+	uint64_t peer_tp_handle;
+	struct kref ref;
+	enum ubcore_transport_mode trans_mode;
+	struct ubcore_tpid_key key;
+	enum ubcore_tp_type tp_type;
+	/* true: initiator tp_handle; false: target tp_handle */
+	bool is_init;
+	uint32_t tx_psn; /* optional */
+	uint32_t rx_psn; /* optional */
+};
+
+struct ubcore_ex_tpid_info {
+	uint64_t tp_handle;
+	uint64_t peer_tp_handle;
+	uint32_t tx_psn;
+	uint32_t rx_psn;
+	uint32_t local_jetty_id;
+	uint32_t peer_jetty_id;
+};
+
 struct ubcore_tjetty *ubcore_import_jfr_compat(struct ubcore_device *dev,
 	struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata);
 
@@ -41,4 +74,6 @@ static inline bool ubcore_check_ctrlplane_compat(void *op_ptr)
 	return (op_ptr == NULL);
 }
 
+void ubcore_tpid_get(void *obj);
+
 #endif
diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c
index 213cce97f809..315e3b8fde88 100644
--- a/drivers/ub/urma/ubcore/ubcore_device.c
+++ b/drivers/ub/urma/ubcore/ubcore_device.c
@@ -594,6 +594,15 @@ static struct ubcore_ht_param g_ht_params[] = {
 		offsetof(struct ubcore_ex_tp_info, tp_handle),
 		sizeof(uint64_t), NULL,
 		ubcore_free_driver_obj, NULL },
+	[UBCORE_HT_RC_TP_ID] = {
+		UBCORE_HASH_TABLE_SIZE,
+		offsetof(struct ubcore_tpid_ctx, hnode),
+		offsetof(struct ubcore_tpid_ctx, key),
+		sizeof(struct ubcore_tpid_key),
+		NULL,
+		ubcore_free_driver_obj,
+		ubcore_tpid_get
+	},
 };
 
 static inline void ubcore_set_vtpn_hash_table_size(uint32_t vtpn_size)
-- 
Gitee
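
A note on the keying that makes the reuse work, with a small stand-alone sketch (illustration only, not part of the patch): ubcore_bind_jetty_compat() builds the tpid key from the local jetty's eid/id first, while handle_create_req() swaps the local and peer fields taken from the incoming request, so a node that earlier activated the RTP as initiator computes exactly the same key when it later serves the peer's create request as target. The struct, the fnv1a_hash() stand-in and the eid/jetty values below are hypothetical; the kernel code hashes struct ubcore_tpid_key with jhash() into dev->ht[UBCORE_HT_RC_TP_ID].

    /*
     * Stand-alone sketch of the tpid key symmetry; simplified types,
     * not the kernel implementation.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_tpid_key {            /* simplified ubcore_tpid_key */
        uint64_t local_eid;           /* real key uses union ubcore_eid */
        uint64_t peer_eid;
        uint32_t local_jetty_id;
        uint32_t peer_jetty_id;
    };

    /* Stand-in for jhash(key, sizeof(*key), 0). */
    static uint32_t fnv1a_hash(const void *data, size_t len)
    {
        const uint8_t *p = data;
        uint32_t h = 2166136261u;

        while (len--)
            h = (h ^ *p++) * 16777619u;
        return h;
    }

    /* Key as built in ubcore_bind_jetty_compat(): local side comes first. */
    static struct demo_tpid_key key_as_initiator(uint64_t my_eid, uint32_t my_jetty,
                                                 uint64_t peer_eid, uint32_t peer_jetty)
    {
        struct demo_tpid_key k = { 0 };

        k.local_eid = my_eid;
        k.peer_eid = peer_eid;
        k.local_jetty_id = my_jetty;
        k.peer_jetty_id = peer_jetty;
        return k;
    }

    /* Key as built in handle_create_req(): the request's local/peer are swapped. */
    static struct demo_tpid_key key_as_target(uint64_t req_local_eid, uint32_t req_local_jetty,
                                              uint64_t req_peer_eid, uint32_t req_peer_jetty)
    {
        struct demo_tpid_key k = { 0 };

        k.local_eid = req_peer_eid;        /* this node's own eid */
        k.peer_eid = req_local_eid;        /* the remote initiator's eid */
        k.local_jetty_id = req_peer_jetty;
        k.peer_jetty_id = req_local_jetty;
        return k;
    }

    int main(void)
    {
        /* Node B bound its jetty 9 to node A's jetty 7 earlier (B as initiator). */
        struct demo_tpid_key init = key_as_initiator(0xB, 9, 0xA, 7);
        /* Node A now connects back; in A's request local = (0xA, 7), peer = (0xB, 9). */
        struct demo_tpid_key tgt = key_as_target(0xA, 7, 0xB, 9);

        printf("same key: %d, hash: %u\n",
               memcmp(&init, &tgt, sizeof(init)) == 0,
               fnv1a_hash(&tgt, sizeof(tgt)));
        return 0;
    }

Because both paths land on the same entry, ubcore_fget_tpid_ctx() finds the existing context and the second activation is skipped; when both sides race and each activates its own TP, the redundant one is released with ubcore_deactive_tp() in the ubcore_fadd_*_tpid_ctx() helpers.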