diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile index eba8e210fda39bbf2a71de8c3a38b89c03c22fa4..21242a76024c14e03a062f719f223536c7d14b30 100644 --- a/drivers/ub/urma/ubcore/Makefile +++ b/drivers/ub/urma/ubcore/Makefile @@ -5,8 +5,13 @@ ubcore-objs := ubcore_main.o \ ubcore_device.o \ + ubcore_jetty.o \ + ubcore_segment.o \ ubcore_umem.o \ + ubcore_hash_table.o \ ubcore_tp.o \ - ubcore_netlink.o + ubcore_tp_table.o \ + ubcore_netlink.o \ + ubcore_dp.o obj-$(CONFIG_UB) += ubcore.o diff --git a/drivers/ub/urma/ubcore/ubcore_cmd.h b/drivers/ub/urma/ubcore/ubcore_cmd.h index 51dbbb8aae7687a835c09b64beb7948579493e71..7e8e49a09d73f1051a0e66bf5218240b3441587d 100644 --- a/drivers/ub/urma/ubcore/ubcore_cmd.h +++ b/drivers/ub/urma/ubcore/ubcore_cmd.h @@ -81,6 +81,39 @@ struct ubcore_cmd_query_stats { } out; }; +struct ubcore_cmd_query_res { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint8_t eid[UBCORE_CMD_EID_SIZE]; + uint32_t tp_type; + uint32_t type; + uint32_t key; + } in; + struct { + uint64_t addr; + uint32_t len; + } out; +}; + +struct ubcore_cmd_set_utp { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint8_t eid[UBCORE_CMD_EID_SIZE]; + uint32_t transport_type; + bool spray_en; + uint16_t data_udp_start; + uint8_t udp_range; + } in; +}; + +struct ubcore_cmd_show_utp { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint8_t eid[UBCORE_CMD_EID_SIZE]; + uint32_t transport_type; + } in; +}; + /* copy from user_space addr to kernel args */ static inline int ubcore_copy_from_user(void *args, const void *args_addr, unsigned long args_size) { diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c index 56f8ac5364b7e02092a5761787cb74fd068ef5c3..d2978665ea8473ebe5c7fea51736342ae05ccb8b 100644 --- a/drivers/ub/urma/ubcore/ubcore_device.c +++ b/drivers/ub/urma/ubcore/ubcore_device.c @@ -32,6 +32,9 @@ #include #include #include "ubcore_priv.h" +#include "ubcore_hash_table.h" +#include 
"ubcore_tp.h" +#include "ubcore_tp_table.h" static LIST_HEAD(g_client_list); static LIST_HEAD(g_device_list); @@ -309,6 +312,52 @@ void ubcore_put_device(struct ubcore_device *dev) complete(&dev->comp); } +static struct ubcore_ht_param g_ht_params[] = { + [UBCORE_HT_JFS] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfs, hnode), + offsetof(struct ubcore_jfs, id), sizeof(uint32_t), NULL, NULL }, + + [UBCORE_HT_JFR] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfr, hnode), + offsetof(struct ubcore_jfr, id), sizeof(uint32_t), NULL, NULL }, + + [UBCORE_HT_JFC] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfc, hnode), + offsetof(struct ubcore_jfc, id), sizeof(uint32_t), NULL, NULL }, + + [UBCORE_HT_JETTY] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jetty, hnode), + offsetof(struct ubcore_jetty, id), sizeof(uint32_t), NULL, NULL }, + + [UBCORE_HT_TP] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_tp_node, hnode), + offsetof(struct ubcore_tp_node, key), sizeof(struct ubcore_tp_key), NULL, + NULL }, +}; + +static int ubcore_alloc_hash_tables(struct ubcore_device *dev) +{ + uint32_t i, j; + int ret; + + for (i = 0; i < ARRAY_SIZE(g_ht_params); i++) { + ret = ubcore_hash_table_alloc(&dev->ht[i], &g_ht_params[i]); + if (ret != 0) { + ubcore_log_err("alloc hash tables failed.\n"); + goto free_tables; + } + } + return 0; + +free_tables: + for (j = 0; j < i; j++) + ubcore_hash_table_free(&dev->ht[j]); + return -1; +} + +static void ubcore_free_hash_tables(struct ubcore_device *dev) +{ + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(g_ht_params); i++) + ubcore_hash_table_free(&dev->ht[i]); +} + static void ubcore_device_release(struct device *device) { } @@ -336,12 +385,17 @@ static int init_ubcore_device(struct ubcore_device *dev) init_completion(&dev->comp); atomic_set(&dev->use_cnt, 1); + if (ubcore_alloc_hash_tables(dev) != 0) { + ubcore_log_err("alloc hash tables failed.\n"); + return -1; + } ubcore_set_default_eid(dev); return 0; } static void 
uninit_ubcore_device(struct ubcore_device *dev) { + ubcore_free_hash_tables(dev); put_device(&dev->dev); } @@ -425,8 +479,58 @@ void ubcore_unregister_device(struct ubcore_device *dev) } EXPORT_SYMBOL(ubcore_unregister_device); +void ubcore_register_event_handler(struct ubcore_device *dev, struct ubcore_event_handler *handler) +{ + unsigned long flags; + + if (dev == NULL || handler == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + spin_lock_irqsave(&dev->event_handler_lock, flags); + list_add_tail(&handler->node, &dev->event_handler_list); + spin_unlock_irqrestore(&dev->event_handler_lock, flags); +} +EXPORT_SYMBOL(ubcore_register_event_handler); + +void ubcore_unregister_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler) +{ + unsigned long flags; + + if (dev == NULL || handler == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + spin_lock_irqsave(&dev->event_handler_lock, flags); + list_del(&handler->node); + spin_unlock_irqrestore(&dev->event_handler_lock, flags); +} +EXPORT_SYMBOL(ubcore_unregister_event_handler); + void ubcore_dispatch_async_event(struct ubcore_event *event) { + struct ubcore_event_handler *handler; + struct ubcore_device *dev; + unsigned long flags; + + if (event == NULL || event->ub_dev == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + if (event->event_type == UBCORE_EVENT_TP_ERR && event->element.tp != NULL) { + ubcore_restore_tp(event->ub_dev, event->element.tp); + return; + } + + dev = event->ub_dev; + spin_lock_irqsave(&dev->event_handler_lock, flags); + list_for_each_entry(handler, &dev->event_handler_list, node) + handler->event_callback(event, handler); + spin_unlock_irqrestore(&dev->event_handler_lock, flags); } EXPORT_SYMBOL(ubcore_dispatch_async_event); @@ -486,6 +590,96 @@ int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid) } EXPORT_SYMBOL(ubcore_set_eid); +int ubcore_set_upi(const struct ubcore_device *dev, uint16_t vf_id, 
uint16_t idx, uint32_t upi) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->set_upi == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->set_upi(dev, vf_id, idx, upi); + if (ret != 0) { + ubcore_log_err("failed to set vf%hu upi%hu, ret: %d.\n", vf_id, idx, ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_set_upi); + +int ubcore_add_eid(struct ubcore_device *dev, union ubcore_eid *eid) +{ + int ret; + + if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->add_eid == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->add_eid(dev, eid); + if (ret != 0) { + ubcore_log_err("failed to add eid, ret: %d.\n", ret); + return -EPERM; + } + return ret; +} +EXPORT_SYMBOL(ubcore_add_eid); + +int ubcore_delete_eid(struct ubcore_device *dev, uint16_t idx) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->delete_eid_by_idx == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->delete_eid_by_idx(dev, idx); + if (ret != 0) { + ubcore_log_err("failed to delete eid, ret: %d.\n", ret); + return -EPERM; + } + return ret; +} +EXPORT_SYMBOL(ubcore_delete_eid); + +int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg) +{ + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || dev->ops->add_ueid == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->add_ueid(dev, vf_id, cfg); + if (ret != 0) { + ubcore_log_err("failed to add ueid, ret: %d.\n", ret); + return -EPERM; + } + return ret; +} +EXPORT_SYMBOL(ubcore_add_ueid); + +int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t vf_id, uint16_t idx) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->delete_ueid_by_idx == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->delete_ueid_by_idx(dev, vf_id, idx); + if (ret != 
0) { + ubcore_log_err("failed to delete ueid, ret: %d.\n", ret); + return -EPERM; + } + return ret; +} +EXPORT_SYMBOL(ubcore_delete_ueid); + int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_attr *attr) { int ret; @@ -504,6 +698,44 @@ int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_att } EXPORT_SYMBOL(ubcore_query_device_attr); +int ubcore_query_device_status(const struct ubcore_device *dev, struct ubcore_device_status *status) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->query_device_status == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_device_status(dev, status); + if (ret != 0) { + ubcore_log_err("failed to query device status, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_device_status); + +int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + int ret; + + if (dev == NULL || key == NULL || val == NULL || dev->ops == NULL || + dev->ops->query_res == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_res(dev, key, val); + if (ret != 0) { + ubcore_log_err("failed to query res, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_resource); + int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg) { int ret; @@ -522,6 +754,32 @@ int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_c } EXPORT_SYMBOL(ubcore_config_device); +int ubcore_user_control(struct ubcore_user_ctl *k_user_ctl) +{ + struct ubcore_device *dev; + int ret; + + if (k_user_ctl == NULL || k_user_ctl->uctx == NULL) { + ubcore_log_err("invalid parameter with input nullptr.\n"); + return -1; + } + + dev = k_user_ctl->uctx->ub_dev; + if (dev == NULL || dev->ops == NULL || dev->ops->user_ctl == NULL) { + ubcore_log_err("invalid parameter with 
dev nullptr.\n"); + return -1; + } + + ret = dev->ops->user_ctl(k_user_ctl); + if (ret != 0) { + ubcore_log_err("failed to exec kdrv_user_ctl in %s.\n", __func__); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ubcore_user_control); + int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, struct ubcore_stats_val *val) { diff --git a/drivers/ub/urma/ubcore/ubcore_dp.c b/drivers/ub/urma/ubcore/ubcore_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..c2bf3b1e173f9811945c8e856b7fba56e0cd28e8 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_dp.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: kmod ub data path API + * Author: sunfang + * Create: 2023-05-09 + * Note: + * History: 2023-05-09 + */ +#include +#include "ubcore_log.h" +#include +#include +#include + +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->post_jetty_send_wr == NULL || wr == NULL || bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->post_jetty_send_wr(jetty, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jetty_send_wr); + +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->post_jetty_recv_wr == NULL || wr == NULL || bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->post_jetty_recv_wr(jetty, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jetty_recv_wr); + +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->post_jfs_wr == NULL || wr == NULL || bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfs->ub_dev->ops; + return dev_ops->post_jfs_wr(jfs, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jfs_wr); + +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops == NULL || + jfr->ub_dev->ops->post_jfr_wr == NULL || wr == NULL || bad_wr == 
NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfr->ub_dev->ops; + return dev_ops->post_jfr_wr(jfr, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jfr_wr); + +int ubcore_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) +{ + struct ubcore_ops *dev_ops; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->poll_jfc == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfc->ub_dev->ops; + return dev_ops->poll_jfc(jfc, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_poll_jfc); + +int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only) +{ + struct ubcore_ops *dev_ops; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->rearm_jfc == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfc->ub_dev->ops; + return dev_ops->rearm_jfc(jfc, solicited_only); +} +EXPORT_SYMBOL(ubcore_rearm_jfc); diff --git a/drivers/ub/urma/ubcore/ubcore_hash_table.c b/drivers/ub/urma/ubcore/ubcore_hash_table.c new file mode 100644 index 0000000000000000000000000000000000000000..c3d66301c22c4b913be892c8676a237d869dc727 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_hash_table.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: implement hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" + +int ubcore_hash_table_alloc(struct ubcore_hash_table *ht, const struct ubcore_ht_param *p) +{ + uint32_t i; + + if (p == NULL || p->size == 0) + return -1; + ht->p = *p; + ht->head = kcalloc(p->size, sizeof(struct hlist_head), GFP_KERNEL); + if (ht->head == NULL) { + ubcore_log_err("hash table allocation failed.\n"); + return -1; + } + for (i = 0; i < p->size; i++) + INIT_HLIST_HEAD(&ht->head[i]); + + spin_lock_init(&ht->lock); + kref_init(&ht->kref); + return 0; +} + +void ubcore_hash_table_free_with_cb(struct ubcore_hash_table *ht, void (*free_cb)(void *)) +{ + struct hlist_node *pos = NULL, *next = NULL; + struct hlist_head *head; + uint32_t i; + void *obj; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + for (i = 0; i < ht->p.size; i++) { + hlist_for_each_safe(pos, next, &ht->head[i]) { + obj = ubcore_ht_obj(ht, pos); + hlist_del(pos); + if (free_cb != NULL) + free_cb(obj); + else if (ht->p.free_f != NULL) + ht->p.free_f(obj); + else + kfree(obj); + } + } + head = ht->head; + ht->head = NULL; + spin_unlock(&ht->lock); + if (head != NULL) + kfree(head); +} + +void ubcore_hash_table_free(struct ubcore_hash_table *ht) +{ + ubcore_hash_table_free_with_cb(ht, NULL); +} + +void ubcore_hash_table_add_nolock(struct ubcore_hash_table *ht, struct hlist_node *hnode, + uint32_t hash) +{ + INIT_HLIST_NODE(hnode); + hlist_add_head(hnode, &ht->head[hash % ht->p.size]); +} + +void ubcore_hash_table_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + ubcore_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); +} + +void ubcore_hash_table_remove(struct ubcore_hash_table *ht, 
struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + hlist_del(hnode); + spin_unlock(&ht->lock); +} + +void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, uint32_t hash, const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubcore_ht_obj(ht, pos); + if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) { + break; + } else if (ht->p.key_size > 0 && + memcmp(ubcore_ht_key(ht, pos), key, ht->p.key_size) == 0) { + break; + } + obj = NULL; + } + return obj; +} + +void *ubcore_hash_table_lookup(struct ubcore_hash_table *ht, uint32_t hash, const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubcore_hash_table_lookup_nolock(ht, hash, key); + spin_unlock(&ht->lock); + return obj; +} + +/* Do not insert a new entry if an old entry with the same key exists */ +int ubcore_hash_table_find_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, + uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -1; + } + /* Old entry with the same key exists */ + if (ubcore_hash_table_lookup_nolock(ht, hash, ubcore_ht_key(ht, hnode)) != NULL) { + spin_unlock(&ht->lock); + return -1; + } + ubcore_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); + return 0; +} + +void *ubcore_hash_table_find_remove(struct ubcore_hash_table *ht, uint32_t hash, const void *key) +{ + struct hlist_node *pos = NULL, *next = NULL; + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + hlist_for_each_safe(pos, next, &ht->head[hash % ht->p.size]) { + obj = ubcore_ht_obj(ht, pos); + if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) { + hlist_del(pos); + break; + } else if (ht->p.key_size > 0 && + memcmp(ubcore_ht_key(ht, pos), 
key, ht->p.key_size) == 0) { + hlist_del(pos); + break; + } + obj = NULL; + } + spin_unlock(&ht->lock); + return obj; +} diff --git a/drivers/ub/urma/ubcore/ubcore_hash_table.h b/drivers/ub/urma/ubcore/ubcore_hash_table.h new file mode 100644 index 0000000000000000000000000000000000000000..cdf136e74fa6528278056c25886f8f2bf1d10c56 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_hash_table.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: define hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#ifndef UBCORE_HASH_TABLE_H +#define UBCORE_HASH_TABLE_H + +#include + +static inline void *ubcore_ht_obj(const struct ubcore_hash_table *ht, + const struct hlist_node *hnode) +{ + return (char *)hnode - ht->p.node_offset; +} + +static inline void *ubcore_ht_key(const struct ubcore_hash_table *ht, + const struct hlist_node *hnode) +{ + return ((char *)hnode - ht->p.node_offset) + ht->p.key_offset; +} +/* Init ht head, not calloc hash table itself */ +int ubcore_hash_table_alloc(struct ubcore_hash_table *ht, const struct ubcore_ht_param *p); +/* Free ht head, not release hash table itself */ +void ubcore_hash_table_free(struct ubcore_hash_table *ht); +void ubcore_hash_table_free_with_cb(struct ubcore_hash_table *ht, void (*free_cb)(void *)); +void ubcore_hash_table_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, uint32_t 
hash); +void ubcore_hash_table_add_nolock(struct ubcore_hash_table *ht, struct hlist_node *hnode, + uint32_t hash); +void ubcore_hash_table_remove(struct ubcore_hash_table *ht, struct hlist_node *hnode); +void *ubcore_hash_table_lookup(struct ubcore_hash_table *ht, uint32_t hash, const void *key); +void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, uint32_t hash, const void *key); +void *ubcore_hash_table_find_remove(struct ubcore_hash_table *ht, uint32_t hash, const void *key); +/* Do not insert a new entry if an old entry with the same key exists */ +int ubcore_hash_table_find_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, + uint32_t hash); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_jetty.c b/drivers/ub/urma/ubcore/ubcore_jetty.c new file mode 100644 index 0000000000000000000000000000000000000000..e662189c59f058921c43ca726a93d78e9d7518b4 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_jetty.c @@ -0,0 +1,908 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore jetty kernel module + * Author: Ouyang Changchun + * Create: 2021-11-25 + * Note: + * History: 2021-11-25: create file + * History: 2022-07-28: Yan Fangfang move jetty implementation here + */ + +#include +#include +#include +#include +#include +#include "ubcore_log.h" +#include +#include +#include "ubcore_priv.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" + +struct ubcore_jfc *ubcore_find_jfc(struct ubcore_device *dev, uint32_t jfc_id) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JFC], jfc_id, &jfc_id); +} +EXPORT_SYMBOL(ubcore_find_jfc); + +struct ubcore_jfs *ubcore_find_jfs(struct ubcore_device *dev, uint32_t jfs_id) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JFS], jfs_id, &jfs_id); +} +EXPORT_SYMBOL(ubcore_find_jfs); + +struct ubcore_jfr *ubcore_find_jfr(struct ubcore_device *dev, uint32_t jfr_id) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JFR], jfr_id, &jfr_id); +} +EXPORT_SYMBOL(ubcore_find_jfr); + +static uint32_t ubcore_get_eq_id(const struct ubcore_device *dev) +{ + uint32_t eq_id = 0; + int cpu; + + if (dev->num_comp_vectors > 0) { + cpu = get_cpu(); + eq_id = (uint32_t)(cpu % dev->num_comp_vectors); + put_cpu(); + } + return eq_id; +} + +static int check_and_fill_jfc_attr(struct ubcore_jfc_cfg *cfg, const struct ubcore_jfc_cfg *user) +{ + if (cfg->depth < user->depth) + return -1; + + /* store the immutable and skip the driver updated depth */ + cfg->flag = user->flag; + cfg->jfc_context = user->jfc_context; + return 0; +} + +struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubcore_jfc_cfg *cfg, + ubcore_comp_callback_t jfce_handler, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jfc *jfc; + uint32_t eq_id; + + if (dev == NULL || cfg == NULL || dev->ops->create_jfc == NULL || + dev->ops->destroy_jfc == NULL) + return NULL; + + eq_id = ubcore_get_eq_id(dev); + + ((struct 
ubcore_jfc_cfg *)cfg)->eq_id = eq_id; + jfc = dev->ops->create_jfc(dev, cfg, udata); + if (jfc == NULL) { + ubcore_log_err("failed to create jfc.\n"); + return NULL; + } + + if (check_and_fill_jfc_attr(&jfc->jfc_cfg, cfg) != 0) { + (void)dev->ops->destroy_jfc(jfc); + ubcore_log_err("jfc cfg is not qualified.\n"); + return NULL; + } + jfc->jfc_cfg.eq_id = eq_id; + jfc->jfce_handler = jfce_handler; + jfc->jfae_handler = jfae_handler; + jfc->ub_dev = dev; + jfc->uctx = ubcore_get_uctx(udata); + atomic_set(&jfc->use_cnt, 0); + + if (ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFC], &jfc->hnode, jfc->id) != 0) { + (void)dev->ops->destroy_jfc(jfc); + ubcore_log_err("Failed to add jfc.\n"); + return NULL; + } + return jfc; +} +EXPORT_SYMBOL(ubcore_create_jfc); + +int ubcore_modify_jfc(struct ubcore_jfc *jfc, const struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + uint32_t jfc_id; + int ret; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops->modify_jfc == NULL) + return -EINVAL; + + jfc_id = jfc->id; + dev = jfc->ub_dev; + + ret = dev->ops->modify_jfc(jfc, attr, udata); + if (ret < 0) + ubcore_log_err("UBEP failed to modify jfc, jfc_id:%u.\n", jfc_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jfc); + +int ubcore_delete_jfc(struct ubcore_jfc *jfc) +{ + struct ubcore_device *dev; + uint32_t jfc_id; + int ret; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops->destroy_jfc == NULL) + return -1; + + if (WARN_ON_ONCE(atomic_read(&jfc->use_cnt))) + return -EBUSY; + + jfc_id = jfc->id; + dev = jfc->ub_dev; + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFC], &jfc->hnode); + ret = dev->ops->destroy_jfc(jfc); + if (ret < 0) + ubcore_log_err("UBEP failed to destroy jfc, jfc_id:%u.\n", jfc_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfc); + +static int check_and_fill_jfs_attr(struct ubcore_jfs_cfg *cfg, const struct ubcore_jfs_cfg *user) +{ + if (cfg->depth < user->depth || cfg->max_sge < 
user->max_sge || + cfg->max_rsge < user->max_rsge || cfg->max_inline_data < user->max_inline_data) + return -1; + + /* store the immutable and skip the driver updated attributes including depth, + * max_sge and max_inline_data + */ + cfg->flag = user->flag; + cfg->priority = user->priority; + cfg->retry_cnt = user->retry_cnt; + cfg->rnr_retry = user->rnr_retry; + cfg->err_timeout = user->err_timeout; + cfg->trans_mode = user->trans_mode; + cfg->jfs_context = user->jfs_context; + cfg->jfc = user->jfc; + return 0; +} + +struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubcore_jfs_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jfs *jfs; + + if (dev == NULL || cfg == NULL || dev->ops->create_jfs == NULL || + dev->ops->destroy_jfs == NULL) + return NULL; + + if (((uint16_t)cfg->trans_mode & dev->attr.dev_cap.trans_mode) == 0) { + ubcore_log_err("jfs cfg is not supported.\n"); + return NULL; + } + + jfs = dev->ops->create_jfs(dev, cfg, udata); + if (jfs == NULL) { + ubcore_log_err("failed to create jfs.\n"); + return NULL; + } + + /* Prevent ubcore private data from being modified */ + if (check_and_fill_jfs_attr(&jfs->jfs_cfg, cfg) != 0) { + (void)dev->ops->destroy_jfs(jfs); + ubcore_log_err("jfs cfg is not qualified.\n"); + return NULL; + } + jfs->ub_dev = dev; + jfs->uctx = ubcore_get_uctx(udata); + jfs->jfae_handler = jfae_handler; + if (ubcore_jfs_need_advise(jfs)) { + jfs->tptable = ubcore_create_tptable(); + if (jfs->tptable == NULL) { + (void)dev->ops->destroy_jfs(jfs); + ubcore_log_err("Failed to create tp table in the jfs.\n"); + return NULL; + } + } + atomic_set(&jfs->use_cnt, 0); + + if (ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFS], &jfs->hnode, jfs->id) != 0) { + ubcore_destroy_tptable(&jfs->tptable); + (void)dev->ops->destroy_jfs(jfs); + ubcore_log_err("Failed to add jfs.\n"); + return NULL; + } + + atomic_inc(&cfg->jfc->use_cnt); + return jfs; +} 
+EXPORT_SYMBOL(ubcore_create_jfs); + +int ubcore_modify_jfs(struct ubcore_jfs *jfs, const struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + uint32_t jfs_id; + int ret; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops->modify_jfs == NULL) + return -EINVAL; + + jfs_id = jfs->id; + dev = jfs->ub_dev; + ret = dev->ops->modify_jfs(jfs, attr, udata); + if (ret < 0) + ubcore_log_err("UBEP failed to modify jfs, jfs_id:%u.\n", jfs_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jfs); + +int ubcore_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr) +{ + struct ubcore_device *dev; + uint32_t jfs_id; + int ret; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops->query_jfs == NULL) + return -EINVAL; + + jfs_id = jfs->id; + dev = jfs->ub_dev; + ret = dev->ops->query_jfs(jfs, cfg, attr); + if (ret < 0) + ubcore_log_err("UBEP failed to query jfs, jfs_id:%u.\n", jfs_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_query_jfs); + +int ubcore_delete_jfs(struct ubcore_jfs *jfs) +{ + struct ubcore_device *dev; + struct ubcore_jfc *jfc; + uint32_t jfs_id; + int ret; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops->destroy_jfs == NULL) + return -EINVAL; + + jfc = jfs->jfs_cfg.jfc; + jfs_id = jfs->id; + dev = jfs->ub_dev; + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFS], &jfs->hnode); + ubcore_destroy_tptable(&jfs->tptable); + ret = dev->ops->destroy_jfs(jfs); + if (ret < 0) + ubcore_log_err("UBEP failed to destroy jfs, jfs_id:%u.\n", jfs_id); + else + atomic_dec(&jfc->use_cnt); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfs); + +int ubcore_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr) +{ + struct ubcore_ops *dev_ops; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->flush_jfs == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = 
jfs->ub_dev->ops; + return dev_ops->flush_jfs(jfs, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_flush_jfs); + +static int check_and_fill_jfr_attr(struct ubcore_jfr_cfg *cfg, const struct ubcore_jfr_cfg *user) +{ + if (cfg->depth < user->depth || cfg->max_sge < user->max_sge) + return -1; + + /* store the immutable and skip the driver updated attributes including depth, max_sge */ + cfg->flag = user->flag; + cfg->min_rnr_timer = user->min_rnr_timer; + cfg->trans_mode = user->trans_mode; + cfg->ukey = user->ukey; + cfg->jfr_context = user->jfr_context; + cfg->jfc = user->jfc; + return 0; +} + +struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, const struct ubcore_jfr_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jfr *jfr; + + if (dev == NULL || cfg == NULL || dev->ops->create_jfr == NULL || + dev->ops->destroy_jfr == NULL) + return NULL; + + jfr = dev->ops->create_jfr(dev, cfg, udata); + if (jfr == NULL) { + ubcore_log_err("failed to create jfr.\n"); + return NULL; + } + + if (check_and_fill_jfr_attr(&jfr->jfr_cfg, cfg) != 0) { + ubcore_log_err("jfr cfg is not qualified.\n"); + (void)dev->ops->destroy_jfr(jfr); + return NULL; + } + jfr->ub_dev = dev; + jfr->uctx = ubcore_get_uctx(udata); + jfr->jfae_handler = jfae_handler; + if (ubcore_jfr_need_advise(jfr)) { + jfr->tptable = ubcore_create_tptable(); + if (jfr->tptable == NULL) { + (void)dev->ops->destroy_jfr(jfr); + ubcore_log_err("Failed to create tp table in the jfr.\n"); + return NULL; + } + } + atomic_set(&jfr->use_cnt, 0); + + if (ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFR], &jfr->hnode, jfr->id) != 0) { + ubcore_destroy_tptable(&jfr->tptable); + (void)dev->ops->destroy_jfr(jfr); + ubcore_log_err("Failed to add jfr.\n"); + return NULL; + } + + atomic_inc(&cfg->jfc->use_cnt); + return jfr; +} +EXPORT_SYMBOL(ubcore_create_jfr); + +int ubcore_modify_jfr(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata) +{ + 
struct ubcore_device *dev; + uint32_t jfr_id; + int ret; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops->modify_jfr == NULL) + return -EINVAL; + + jfr_id = jfr->id; + dev = jfr->ub_dev; + ret = dev->ops->modify_jfr(jfr, attr, udata); + if (ret < 0) + ubcore_log_err("UBEP failed to modify jfr, jfr_id:%u.\n", jfr_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jfr); + +int ubcore_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr) +{ + struct ubcore_device *dev; + uint32_t jfr_id; + int ret; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops->query_jfr == NULL) + return -EINVAL; + + jfr_id = jfr->id; + dev = jfr->ub_dev; + ret = dev->ops->query_jfr(jfr, cfg, attr); + if (ret < 0) + ubcore_log_err("UBEP failed to query jfr, jfr_id:%u.\n", jfr_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_query_jfr); + +int ubcore_delete_jfr(struct ubcore_jfr *jfr) +{ + struct ubcore_device *dev; + struct ubcore_jfc *jfc; + uint32_t jfr_id; + int ret; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops->destroy_jfr == NULL) + return -EINVAL; + + if (WARN_ON_ONCE(atomic_read(&jfr->use_cnt))) + return -EBUSY; + + jfc = jfr->jfr_cfg.jfc; + jfr_id = jfr->id; + dev = jfr->ub_dev; + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFR], &jfr->hnode); + ubcore_destroy_tptable(&jfr->tptable); + ret = dev->ops->destroy_jfr(jfr); + if (ret < 0) + ubcore_log_err("UBEP failed to destroy jfr, jfr_id:%u.\n", jfr_id); + else + atomic_dec(&jfc->use_cnt); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfr); + +struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, + const struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_tjetty *tjfr; + + if (dev == NULL || cfg == NULL || dev->ops->import_jfr == NULL || + dev->ops->unimport_jfr == NULL || !ubcore_have_tp_ops(dev)) + return NULL; + + tjfr = dev->ops->import_jfr(dev, cfg, udata); + if (tjfr == NULL) { + ubcore_log_err("UBEP failed 
to import jfr, jfr_id:%u.\n", cfg->id.id); + return NULL; + } + tjfr->cfg = *cfg; + tjfr->ub_dev = dev; + tjfr->uctx = ubcore_get_uctx(udata); + tjfr->type = UBCORE_JFR; + atomic_set(&tjfr->use_cnt, 0); + + tjfr->tp = NULL; + + return tjfr; +} +EXPORT_SYMBOL(ubcore_import_jfr); + +int ubcore_unimport_jfr(struct ubcore_tjetty *tjfr) +{ + struct ubcore_device *dev; + + if (tjfr == NULL || tjfr->ub_dev == NULL || tjfr->ub_dev->ops->unimport_jfr == NULL || + !ubcore_have_tp_ops(tjfr->ub_dev)) + return -1; + + dev = tjfr->ub_dev; + + return dev->ops->unimport_jfr(tjfr); +} +EXPORT_SYMBOL(ubcore_unimport_jfr); + +static int check_and_fill_jetty_attr(struct ubcore_jetty_cfg *cfg, + const struct ubcore_jetty_cfg *user) +{ + if (cfg->jfs_depth < user->jfs_depth || cfg->max_send_sge < user->max_send_sge || + cfg->max_send_rsge < user->max_send_rsge || + cfg->max_inline_data < user->max_inline_data) { + ubcore_log_err("send attributes are not qualified.\n"); + return -1; + } + if (cfg->jfr_depth < user->jfr_depth || cfg->max_recv_sge < user->max_recv_sge) { + ubcore_log_err("recv attributes are not qualified.\n"); + return -1; + } + /* store the immutable and skip the driver updated send and recv attributes */ + cfg->flag = user->flag; + cfg->send_jfc = user->send_jfc; + cfg->recv_jfc = user->recv_jfc; + cfg->jfr = user->jfr; + cfg->priority = user->priority; + cfg->retry_cnt = user->retry_cnt; + cfg->rnr_retry = user->rnr_retry; + cfg->err_timeout = user->err_timeout; + cfg->min_rnr_timer = user->min_rnr_timer; + cfg->trans_mode = user->trans_mode; + cfg->jetty_context = user->jetty_context; + cfg->ukey = user->ukey; + return 0; +} + +struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, + const struct ubcore_jetty_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jetty *jetty; + + if (dev == NULL || cfg == NULL || dev->ops->create_jetty == NULL || + dev->ops->destroy_jetty == NULL) + return NULL; + + jetty = 
dev->ops->create_jetty(dev, cfg, udata); + if (jetty == NULL) { + ubcore_log_err("failed to create jetty.\n"); + return NULL; + } + if (check_and_fill_jetty_attr(&jetty->jetty_cfg, cfg) != 0) { + ubcore_log_err("jetty cfg is not qualified.\n"); + (void)dev->ops->destroy_jetty(jetty); + return NULL; + } + jetty->ub_dev = dev; + jetty->uctx = ubcore_get_uctx(udata); + jetty->jfae_handler = jfae_handler; + if (ubcore_jetty_need_advise(jetty) || jetty->jetty_cfg.trans_mode == UBCORE_TP_RC) { + jetty->tptable = ubcore_create_tptable(); + if (jetty->tptable == NULL) { + ubcore_log_err("Failed to create tp table in the jetty.\n"); + (void)dev->ops->destroy_jetty(jetty); + return NULL; + } + } else { + jetty->tptable = NULL; /* To prevent kernel-mode drivers, malloc is not empty */ + } + atomic_set(&jetty->use_cnt, 0); + + if (ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode, jetty->id) != 0) { + ubcore_destroy_tptable(&jetty->tptable); + (void)dev->ops->destroy_jetty(jetty); + ubcore_log_err("Failed to add jetty.\n"); + } + + atomic_inc(&cfg->send_jfc->use_cnt); + atomic_inc(&cfg->recv_jfc->use_cnt); + if (cfg->jfr) + atomic_inc(&cfg->jfr->use_cnt); + return jetty; +} +EXPORT_SYMBOL(ubcore_create_jetty); + +int ubcore_modify_jetty(struct ubcore_jetty *jetty, const struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + uint32_t jetty_id; + int ret; + + if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops->modify_jetty == NULL || + attr == NULL) + return -EINVAL; + + jetty_id = jetty->id; + dev = jetty->ub_dev; + + ret = dev->ops->modify_jetty(jetty, attr, udata); + if (ret < 0) + ubcore_log_err("UBEP failed to modify jetty, jetty_id:%u.\n", jetty_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jetty); + +int ubcore_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr) +{ + struct ubcore_device *dev; + uint32_t jetty_id; + int ret; + + if (jetty == 
NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops->query_jetty == NULL) + return -EINVAL; + + jetty_id = jetty->id; + dev = jetty->ub_dev; + ret = dev->ops->query_jetty(jetty, cfg, attr); + if (ret < 0) + ubcore_log_err("UBEP failed to query jetty, jetty_id:%u.\n", jetty_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_query_jetty); + +int ubcore_delete_jetty(struct ubcore_jetty *jetty) +{ + struct ubcore_jfc *send_jfc; + struct ubcore_jfc *recv_jfc; + struct ubcore_device *dev; + struct ubcore_jfr *jfr; + uint32_t jetty_id; + int ret; + + if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops->destroy_jetty == NULL) + return -1; + + send_jfc = jetty->jetty_cfg.send_jfc; + recv_jfc = jetty->jetty_cfg.recv_jfc; + jfr = jetty->jetty_cfg.jfr; + jetty_id = jetty->id; + dev = jetty->ub_dev; + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JETTY], &jetty->hnode); + ubcore_destroy_tptable(&jetty->tptable); + ret = dev->ops->destroy_jetty(jetty); + if (ret < 0) { + ubcore_log_err("UBEP failed to destroy jetty, jetty_id:%u.\n", jetty_id); + } else { + if (send_jfc) + atomic_dec(&send_jfc->use_cnt); + if (recv_jfc) + atomic_dec(&recv_jfc->use_cnt); + if (jfr) + atomic_dec(&jfr->use_cnt); + } + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jetty); + +int ubcore_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->flush_jetty == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->flush_jetty(jetty, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_flush_jetty); + +struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, + const struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_tjetty *tjetty; + + if (dev == NULL || cfg == NULL || dev->ops->import_jetty == NULL || + dev->ops->unimport_jetty == NULL || 
!ubcore_have_tp_ops(dev)) + return NULL; + + tjetty = dev->ops->import_jetty(dev, cfg, udata); + if (tjetty == NULL) { + ubcore_log_err("UBEP failed to import jetty, jetty_id:%u.\n", cfg->id.id); + return NULL; + } + tjetty->cfg = *cfg; + tjetty->ub_dev = dev; + tjetty->uctx = ubcore_get_uctx(udata); + tjetty->type = UBCORE_JETTY; + atomic_set(&tjetty->use_cnt, 0); + + mutex_init(&tjetty->lock); + tjetty->tp = NULL; + + return tjetty; +} +EXPORT_SYMBOL(ubcore_import_jetty); + +int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty) +{ + struct ubcore_device *dev; + + if (tjetty == NULL || tjetty->ub_dev == NULL || + tjetty->ub_dev->ops->unimport_jetty == NULL || !ubcore_have_tp_ops(tjetty->ub_dev)) + return -1; + + dev = tjetty->ub_dev; + + return dev->ops->unimport_jetty(tjetty); +} +EXPORT_SYMBOL(ubcore_unimport_jetty); + +static int ubcore_advice_jfs_tjfr(struct ubcore_tp_advice *advice, struct ubcore_jfs *jfs, + struct ubcore_tjetty *tjfr) +{ + (void)memset(advice, 0, sizeof(struct ubcore_tp_advice)); + advice->meta.ht = ubcore_get_tptable(jfs->tptable); + if (advice->meta.ht == NULL) { + ubcore_log_err("tp table has already been destroyed"); + return -1; + } + + advice->ta.type = UBCORE_TA_JFS_TJFR; + advice->ta.jfs = jfs; + advice->ta.tjetty_id = tjfr->cfg.id; + + ubcore_init_tp_key_jetty_id(&advice->meta.key, &tjfr->cfg.id); + advice->meta.hash = ubcore_get_jetty_hash(&tjfr->cfg.id); + return 0; +} + +static int ubcore_advice_jetty_tjetty(struct ubcore_tp_advice *advice, struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty) +{ + (void)memset(advice, 0, sizeof(struct ubcore_tp_advice)); + advice->meta.ht = ubcore_get_tptable(jetty->tptable); + if (advice->meta.ht == NULL) { + ubcore_log_err("tp table has already been destroyed"); + return -1; + } + + advice->ta.type = UBCORE_TA_JETTY_TJETTY; + advice->ta.jetty = jetty; + advice->ta.tjetty_id = tjetty->cfg.id; + + ubcore_init_tp_key_jetty_id(&advice->meta.key, &tjetty->cfg.id); + advice->meta.hash = 
ubcore_get_jetty_hash(&tjetty->cfg.id); + return 0; +} + +static inline void ubcore_put_advice(const struct ubcore_tp_advice *advice) +{ + ubcore_put_tptable(advice->meta.ht); +} + +int ubcore_advise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr, + struct ubcore_udata *udata) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jfs == NULL || tjfr == NULL || !ubcore_have_tp_ops(jfs->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if (!ubcore_jfs_tjfr_need_advise(jfs, tjfr)) { + ubcore_log_err("The transport mode is not rm.\n"); + return -1; + } + + ret = ubcore_advice_jfs_tjfr(&advice, jfs, tjfr); + if (ret != 0) + return ret; + + ret = ubcore_advise_tp(jfs->ub_dev, &tjfr->cfg.id.eid, &advice, udata); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_advise_jfr); + +int ubcore_unadvise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jfs == NULL || tjfr == NULL || !ubcore_have_tp_ops(jfs->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if (!ubcore_jfs_tjfr_need_advise(jfs, tjfr)) { + ubcore_log_err("The transport mode is not rm.\n"); + return -1; + } + + ret = ubcore_advice_jfs_tjfr(&advice, jfs, tjfr); + if (ret != 0) + return ret; + + ret = ubcore_unadvise_tp(jfs->ub_dev, &advice); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_unadvise_jfr); + +int ubcore_advise_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jetty == NULL || tjetty == NULL || !ubcore_have_tp_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if (!ubcore_jetty_tjetty_need_advise(jetty, tjetty)) { + ubcore_log_err("The transport mode is not rm.\n"); + return -1; + } + + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + /* alpha version, IB transport type and RM 
tp mode */ + ret = ubcore_advise_tp(jetty->ub_dev, &tjetty->cfg.id.eid, &advice, udata); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_advise_jetty); + +int ubcore_unadvise_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jetty == NULL || tjetty == NULL || !ubcore_have_tp_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if (!ubcore_jetty_tjetty_need_advise(jetty, tjetty)) { + ubcore_log_err("The transport mode is not rm.\n"); + return -1; + } + + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_unadvise_tp(jetty->ub_dev, &advice); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_unadvise_jetty); + +int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jetty == NULL || tjetty == NULL || !ubcore_have_tp_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -1; + } + if (jetty->remote_jetty != NULL) { + ubcore_log_err("The same jetty, different tjetty, prevent duplicate bind.\n"); + return -1; + } + + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_bind_tp(jetty, tjetty, &advice, udata); + + ubcore_put_advice(&advice); + if (ret != 0) { + ubcore_log_err("Failed to setup tp connection.\n"); + return ret; + } + jetty->remote_jetty = tjetty; + return 0; +} +EXPORT_SYMBOL(ubcore_bind_jetty); + +int ubcore_unbind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jetty == NULL || tjetty == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } 
+ if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -1; + } + + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_unbind_tp(jetty, tjetty, &advice); + ubcore_put_advice(&advice); + if (ret != 0) + ubcore_log_err("Failed to destroy jetty tp.\n"); + + jetty->remote_jetty = NULL; + return ret; +} +EXPORT_SYMBOL(ubcore_unbind_jetty); + +struct ubcore_jetty *ubcore_find_jetty(struct ubcore_device *dev, uint32_t jetty_id) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JETTY], jetty_id, &jetty_id); +} +EXPORT_SYMBOL(ubcore_find_jetty); diff --git a/drivers/ub/urma/ubcore/ubcore_main.c b/drivers/ub/urma/ubcore/ubcore_main.c index db1f82d62275b4deccb05123d3a79696fae6604f..733aff02e61c70342fc2461358001b5370a87c0e 100644 --- a/drivers/ub/urma/ubcore/ubcore_main.c +++ b/drivers/ub/urma/ubcore/ubcore_main.c @@ -32,6 +32,7 @@ #include "ubcore_log.h" #include "ubcore_netlink.h" #include +#include #include #include "ubcore_priv.h" @@ -153,6 +154,82 @@ static int ubcore_cmd_put_uasid(struct ubcore_cmd_hdr *hdr) return 0; } +static void ubcore_set_utp_cfg(struct ubcore_cmd_set_utp *arg, struct ubcore_utp_attr *attr, + union ubcore_utp_attr_mask *mask) +{ + attr->flag.bs.spray_en = arg->in.spray_en; + attr->data_udp_start = arg->in.data_udp_start; + attr->udp_range = arg->in.udp_range; + mask->bs.flag = 1; + mask->bs.udp_port = 1; + mask->bs.udp_range = 1; +} + +static int ubcore_cmd_set_utp(struct ubcore_cmd_hdr *hdr) +{ + enum ubcore_transport_type trans_type; + union ubcore_utp_attr_mask mask = { 0 }; + struct ubcore_cmd_set_utp arg; + struct ubcore_utp_attr attr; + struct ubcore_device *dev; + union ubcore_eid eid; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_set_utp)); + if (ret != 0) + return -EPERM; + + (void)memcpy(eid.raw, arg.in.eid, 
UBCORE_EID_SIZE); + trans_type = arg.in.transport_type; + dev = ubcore_find_device(&eid, trans_type); + if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { + ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", + dev == NULL ? "NULL" : dev->dev_name, arg.in.dev_name); + return -EINVAL; + } + + ubcore_set_utp_cfg(&arg, &attr, &mask); + if (ubcore_config_utp(dev, &eid, &attr, mask) != 0) { + ubcore_log_err("config utp failed.\n"); + ubcore_put_device(dev); + return -EPERM; + } + ubcore_put_device(dev); + return 0; +} + +static int ubcore_cmd_show_utp(struct ubcore_cmd_hdr *hdr) +{ + enum ubcore_transport_type trans_type; + struct ubcore_cmd_show_utp arg; + struct ubcore_device *dev; + union ubcore_eid eid; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_show_utp)); + if (ret != 0) + return -EPERM; + + (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); + trans_type = arg.in.transport_type; + + dev = ubcore_find_device(&eid, trans_type); + if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { + ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", + dev == NULL ? 
"NULL" : dev->dev_name, arg.in.dev_name); + return -EINVAL; + } + if (ubcore_show_utp(dev, &eid) != 0) { + ubcore_log_err("show utp failed.\n"); + ubcore_put_device(dev); + return -EPERM; + } + ubcore_put_device(dev); + return 0; +} + static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) { enum ubcore_transport_type trans_type; @@ -180,7 +257,7 @@ static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) key.type = (uint8_t)arg.in.type; key.key = arg.in.key; - val.addr = (uint64_t)&com_val; + val.addr = (uintptr_t)&com_val; val.len = sizeof(struct ubcore_stats_com_val); ret = ubcore_query_stats(dev, &key, &val); @@ -195,6 +272,305 @@ static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) sizeof(struct ubcore_cmd_query_stats)); } +static uint32_t ubcore_get_query_res_len(uint32_t type) +{ + switch (type) { + case UBCORE_RES_KEY_UPI: + return (uint32_t)sizeof(struct ubcore_res_upi_val); + case UBCORE_RES_KEY_TP: + return (uint32_t)sizeof(struct ubcore_res_tp_val); + case UBCORE_RES_KEY_TPG: + return (uint32_t)sizeof(struct ubcore_res_tpg_val); + case UBCORE_RES_KEY_UTP: + return (uint32_t)sizeof(struct ubcore_res_utp_val); + case UBCORE_RES_KEY_JFS: + return (uint32_t)sizeof(struct ubcore_res_jfs_val); + case UBCORE_RES_KEY_JFR: + return (uint32_t)sizeof(struct ubcore_res_jfr_val); + case UBCORE_RES_KEY_JETTY: + return (uint32_t)sizeof(struct ubcore_res_jetty_val); + case UBCORE_RES_KEY_JETTY_GROUP: + return (uint32_t)sizeof(struct ubcore_res_jetty_group_val); + case UBCORE_RES_KEY_JFC: + return (uint32_t)sizeof(struct ubcore_res_jfc_val); + case UBCORE_RES_KEY_SEG: + return (uint32_t)sizeof(struct ubcore_res_seg_val); + case UBCORE_RES_KEY_URMA_DEV: + return (uint32_t)sizeof(struct ubcore_res_dev_val); + default: + break; + } + return 0; +} + +static void ubcore_dealloc_res_dev(struct ubcore_res_dev_val *ubcore_addr) +{ + if (ubcore_addr->seg_list != NULL) { + vfree(ubcore_addr->seg_list); + ubcore_addr->seg_list = NULL; + } + if 
(ubcore_addr->jfs_list != NULL) { + vfree(ubcore_addr->jfs_list); + ubcore_addr->jfs_list = NULL; + } + if (ubcore_addr->jfr_list != NULL) { + vfree(ubcore_addr->jfr_list); + ubcore_addr->jfr_list = NULL; + } + if (ubcore_addr->jfc_list != NULL) { + vfree(ubcore_addr->jfc_list); + ubcore_addr->jfc_list = NULL; + } + if (ubcore_addr->jetty_list != NULL) { + vfree(ubcore_addr->jetty_list); + ubcore_addr->jetty_list = NULL; + } + if (ubcore_addr->jetty_group_list != NULL) { + vfree(ubcore_addr->jetty_group_list); + ubcore_addr->jetty_group_list = NULL; + } + if (ubcore_addr->tp_list != NULL) { + vfree(ubcore_addr->tp_list); + ubcore_addr->tp_list = NULL; + } + if (ubcore_addr->tpg_list != NULL) { + vfree(ubcore_addr->tpg_list); + ubcore_addr->tpg_list = NULL; + } + if (ubcore_addr->utp_list != NULL) { + vfree(ubcore_addr->utp_list); + ubcore_addr->utp_list = NULL; + } +} + +static int ubcore_fill_res_addr(struct ubcore_res_dev_val *ubcore_addr) +{ + ubcore_addr->seg_list = vmalloc(sizeof(struct ubcore_seg_info) * ubcore_addr->seg_cnt); + if (ubcore_addr->seg_list == NULL) + return -ENOMEM; + + ubcore_addr->jfs_list = vmalloc(sizeof(uint32_t) * ubcore_addr->jfs_cnt); + if (ubcore_addr->jfs_list == NULL) + goto free_seg_list; + + ubcore_addr->jfr_list = vmalloc(sizeof(uint32_t) * ubcore_addr->jfr_cnt); + if (ubcore_addr->jfr_list == NULL) + goto free_jfs_list; + + ubcore_addr->jfc_list = vmalloc(sizeof(uint32_t) * ubcore_addr->jfc_cnt); + if (ubcore_addr->jfc_list == NULL) + goto free_jfr_list; + + ubcore_addr->jetty_list = vmalloc(sizeof(uint32_t) * ubcore_addr->jetty_cnt); + if (ubcore_addr->jetty_list == NULL) + goto free_jfc_list; + + ubcore_addr->jetty_group_list = vmalloc(sizeof(uint32_t) * ubcore_addr->jetty_group_cnt); + if (ubcore_addr->jetty_group_list == NULL) + goto free_jetty_list; + + ubcore_addr->tp_list = vmalloc(sizeof(uint32_t) * ubcore_addr->tp_cnt); + if (ubcore_addr->tp_list == NULL) + goto free_jetty_group_list; + + ubcore_addr->tpg_list = 
vmalloc(sizeof(uint32_t) * ubcore_addr->tpg_cnt); + if (ubcore_addr->tpg_list == NULL) + goto free_tp_list; + + ubcore_addr->utp_list = vmalloc(sizeof(uint32_t) * ubcore_addr->utp_cnt); + if (ubcore_addr->utp_list == NULL) + goto free_tpg_list; + + return 0; +free_tpg_list: + vfree(ubcore_addr->tpg_list); +free_tp_list: + vfree(ubcore_addr->tp_list); +free_jetty_group_list: + vfree(ubcore_addr->jetty_group_list); +free_jetty_list: + vfree(ubcore_addr->jetty_list); +free_jfc_list: + vfree(ubcore_addr->jfc_list); +free_jfr_list: + vfree(ubcore_addr->jfr_list); +free_jfs_list: + vfree(ubcore_addr->jfs_list); +free_seg_list: + vfree(ubcore_addr->seg_list); + return -ENOMEM; +} + +static int ubcore_fill_user_res_dev(struct ubcore_res_dev_val *dev_val, + struct ubcore_res_dev_val *ubcore_addr) +{ + int ret; + + dev_val->seg_cnt = ubcore_addr->seg_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->seg_list, + ubcore_addr->seg_list, + dev_val->seg_cnt * sizeof(struct ubcore_seg_info)); + if (ret != 0) + return ret; + + dev_val->jfs_cnt = ubcore_addr->jfs_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jfs_list, + ubcore_addr->jfs_list, dev_val->jfs_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->jfr_cnt = ubcore_addr->jfr_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jfr_list, + ubcore_addr->jfr_list, dev_val->jfr_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->jfc_cnt = ubcore_addr->jfc_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jfc_list, + ubcore_addr->jfc_list, dev_val->jfc_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->jetty_cnt = ubcore_addr->jetty_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->jetty_list, + ubcore_addr->jetty_list, dev_val->jetty_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->jetty_group_cnt = ubcore_addr->jetty_group_cnt; + ret = ubcore_copy_to_user((void __user 
*)(uintptr_t)dev_val->jetty_group_list, + ubcore_addr->jetty_group_list, + dev_val->jetty_group_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->tp_cnt = ubcore_addr->tp_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->tp_list, + ubcore_addr->tp_list, dev_val->tp_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->tpg_cnt = ubcore_addr->tpg_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->tpg_list, + ubcore_addr->tpg_list, dev_val->tpg_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + dev_val->utp_cnt = ubcore_addr->utp_cnt; + ret = ubcore_copy_to_user((void __user *)(uintptr_t)dev_val->utp_list, + ubcore_addr->utp_list, dev_val->utp_cnt * sizeof(uint32_t)); + if (ret != 0) + return ret; + + return 0; +} + +static int ubcore_query_res_dev(const struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_dev_val *dev_val) +{ + struct ubcore_res_dev_val ubcore_addr = { 0 }; + struct ubcore_res_val val = { 0 }; + int ret = 0; + + (void)memcpy(&ubcore_addr, dev_val, + sizeof(struct ubcore_res_dev_val)); // save + + if (ubcore_fill_res_addr(&ubcore_addr) != 0) { + ubcore_log_err("Failed to fill dev dev_val.\n"); + return -ENOMEM; + } + + val.addr = (uintptr_t)&ubcore_addr; + val.len = sizeof(struct ubcore_res_dev_val); + + ret = ubcore_query_resource(dev, key, &val); + if (ret != 0) + goto ubcore_free_dev; + + ret = ubcore_fill_user_res_dev(dev_val, &ubcore_addr); +ubcore_free_dev: + ubcore_dealloc_res_dev(&ubcore_addr); + return ret; +} + +static int ubcore_query_res_arg(const struct ubcore_device *dev, struct ubcore_cmd_query_res *arg, + uint32_t res_len) +{ + struct ubcore_res_key key = { 0 }; + struct ubcore_res_val val = { 0 }; + void *addr; + int ret; + + addr = kzalloc(res_len, GFP_KERNEL); + if (addr == NULL) + return -1; + + ret = ubcore_copy_from_user(addr, (void __user *)(uintptr_t)arg->out.addr, res_len); + if (ret != 0) + goto kfree_addr; + + key.type = 
(uint8_t)arg->in.type; + key.key = arg->in.key; + val.addr = (uintptr_t)addr; + val.len = res_len; + + if (arg->in.type == UBCORE_RES_KEY_URMA_DEV) + ret = ubcore_query_res_dev(dev, &key, (struct ubcore_res_dev_val *)addr); + else + ret = ubcore_query_resource(dev, &key, &val); + + if (ret != 0) + goto kfree_addr; + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)arg->out.addr, addr, res_len); + +kfree_addr: + kfree(addr); + return ret; +} + +static int ubcore_cmd_query_res(struct ubcore_cmd_hdr *hdr) +{ + enum ubcore_transport_type trans_type; + struct ubcore_cmd_query_res arg = { 0 }; + struct ubcore_device *dev; + union ubcore_eid eid; + uint32_t res_len; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_query_res)); + if (ret != 0) + return ret; + + res_len = ubcore_get_query_res_len((uint32_t)arg.in.type); + if (res_len != arg.out.len) { + ubcore_log_err("Failed to check res len, type: %u, res_len: %u, len: %u.\n", + (uint32_t)arg.in.type, res_len, arg.out.len); + return -1; + } + (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); + trans_type = (enum ubcore_transport_type)arg.in.tp_type; + + dev = ubcore_find_device(&eid, trans_type); + if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { + ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", + dev == NULL ? 
"NULL" : dev->dev_name, arg.in.dev_name); + return -EINVAL; + } + + ret = ubcore_query_res_arg(dev, &arg, res_len); + if (ret != 0) { + ubcore_put_device(dev); + ubcore_log_err("Failed to query res by arg, tp_type: %d.\n", (int)trans_type); + return -1; + } + + ubcore_put_device(dev); + return ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_query_res)); +} + static int ubcore_cmd_parse(struct ubcore_cmd_hdr *hdr) { switch (hdr->command) { @@ -202,8 +578,14 @@ static int ubcore_cmd_parse(struct ubcore_cmd_hdr *hdr) return ubcore_cmd_set_uasid(hdr); case UBCORE_CMD_PUT_UASID: return ubcore_cmd_put_uasid(hdr); + case UBCORE_CMD_SET_UTP: + return ubcore_cmd_set_utp(hdr); + case UBCORE_CMD_SHOW_UTP: + return ubcore_cmd_show_utp(hdr); case UBCORE_CMD_QUERY_STATS: return ubcore_cmd_query_stats(hdr); + case UBCORE_CMD_QUERY_RES: + return ubcore_cmd_query_res(hdr); default: ubcore_log_err("bad ubcore command: %d.\n", (int)hdr->command); return -EINVAL; diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.c b/drivers/ub/urma/ubcore/ubcore_netlink.c index 15cdff268966536e0f36b42f999adb77cb3cbdc8..985424f8bba6a8c9ca5dd8c685d155923d041905 100644 --- a/drivers/ub/urma/ubcore/ubcore_netlink.c +++ b/drivers/ub/urma/ubcore/ubcore_netlink.c @@ -23,6 +23,7 @@ #include #include #include "ubcore_log.h" +#include "ubcore_tp.h" #include "ubcore_netlink.h" #define UBCORE_NL_TYPE 24 /* same with agent netlink type */ @@ -30,8 +31,131 @@ #define UBCORE_NL_INVALID_PORT 0 struct sock *nl_sock; +static LIST_HEAD(g_nl_session_list); +static DEFINE_SPINLOCK(g_nl_session_lock); +atomic_t g_nlmsg_seq; static uint32_t g_agent_port = UBCORE_NL_INVALID_PORT; /* get agent pid */ +static int ubcore_nl_send(struct ubcore_nlmsg *pbuf, uint16_t len); + +static uint32_t ubcore_get_nlmsg_seq(void) +{ + return atomic_inc_return(&g_nlmsg_seq); +} + +static struct ubcore_nl_session *ubcore_create_nl_session(struct ubcore_nlmsg *req) +{ + struct ubcore_nl_session *s; 
+ unsigned long flags; + + s = kzalloc(sizeof(struct ubcore_nl_session), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->req = req; + spin_lock_irqsave(&g_nl_session_lock, flags); + list_add_tail(&s->node, &g_nl_session_list); + spin_unlock_irqrestore(&g_nl_session_lock, flags); + kref_init(&s->kref); + init_completion(&s->comp); + return s; +} + +static void ubcore_free_nl_session(struct kref *kref) +{ + struct ubcore_nl_session *s = container_of(kref, struct ubcore_nl_session, kref); + unsigned long flags; + + spin_lock_irqsave(&g_nl_session_lock, flags); + list_del(&s->node); + spin_unlock_irqrestore(&g_nl_session_lock, flags); + kfree(s); +} + +static inline void ubcore_destroy_nl_session(struct ubcore_nl_session *s) +{ + kref_put(&s->kref, ubcore_free_nl_session); +} + +static struct ubcore_nl_session *ubcore_find_nl_session(uint32_t nlmsg_seq) +{ + struct ubcore_nl_session *tmp, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_nl_session_lock, flags); + list_for_each_entry(tmp, &g_nl_session_list, node) { + if (tmp->req->nlmsg_seq == nlmsg_seq) { + target = tmp; + kref_get(&target->kref); + break; + } + } + spin_unlock_irqrestore(&g_nl_session_lock, flags); + return target; +} + +static struct ubcore_nlmsg *ubcore_get_nlmsg_data(struct nlmsghdr *nlh) +{ + struct ubcore_nlmsg *msg; + + msg = kzalloc(nlmsg_len(nlh), GFP_KERNEL); + if (msg == NULL) + return NULL; + + (void)memcpy(msg, nlmsg_data(nlh), nlmsg_len(nlh)); + return msg; +} + +static void ubcore_nl_handle_tp_resp(struct nlmsghdr *nlh) +{ + struct ubcore_nl_session *s; + struct ubcore_nlmsg *resp; + + resp = ubcore_get_nlmsg_data(nlh); + if (resp == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return; + } + s = ubcore_find_nl_session(resp->nlmsg_seq); + if (s == NULL) { + ubcore_log_err("Failed to find nl session with seq %u", resp->nlmsg_seq); + kfree(resp); + return; + } + s->resp = resp; + kref_put(&s->kref, ubcore_free_nl_session); + complete(&s->comp); +} + 
+static void ubcore_nl_handle_tp_req(struct nlmsghdr *nlh) +{ + struct ubcore_nlmsg *resp = NULL; + struct ubcore_nlmsg *req; + + req = ubcore_get_nlmsg_data(nlh); + if (req == NULL) { + ubcore_log_err("Failed to calloc and copy req"); + return; + } + if (nlh->nlmsg_type == UBCORE_NL_CREATE_TP_REQ) + resp = ubcore_handle_create_tp_req(req); + else if (nlh->nlmsg_type == UBCORE_NL_DESTROY_TP_REQ) + resp = ubcore_handle_destroy_tp_req(req); + else if (nlh->nlmsg_type == UBCORE_NL_RESTORE_TP_REQ) + resp = ubcore_handle_restore_tp_req(req); + + if (resp == NULL) { + ubcore_log_err("Failed to handle tp req"); + kfree(req); + return; + } + if (ubcore_nl_send(resp, ubcore_nlmsg_len(resp)) != 0) + ubcore_log_err("Failed to send response"); + + kfree(req); + kfree(resp); +} + static void ubcore_nl_cb_func(struct sk_buff *skb) { struct nlmsghdr *nlh; @@ -43,15 +167,89 @@ static void ubcore_nl_cb_func(struct sk_buff *skb) } switch (nlh->nlmsg_type) { + case UBCORE_NL_CREATE_TP_REQ: + case UBCORE_NL_DESTROY_TP_REQ: + case UBCORE_NL_RESTORE_TP_REQ: + ubcore_nl_handle_tp_req(nlh); + break; + case UBCORE_NL_CREATE_TP_RESP: + case UBCORE_NL_DESTROY_TP_RESP: + case UBCORE_NL_QUERY_TP_RESP: + case UBCORE_NL_RESTORE_TP_RESP: + ubcore_nl_handle_tp_resp(nlh); + break; case UBCORE_NL_SET_AGENT_PID: g_agent_port = nlh->nlmsg_pid; break; + case UBCORE_NL_QUERY_TP_REQ: default: ubcore_log_err("Unexpected nl msg type: %d received\n", nlh->nlmsg_type); break; } } +static int ubcore_nl_send(struct ubcore_nlmsg *pbuf, uint16_t len) +{ + struct sk_buff *nl_skb; + struct nlmsghdr *nlh; + int ret; + + if (pbuf == NULL || g_agent_port == UBCORE_NL_INVALID_PORT) { + ubcore_log_err("There are illegal parameters.\n"); + return -1; + } + + /* create sk_buff */ + nl_skb = nlmsg_new(len, GFP_ATOMIC); + if (nl_skb == NULL) { + ubcore_log_err("failed to alloc.\n"); + return -1; + } + /* set netlink head */ + nlh = nlmsg_put(nl_skb, 0, pbuf->nlmsg_seq, pbuf->msg_type, len, 0); + if (nlh == NULL) { + 
ubcore_log_err("Failed to nlmsg put.\n"); + nlmsg_free(nl_skb); + return -1; + } + /* copy msg */ + (void)memcpy(nlmsg_data(nlh), pbuf, len); + ret = netlink_unicast(nl_sock, nl_skb, g_agent_port, 0); + return ret < 0 ? ret : 0; +} + +struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_nlmsg *req) +{ + unsigned long leavetime; + struct ubcore_nl_session *s; + struct ubcore_nlmsg *resp; + int ret; + + req->nlmsg_seq = ubcore_get_nlmsg_seq(); + s = ubcore_create_nl_session(req); + if (s == NULL) { + ubcore_log_err("Failed to create nl session"); + return NULL; + } + + ret = ubcore_nl_send(req, ubcore_nlmsg_len(req)); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg %d", ret); + ubcore_destroy_nl_session(s); + return NULL; + } + + leavetime = wait_for_completion_timeout(&s->comp, msecs_to_jiffies(UBCORE_NL_TIMEOUT)); + if (leavetime == 0) { + ubcore_log_err("Failed to wait reply, ret: %d, leavetime: %lu\n", ret, leavetime); + ubcore_destroy_nl_session(s); + return NULL; + } + resp = s->resp; + ubcore_destroy_nl_session(s); + return resp; +} + int ubcore_netlink_init(void) { /* create netlink socket */ diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.h b/drivers/ub/urma/ubcore/ubcore_netlink.h index b07ba64a67f022da3aacb692d33be0291816917b..1bdda997f96a3c2eb5bc9f266f8be8b8fa8db858 100644 --- a/drivers/ub/urma/ubcore/ubcore_netlink.h +++ b/drivers/ub/urma/ubcore/ubcore_netlink.h @@ -48,6 +48,84 @@ struct ubcore_nlmsg { uint8_t payload[0]; } __packed; +struct ubcore_ta_data { + enum ubcore_ta_type type; + struct ubcore_jetty_id jetty_id; /* local jetty id */ + struct ubcore_jetty_id tjetty_id; /* peer jetty id */ +}; + +struct ubcore_multipath_tp_cfg { + union ubcore_tp_flag flag; + uint16_t data_rctp_start; + uint16_t ack_rctp_start; + uint16_t data_rmtp_start; + uint16_t ack_rmtp_start; + uint8_t tp_range; + uint16_t congestion_alg; +}; + +struct ubcore_nl_create_tp_req { + uint32_t tpn; + struct ubcore_net_addr local_net_addr; + struct ubcore_net_addr 
peer_net_addr; + enum ubcore_transport_mode trans_mode; + struct ubcore_multipath_tp_cfg cfg; + uint32_t rx_psn; + enum ubcore_mtu mtu; + struct ubcore_ta_data ta; + uint32_t ext_len; + uint32_t udrv_in_len; + uint8_t ext_udrv[0]; /* struct ubcore_tp_ext->len + struct ubcore_udrv_priv->in_len */ +}; + +struct ubcore_nl_create_tp_resp { + enum ubcore_nl_resp_status ret; + union ubcore_tp_flag flag; + uint32_t peer_tpn; + uint32_t peer_rx_psn; + enum ubcore_mtu peer_mtu; + uint32_t peer_ext_len; + uint8_t peer_ext[0]; /* struct ubcore_tp_ext->len */ +}; + +struct ubcore_nl_destroy_tp_req { + uint32_t tpn; + uint32_t peer_tpn; + enum ubcore_transport_mode trans_mode; + struct ubcore_ta_data ta; +}; + +struct ubcore_nl_destroy_tp_resp { + enum ubcore_nl_resp_status ret; +}; + +struct ubcore_nl_query_tp_req { + enum ubcore_transport_mode trans_mode; +}; + +struct ubcore_nl_query_tp_resp { + enum ubcore_nl_resp_status ret; + bool tp_exist; + uint32_t tpn; /* must set if tp exist is true */ + union ubcore_eid dst_eid; /* underlay */ + struct ubcore_net_addr src_addr; /* underlay */ + struct ubcore_net_addr dst_addr; /* underlay */ + struct ubcore_multipath_tp_cfg cfg; +}; + +struct ubcore_nl_restore_tp_req { + enum ubcore_transport_mode trans_mode; + uint32_t tpn; + uint32_t peer_tpn; + uint32_t rx_psn; + struct ubcore_ta_data ta; +}; + +struct ubcore_nl_restore_tp_resp { + enum ubcore_nl_resp_status ret; + uint32_t peer_rx_psn; +}; + struct ubcore_nl_session { struct ubcore_nlmsg *req; struct ubcore_nlmsg *resp; @@ -64,4 +142,6 @@ static inline uint32_t ubcore_nlmsg_len(struct ubcore_nlmsg *msg) int ubcore_netlink_init(void); void ubcore_netlink_exit(void); +/* return response msg pointer, caller must release it */ +struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_nlmsg *req); #endif diff --git a/drivers/ub/urma/ubcore/ubcore_priv.h b/drivers/ub/urma/ubcore/ubcore_priv.h index 28c78d000cb0b9e21d3853c5fa1dee0dfa9669d5..73a3060e2d78dab85cdd5cdfaf4c64912d8c4153 100644 
--- a/drivers/ub/urma/ubcore/ubcore_priv.h +++ b/drivers/ub/urma/ubcore/ubcore_priv.h @@ -24,6 +24,11 @@ #include #include +static inline struct ubcore_ucontext *ubcore_get_uctx(struct ubcore_udata *udata) +{ + return udata == NULL ? NULL : udata->uctx; +} + static inline bool ubcore_check_dev_name_invalid(struct ubcore_device *dev, char *dev_name) { return (strcmp(dev->dev_name, dev_name) != 0); @@ -39,4 +44,56 @@ struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, void ubcore_put_devices(struct ubcore_device **devices, uint32_t cnt); void ubcore_set_default_eid(struct ubcore_device *dev); +int ubcore_config_utp(struct ubcore_device *dev, const union ubcore_eid *eid, + const struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask mask); +int ubcore_show_utp(struct ubcore_device *dev, const union ubcore_eid *eid); + +static inline uint32_t ubcore_get_jetty_hash(const struct ubcore_jetty_id *jetty_id) +{ + return jhash(jetty_id, sizeof(struct ubcore_jetty_id), 0); +} + +static inline uint32_t ubcore_get_tseg_hash(const struct ubcore_ubva *ubva) +{ + return jhash(ubva, sizeof(struct ubcore_ubva), 0); +} + +static inline uint32_t ubcore_get_eid_hash(const union ubcore_eid *eid) +{ + return jhash(eid, sizeof(union ubcore_eid), 0); +} + +static inline bool ubcore_jfs_need_advise(const struct ubcore_jfs *jfs) +{ + return jfs->ub_dev->transport_type == UBCORE_TRANSPORT_IB && + jfs->jfs_cfg.trans_mode == UBCORE_TP_RM; +} + +static inline bool ubcore_jfs_tjfr_need_advise(const struct ubcore_jfs *jfs, + const struct ubcore_tjetty *tjfr) +{ + return jfs->ub_dev->transport_type == UBCORE_TRANSPORT_IB && + jfs->jfs_cfg.trans_mode == UBCORE_TP_RM && tjfr->cfg.trans_mode == UBCORE_TP_RM; +} + +static inline bool ubcore_jetty_need_advise(const struct ubcore_jetty *jetty) +{ + return jetty->ub_dev->transport_type == UBCORE_TRANSPORT_IB && + jetty->jetty_cfg.trans_mode == UBCORE_TP_RM; +} + +static inline bool ubcore_jetty_tjetty_need_advise(const 
struct ubcore_jetty *jetty, + const struct ubcore_tjetty *tjetty) +{ + return jetty->ub_dev->transport_type == UBCORE_TRANSPORT_IB && + jetty->jetty_cfg.trans_mode == UBCORE_TP_RM && + tjetty->cfg.trans_mode == UBCORE_TP_RM; +} + +static inline bool ubcore_jfr_need_advise(const struct ubcore_jfr *jfr) +{ + return jfr->ub_dev->transport_type == UBCORE_TRANSPORT_IB && + jfr->jfr_cfg.trans_mode == UBCORE_TP_RM; +} + #endif diff --git a/drivers/ub/urma/ubcore/ubcore_segment.c b/drivers/ub/urma/ubcore/ubcore_segment.c new file mode 100644 index 0000000000000000000000000000000000000000..be37b2d893d4d5fd207ce5ddbfc2f3e54f194ce4 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_segment.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore segment + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: Yan Fangfang move segment implementation here + */ + +#include "ubcore_log.h" +#include +#include "ubcore_priv.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" + +struct ubcore_key_id *ubcore_alloc_key_id(struct ubcore_device *dev, struct ubcore_udata *udata) +{ + struct ubcore_key_id *key; + + if (dev == NULL || dev->ops->alloc_key_id == NULL || dev->ops->free_key_id == NULL) { + ubcore_log_err("invalid parameter.\n"); + return NULL; + } + + key = dev->ops->alloc_key_id(dev, udata); + if (key == NULL) { + ubcore_log_err("failed to alloc key id.\n"); + return NULL; + } + key->ub_dev = dev; + key->uctx = ubcore_get_uctx(udata); + atomic_set(&key->use_cnt, 0); + return key; +} +EXPORT_SYMBOL(ubcore_alloc_key_id); + +int ubcore_free_key_id(struct ubcore_key_id *key) +{ + struct ubcore_device *dev; + + if (key == NULL || key->ub_dev == NULL || key->ub_dev->ops->free_key_id == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + dev = key->ub_dev; + + if (WARN_ON_ONCE(atomic_read(&key->use_cnt))) + return -EBUSY; + + return dev->ops->free_key_id(key); +} +EXPORT_SYMBOL(ubcore_free_key_id); + +struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, + const struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_target_seg *tseg; + + if (dev == NULL || cfg == NULL || dev->ops->register_seg == NULL || + dev->ops->unregister_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return NULL; + } + + if ((cfg->flag.bs.access & (UBCORE_ACCESS_REMOTE_WRITE | UBCORE_ACCESS_REMOTE_ATOMIC)) && + !(cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_WRITE)) { + ubcore_log_err( + "Local write must be set when either remote write or remote atomic is declared.\n"); + return NULL; + } + + tseg = dev->ops->register_seg(dev, cfg, udata); + if (tseg == NULL) { + 
ubcore_log_err("UBEP failed to register segment with va:%llu\n", cfg->va); + return NULL; + } + + tseg->ub_dev = dev; + tseg->uctx = ubcore_get_uctx(udata); + tseg->seg.len = cfg->len; + tseg->seg.ubva.va = cfg->va; + tseg->keyid = cfg->keyid; + + (void)memcpy(tseg->seg.ubva.eid.raw, dev->attr.eid.raw, UBCORE_EID_SIZE); + (void)memcpy(&tseg->seg.attr, &cfg->flag, sizeof(union ubcore_reg_seg_flag)); + atomic_set(&tseg->use_cnt, 0); + if (tseg->keyid != NULL) + atomic_inc(&tseg->keyid->use_cnt); + + return tseg; +} +EXPORT_SYMBOL(ubcore_register_seg); + +int ubcore_unregister_seg(struct ubcore_target_seg *tseg) +{ + struct ubcore_device *dev; + int ret; + + if (tseg == NULL || tseg->ub_dev == NULL || tseg->ub_dev->ops->unregister_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + dev = tseg->ub_dev; + + if (tseg->keyid != NULL) + atomic_dec(&tseg->keyid->use_cnt); + + ret = dev->ops->unregister_seg(tseg); + return ret; +} +EXPORT_SYMBOL(ubcore_unregister_seg); + +struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, + const struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_target_seg *tseg; + + if (dev == NULL || cfg == NULL || dev->ops->import_seg == NULL || + dev->ops->unimport_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return NULL; + } + + tseg = dev->ops->import_seg(dev, cfg, udata); + if (tseg == NULL) { + ubcore_log_err("UBEP failed to import segment with va:%llu\n", cfg->seg.ubva.va); + return NULL; + } + tseg->ub_dev = dev; + tseg->uctx = ubcore_get_uctx(udata); + tseg->seg = cfg->seg; + atomic_set(&tseg->use_cnt, 0); + + return tseg; +} +EXPORT_SYMBOL(ubcore_import_seg); + +int ubcore_unimport_seg(struct ubcore_target_seg *tseg) +{ + struct ubcore_device *dev; + + if (tseg == NULL || tseg->ub_dev == NULL || tseg->ub_dev->ops->unimport_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + dev = tseg->ub_dev; + + return 
dev->ops->unimport_seg(tseg); +} +EXPORT_SYMBOL(ubcore_unimport_seg); diff --git a/drivers/ub/urma/ubcore/ubcore_tp.c b/drivers/ub/urma/ubcore/ubcore_tp.c index ca665a16a07f3f7efe31a13acd4f6c18e60412b0..34104b8f050eb541834244d3830494d402dea239 100644 --- a/drivers/ub/urma/ubcore/ubcore_tp.c +++ b/drivers/ub/urma/ubcore/ubcore_tp.c @@ -22,12 +22,44 @@ #include #include #include -#include +#include "ubcore_log.h" +#include "ubcore_netlink.h" +#include "ubcore_priv.h" #include +#include "ubcore_tp_table.h" +#include "ubcore_tp.h" #define UB_PROTOCOL_HEAD_BYTES 313 #define UB_MTU_BITS_BASE_SHIFT 7 +static inline uint32_t get_udrv_in_len(const struct ubcore_udata *udata) +{ + return ((udata == NULL || udata->udrv_data == NULL) ? 0 : udata->udrv_data->in_len); +} + +static inline int get_udrv_in_data(uint8_t *dst, uint32_t dst_len, struct ubcore_udata *udata) +{ + if (udata == NULL || udata->udrv_data == NULL || udata->udrv_data->in_len == 0) + return 0; + + if (udata->uctx != NULL) { + if (dst_len < udata->udrv_data->in_len) + return -1; + return (int)copy_from_user(dst, (void __user *)(uintptr_t)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + } else { + (void)memcpy(dst, (void *)udata->udrv_data->in_addr, udata->udrv_data->in_len); + return 0; + } +} + +static inline void ubcore_set_net_addr_with_eid(struct ubcore_net_addr *net_addr, + const union ubcore_eid *eid) +{ + memset(net_addr, 0, sizeof(struct ubcore_net_addr)); + (void)memcpy(net_addr, eid, UBCORE_EID_SIZE); +} + static inline int ubcore_mtu_enum_to_int(enum ubcore_mtu mtu) { return 1 << ((int)mtu + UB_MTU_BITS_BASE_SHIFT); @@ -54,16 +86,1690 @@ enum ubcore_mtu ubcore_get_mtu(int mtu) } EXPORT_SYMBOL(ubcore_get_mtu); +static int ubcore_get_active_mtu(const struct ubcore_device *dev, uint8_t port_num, + enum ubcore_mtu *mtu) +{ + struct ubcore_device_status st = { 0 }; + + if (port_num >= dev->attr.port_cnt || dev->ops->query_device_status == NULL) { + ubcore_log_err("Invalid parameter"); + return 
-1; + } + if (dev->ops->query_device_status(dev, &st) != 0) { + ubcore_log_err("Failed to query query_device_status for port %d", port_num); + return -1; + } + if (st.port_status[port_num].state != UBCORE_PORT_ACTIVE) { + ubcore_log_err("Port %d is not active", port_num); + return -1; + } + *mtu = st.port_status[port_num].active_mtu; + return 0; +} + +static struct ubcore_nlmsg *ubcore_alloc_nlmsg(size_t payload_len, const union ubcore_eid *src_eid, + const union ubcore_eid *dst_eid) +{ + struct ubcore_nlmsg *msg = kzalloc(sizeof(struct ubcore_nlmsg) + payload_len, GFP_KERNEL); + + if (msg == NULL) + return NULL; + + msg->src_eid = *src_eid; + msg->dst_eid = *dst_eid; + msg->payload_len = payload_len; + return msg; +} + +static struct ubcore_nlmsg *ubcore_get_destroy_tp_req(struct ubcore_tp *tp, + const struct ubcore_ta_data *ta) +{ + struct ubcore_nl_destroy_tp_req *destroy; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(sizeof(struct ubcore_nl_destroy_tp_req), &tp->local_eid, + &tp->peer_eid); + if (req == NULL) + return NULL; + + req->msg_type = UBCORE_NL_DESTROY_TP_REQ; + req->transport_type = tp->ub_dev->transport_type; + destroy = (struct ubcore_nl_destroy_tp_req *)req->payload; + destroy->trans_mode = tp->trans_mode; + destroy->tpn = tp->tpn; + destroy->peer_tpn = tp->peer_tpn; + if (ta != NULL) + destroy->ta = *ta; + else + destroy->ta.type = UBCORE_TA_NONE; + + return req; +} + +static int ubcore_init_create_tp_req(struct ubcore_nl_create_tp_req *create, struct ubcore_tp *tp, + const struct ubcore_ta_data *ta, struct ubcore_udata *udata) +{ + create->tpn = tp->tpn; + create->local_net_addr = tp->local_net_addr; + create->peer_net_addr = tp->peer_net_addr; + create->trans_mode = tp->trans_mode; + create->mtu = tp->mtu; + create->rx_psn = tp->rx_psn; + create->cfg.flag = tp->flag; + create->cfg.congestion_alg = tp->ub_dev->attr.dev_cap.congestion_ctrl_alg; + + if (ta != NULL) + create->ta = *ta; + else + create->ta.type = UBCORE_TA_NONE; + + 
create->ext_len = tp->tp_ext.len; + create->udrv_in_len = get_udrv_in_len(udata); + if (tp->tp_ext.len > 0) + (void)memcpy(create->ext_udrv, (void *)tp->tp_ext.addr, tp->tp_ext.len); + + if (get_udrv_in_data(create->ext_udrv + tp->tp_ext.len, create->udrv_in_len, udata) != 0) { + ubcore_log_err("Failed to get udrv data"); + return -1; + } + + return 0; +} + +static struct ubcore_nlmsg *ubcore_get_create_tp_req(struct ubcore_tp *tp, + struct ubcore_ta_data *ta, + struct ubcore_udata *udata) +{ + uint32_t payload_len = + sizeof(struct ubcore_nl_create_tp_req) + tp->tp_ext.len + get_udrv_in_len(udata); + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = tp->ub_dev->transport_type; + req->msg_type = UBCORE_NL_CREATE_TP_REQ; + + if (ubcore_init_create_tp_req((struct ubcore_nl_create_tp_req *)req->payload, tp, ta, + udata) != 0) { + kfree(req); + ubcore_log_err("Failed to init create tp req"); + return NULL; + } + return req; +} + +static int ubcore_set_tp_peer_ext(struct ubcore_tp_attr *attr, const uint8_t *ext_addr, + const uint32_t ext_len) +{ + void *peer_ext = NULL; + + if (ext_len == 0 || ext_addr == NULL) + return 0; + + /* copy resp ext from req or response */ + peer_ext = kzalloc(ext_len, GFP_KERNEL); + if (peer_ext == NULL) + return -ENOMEM; + + (void)memcpy(peer_ext, ext_addr, ext_len); + + attr->peer_ext.addr = (uintptr_t)peer_ext; + attr->peer_ext.len = ext_len; + return 0; +} + +static inline void ubcore_unset_tp_peer_ext(struct ubcore_tp_attr *attr) +{ + if (attr->peer_ext.addr != 0) + kfree((void *)attr->peer_ext.addr); +} + +static int ubcore_negotiate_optimal_cc_alg(uint16_t local_congestion_alg, + uint16_t peer_local_congestion_alg) +{ + int i; + + /* TODO Configure congestion control priority based on UVS */ + for (i = 0; i <= UBCORE_TP_CC_DIP; i++) { + if ((0x1 << (uint32_t)i) & local_congestion_alg & peer_local_congestion_alg) + return i; + 
} + return -1; +} + +static int ubcore_set_initiator_peer(const struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, + const struct ubcore_nl_create_tp_resp *resp) +{ + mask->value = 0; + mask->bs.flag = 1; + mask->bs.peer_tpn = 1; + mask->bs.mtu = 1; + mask->bs.tx_psn = 1; + mask->bs.state = 1; + + memset(attr, 0, sizeof(*attr)); + attr->flag.bs.oor_en = tp->flag.bs.oor_en & resp->flag.bs.oor_en; + attr->flag.bs.sr_en = tp->flag.bs.sr_en & resp->flag.bs.sr_en; + attr->flag.bs.spray_en = tp->flag.bs.spray_en & resp->flag.bs.spray_en; + attr->flag.bs.cc_en = tp->flag.bs.cc_en & resp->flag.bs.cc_en; + attr->flag.bs.cc_alg = resp->flag.bs.cc_alg; /* negotiated with the remote */ + attr->peer_tpn = resp->peer_tpn; + attr->mtu = min(tp->mtu, resp->peer_mtu); + attr->tx_psn = resp->peer_rx_psn; + attr->state = UBCORE_TP_STATE_RTS; + + if (tp->peer_ext.addr != 0) + return 0; + + mask->bs.peer_ext = 1; + return ubcore_set_tp_peer_ext(attr, resp->peer_ext, resp->peer_ext_len); +} + +static struct ubcore_nlmsg *ubcore_get_query_tp_req(struct ubcore_device *dev, + const union ubcore_eid *remote_eid, + enum ubcore_transport_mode trans_mode) +{ + uint32_t payload_len = sizeof(struct ubcore_nl_query_tp_req); + struct ubcore_nl_query_tp_req *query; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &dev->attr.eid, remote_eid); + if (req == NULL) + return NULL; + + req->transport_type = dev->transport_type; + req->msg_type = UBCORE_NL_QUERY_TP_REQ; + query = (struct ubcore_nl_query_tp_req *)req->payload; + query->trans_mode = trans_mode; + return req; +} + +static int ubcore_query_tp(struct ubcore_device *dev, const union ubcore_eid *remote_eid, + enum ubcore_transport_mode trans_mode, + struct ubcore_nl_query_tp_resp *query_tp_resp) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_nl_query_tp_resp *resp; + int ret = 0; + + req_msg = ubcore_get_query_tp_req(dev, remote_eid, trans_mode); + if (req_msg == NULL) { + 
ubcore_log_err("Failed to get query tp req"); + return -1; + } + + resp_msg = ubcore_nl_send_wait(req_msg); + if (resp_msg == NULL) { + ubcore_log_err("Failed to wait query response"); + kfree(req_msg); + return -1; + } + + resp = (struct ubcore_nl_query_tp_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_NL_QUERY_TP_RESP || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ret = -1; + ubcore_log_err("Query tp request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 1 : resp->ret)); + } else { + (void)memcpy(query_tp_resp, resp, sizeof(struct ubcore_nl_query_tp_resp)); + } + kfree(resp_msg); + kfree(req_msg); + return ret; +} + +static void ubcore_get_ta_data_from_ta(const struct ubcore_ta *ta, struct ubcore_ta_data *ta_data) +{ + struct ubcore_jetty *jetty; + struct ubcore_jfs *jfs; + + ta_data->type = ta->type; + switch (ta->type) { + case UBCORE_TA_JFS_TJFR: + jfs = ta->jfs; + ta_data->jetty_id.eid = jfs->ub_dev->attr.eid; + if (jfs->uctx != NULL) + ta_data->jetty_id.uasid = jfs->uctx->uasid; + ta_data->jetty_id.id = jfs->id; + ta_data->tjetty_id = ta->tjetty_id; + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ta->jetty; + ta_data->jetty_id.eid = jetty->ub_dev->attr.eid; + if (jetty->uctx != NULL) + ta_data->jetty_id.uasid = jetty->uctx->uasid; + ta_data->jetty_id.id = jetty->id; + ta_data->tjetty_id = ta->tjetty_id; + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return; + } +} + +static struct ubcore_nlmsg *ubcore_exchange_tp(struct ubcore_tp *tp, struct ubcore_ta *ta, + struct ubcore_udata *udata) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + + struct ubcore_nl_create_tp_resp *resp; + struct ubcore_ta_data ta_data = { 0 }; + + if (ta != NULL) + ubcore_get_ta_data_from_ta(ta, &ta_data); + + req_msg = ubcore_get_create_tp_req(tp, &ta_data, udata); + if (req_msg == NULL) { + ubcore_log_err("Failed to get create tp req"); + return NULL; + } + + resp_msg = ubcore_nl_send_wait(req_msg); + 
if (resp_msg == NULL) { + ubcore_log_err("Failed to wait create_tp response %pI6c", &tp->peer_eid); + kfree(req_msg); + return NULL; + } + + resp = (struct ubcore_nl_create_tp_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != req_msg->msg_type + 1 || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err("Create tp request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 1 : resp->ret)); + kfree(resp_msg); + resp_msg = NULL; + } + + kfree(req_msg); + return resp_msg; +} + +int ubcore_destroy_tp(struct ubcore_tp *tp) +{ + if (!ubcore_have_tp_ops(tp->ub_dev)) { + ubcore_log_err("TP ops is NULL"); + return -1; + } + + if (tp->peer_ext.len > 0 && tp->peer_ext.addr != 0) + kfree((void *)tp->peer_ext.addr); + + return tp->ub_dev->ops->destroy_tp(tp); +} +EXPORT_SYMBOL(ubcore_destroy_tp); + +static void ubcore_set_tp_flag(union ubcore_tp_flag *flag, const struct ubcore_tp_cfg *cfg, + const struct ubcore_device *dev) +{ + flag->bs.target = cfg->flag.bs.target; + flag->bs.sr_en = cfg->flag.bs.sr_en; + flag->bs.spray_en = cfg->flag.bs.spray_en; + flag->bs.oor_en = cfg->flag.bs.oor_en; + flag->bs.cc_en = cfg->flag.bs.cc_en; +} + +static void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, const struct ubcore_tp_cfg *cfg) +{ + ubcore_set_tp_flag(&tp->flag, cfg, tp->ub_dev); + tp->local_net_addr = cfg->local_net_addr; + tp->peer_net_addr = cfg->peer_net_addr; + tp->local_eid = cfg->local_eid; + tp->peer_eid = cfg->peer_eid; + tp->trans_mode = cfg->trans_mode; + tp->rx_psn = cfg->rx_psn; + tp->tx_psn = 0; + tp->mtu = cfg->mtu; + tp->data_udp_start = cfg->data_udp_start; + tp->ack_udp_start = cfg->ack_udp_start; + tp->udp_range = cfg->udp_range; + tp->retry_num = cfg->retry_num; + tp->ack_timeout = cfg->ack_timeout; + tp->tc = cfg->tc; +} + +static struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, + const struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_tp *tp = NULL; + + if 
(!ubcore_have_tp_ops(dev)) { + ubcore_log_err("Invalid parameter"); + return NULL; + } + + tp = dev->ops->create_tp(dev, cfg, udata); + if (tp == NULL) { + ubcore_log_err("Failed to create tp towards remote eid %pI6c", &cfg->peer_eid); + return NULL; + } + tp->ub_dev = dev; + ubcore_set_tp_init_cfg(tp, cfg); + tp->state = UBCORE_TP_STATE_RESET; + tp->priv = NULL; + atomic_set(&tp->use_cnt, 1); + return tp; +} + +/* send request to destroy remote peer tp */ +static int ubcore_destroy_peer_tp(struct ubcore_tp *tp, struct ubcore_ta *ta) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_nl_destroy_tp_resp *resp; + struct ubcore_ta_data ta_data = { 0 }; + int ret = 0; + + if (tp == NULL) { + ubcore_log_err("Invalid parameter"); + return -1; + } + + if (ta != NULL) + ubcore_get_ta_data_from_ta(ta, &ta_data); + + req_msg = ubcore_get_destroy_tp_req(tp, &ta_data); + if (req_msg == NULL) { + ubcore_log_err("Failed to get destroy tp req"); + return -1; + } + + resp_msg = ubcore_nl_send_wait(req_msg); + if (resp_msg == NULL) { + ubcore_log_err("Failed to get destroy tp response"); + kfree(req_msg); + return -1; + } + + resp = (struct ubcore_nl_destroy_tp_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_NL_DESTROY_TP_RESP || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err("Destroy tp request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 
1 : resp->ret)); + ret = -1; + } + + kfree(resp_msg); + kfree(req_msg); + return ret; +} + +/* Destroy both local tp and remote peer tp */ +static int ubcore_destroy_local_peer_tp(struct ubcore_tp *tp, struct ubcore_ta *ta) +{ + struct ubcore_device *dev = tp->ub_dev; + int ret; + + /* Do not send destroy request to the remote if we are in the VM */ + if (!dev->attr.virtualization) { + ret = ubcore_destroy_peer_tp(tp, ta); + if (ret != 0) { + ubcore_log_err("Failed to destroy peer tp"); + return ret; + } + } + return ubcore_destroy_tp(tp); +} + +static void ubcore_abort_tp(struct ubcore_tp *tp, struct ubcore_tp_meta *meta) +{ + struct ubcore_tp *target; + + if (tp == NULL) + return; + + target = ubcore_find_remove_tp(meta->ht, meta->hash, &meta->key); + if (target == NULL || target != tp) { + ubcore_log_warn("TP is not found, already removed or under use\n"); + return; + } + + (void)ubcore_destroy_tp(tp); +} + +/* destroy initiator and peer tp created by ubcore_connect_vtp, called by ubcore_destroy_vtp */ +static int ubcore_disconnect_vtp(struct ubcore_tp *tp) +{ + struct ubcore_tp_node *tp_node = tp->priv; + struct ubcore_device *dev = tp->ub_dev; + + if (atomic_dec_return(&tp->use_cnt) == 0) { + struct ubcore_ta ta; + + ta.type = UBCORE_TA_VIRT; + + ubcore_remove_tp_node(&dev->ht[UBCORE_HT_TP], tp_node); + return ubcore_destroy_local_peer_tp(tp, &ta); + } + return 0; +} + +static void ubcore_set_multipath_tp_cfg(struct ubcore_tp_cfg *cfg, + enum ubcore_transport_mode trans_mode, + struct ubcore_nl_query_tp_resp *query_tp_resp) +{ + cfg->flag.bs.sr_en = query_tp_resp->cfg.flag.bs.sr_en; + cfg->flag.bs.spray_en = query_tp_resp->cfg.flag.bs.spray_en; + cfg->flag.bs.oor_en = query_tp_resp->cfg.flag.bs.oor_en; + cfg->flag.bs.cc_en = query_tp_resp->cfg.flag.bs.cc_en; + cfg->udp_range = query_tp_resp->cfg.tp_range; + if (trans_mode == UBCORE_TP_RC) { + cfg->data_udp_start = query_tp_resp->cfg.data_rctp_start; + cfg->ack_udp_start = query_tp_resp->cfg.ack_rctp_start; + } 
else if (trans_mode == UBCORE_TP_RM) { + cfg->data_udp_start = query_tp_resp->cfg.data_rmtp_start; + cfg->ack_udp_start = query_tp_resp->cfg.ack_rmtp_start; + } +} + +static int ubcore_set_initiator_tp_cfg(struct ubcore_tp_cfg *cfg, struct ubcore_device *dev, + enum ubcore_transport_mode trans_mode, + const union ubcore_eid *remote_eid, + struct ubcore_nl_query_tp_resp *query_tp_resp) +{ + cfg->flag.value = 0; + cfg->flag.bs.target = 0; + cfg->trans_mode = trans_mode; + cfg->local_eid = dev->attr.eid; + + if (dev->attr.virtualization) { + cfg->peer_eid = *remote_eid; + ubcore_set_net_addr_with_eid(&cfg->local_net_addr, &dev->attr.eid); + ubcore_set_net_addr_with_eid(&cfg->peer_net_addr, remote_eid); + } else { + if (dev->netdev == NULL) + ubcore_log_warn("Could not find netdev.\n"); + + cfg->peer_eid = query_tp_resp->dst_eid; /* set eid to be the remote underlay eid */ + cfg->local_net_addr = query_tp_resp->src_addr; + if (dev->netdev != NULL && dev->netdev->dev_addr != NULL) + (void)memcpy(cfg->local_net_addr.mac, dev->netdev->dev_addr, + dev->netdev->addr_len); + if (dev->netdev != NULL) + cfg->local_net_addr.vlan = (uint64_t)dev->netdev->vlan_features; + cfg->peer_net_addr = query_tp_resp->dst_addr; + ubcore_set_multipath_tp_cfg(cfg, trans_mode, query_tp_resp); + } + + /* set mtu to active mtu temperately */ + if (ubcore_get_active_mtu(dev, 0, &cfg->mtu) != 0) { + ubcore_log_err("Failed to get active mtu"); + return -1; + } + /* set psn to 0 temperately */ + cfg->rx_psn = 0; + return 0; +} + +static int ubcore_query_initiator_tp_cfg(struct ubcore_tp_cfg *cfg, struct ubcore_device *dev, + const union ubcore_eid *remote_eid, + enum ubcore_transport_mode trans_mode) +{ + struct ubcore_nl_query_tp_resp query_tp_resp; + + /* Do not query tp as TPS is not running on VM */ + if (dev->attr.virtualization) + return ubcore_set_initiator_tp_cfg(cfg, dev, trans_mode, remote_eid, NULL); + + if (ubcore_query_tp(dev, remote_eid, trans_mode, &query_tp_resp) != 0) { + 
/* NOTE(review): tail of ubcore_query_initiator_tp_cfg — the function opens
 * before this chunk; fragment kept verbatim.
 */
		ubcore_log_err("Failed to query tp");
		return -1;
	}
	return ubcore_set_initiator_tp_cfg(cfg, dev, trans_mode, NULL, &query_tp_resp);
}

/* Drive an RTR tp to RTS via the driver's modify_tp op; updates the cached
 * tp->state only on success.
 */
static int ubcore_modify_tp_to_rts(const struct ubcore_device *dev, struct ubcore_tp *tp)
{
	union ubcore_tp_attr_mask mask;
	struct ubcore_tp_attr attr;

	mask.value = 0;
	mask.bs.state = 1;
	attr.state = UBCORE_TP_STATE_RTS;

	if (dev->ops->modify_tp(tp, &attr, mask) != 0) {
		/* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */
		ubcore_log_err("Failed to modify tp");
		return -1;
	}
	tp->state = UBCORE_TP_STATE_RTS;
	return 0;
}

/* Copy attr->field into tp->field only when the corresponding mask bit is set. */
#define ubcore_mod_tp_attr_with_mask(tp, attr, field, mask) \
	(tp->field = mask.bs.field ? attr->field : tp->field)

/* Apply the masked subset of attributes to the cached tp state.
 * Caller holds tp_node->lock (observed at both call sites in this file).
 */
static void ubcore_modify_tp_attr(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr,
				  union ubcore_tp_attr_mask mask)
{
	/* flag and mod flag must have the same layout */
	if (mask.bs.flag)
		tp->flag.value = tp->flag.bs.target | (attr->flag.value << 1);

	ubcore_mod_tp_attr_with_mask(tp, attr, peer_tpn, mask);
	ubcore_mod_tp_attr_with_mask(tp, attr, state, mask);
	ubcore_mod_tp_attr_with_mask(tp, attr, tx_psn, mask);
	ubcore_mod_tp_attr_with_mask(tp, attr, rx_psn, mask);
	ubcore_mod_tp_attr_with_mask(tp, attr, mtu, mask);
	ubcore_mod_tp_attr_with_mask(tp, attr, cc_pattern_idx, mask);
	ubcore_mod_tp_attr_with_mask(tp, attr, peer_ext, mask);
}

/* Exchange tp parameters with the remote side (via the connection agent) and
 * modify the local tp to RTS. Returns 0 on success; no-op inside a VM.
 */
static int ubcore_enable_tp(const struct ubcore_device *dev, struct ubcore_tp_node *tp_node,
			    struct ubcore_ta *ta, struct ubcore_udata *udata)
{
	struct ubcore_tp *tp = tp_node->tp;
	struct ubcore_nlmsg *resp_msg;
	union ubcore_tp_attr_mask mask;
	struct ubcore_tp_attr attr;
	int ret;

	/* Do not exchange tp with remote in the VM */
	if (dev->attr.virtualization)
		return 0;

	mutex_lock(&tp_node->lock);
	if (tp->state == UBCORE_TP_STATE_RTR) {
		ret = ubcore_modify_tp_to_rts(dev, tp);
		mutex_unlock(&tp_node->lock);
		return ret;
	}
	mutex_unlock(&tp_node->lock);

	/* send request to connection agent and set peer cfg and peer ext from response */
	resp_msg = ubcore_exchange_tp(tp, ta, udata);
	if (resp_msg == NULL) {
		ubcore_log_err("Failed to exchange tp info");
		return -1;
	}

	mutex_lock(&tp_node->lock);
	if (tp->state == UBCORE_TP_STATE_RTS) {
		/* Another thread enabled the tp while we were exchanging. */
		mutex_unlock(&tp_node->lock);
		kfree(resp_msg);
		ubcore_log_info("TP %u is already at RTS", tp->tpn);
		return 0;
	}

	ret = ubcore_set_initiator_peer(
		tp, &attr, &mask,
		(const struct ubcore_nl_create_tp_resp *)(void *)resp_msg->payload);

	/* Here we can free resp msg after use */
	kfree(resp_msg);

	if (ret != 0) {
		mutex_unlock(&tp_node->lock);
		(void)ubcore_destroy_peer_tp(tp, ta);
		ubcore_unset_tp_peer_ext(&attr);
		ubcore_log_err("Failed to set initiator peer");
		return -1;
	}

	ret = dev->ops->modify_tp(tp, &attr, mask);
	if (ret != 0) {
		mutex_unlock(&tp_node->lock);
		(void)ubcore_destroy_peer_tp(tp, ta);
		ubcore_unset_tp_peer_ext(&attr);
		ubcore_log_err("Failed to modify tp");
		return -1;
	}
	ubcore_modify_tp_attr(tp, &attr, mask);
	mutex_unlock(&tp_node->lock);
	return 0;
}

/* create vtp and connect to a remote vtp peer, called by ubcore_create_vtp */
static struct ubcore_tp *ubcore_connect_vtp(struct ubcore_device *dev,
					    const union ubcore_eid *remote_eid,
					    enum ubcore_transport_mode trans_mode,
					    struct ubcore_udata *udata)
{
	struct ubcore_tp_cfg cfg = { 0 };
	struct ubcore_tp_node *tp_node;
	struct ubcore_tp *tp = NULL;
	struct ubcore_ta ta;

	if (ubcore_query_initiator_tp_cfg(&cfg, dev, remote_eid, trans_mode) != 0) {
		ubcore_log_err("Failed to init tp cfg");
		return NULL;
	}

	tp = ubcore_create_tp(dev, &cfg, udata);
	if (tp == NULL) {
		ubcore_log_err("Failed to create tp");
		return NULL;
	}

	tp_node = ubcore_add_tp_with_tpn(dev, tp);
	if (tp_node == NULL) {
		(void)ubcore_destroy_tp(tp);
		ubcore_log_err("Failed to add vtp");
		return NULL;
	}

	ta.type = UBCORE_TA_VIRT;
	/* send request to connection agent and set peer cfg and peer ext from response */
	if (ubcore_enable_tp(dev, tp_node, &ta, udata) != 0) {
		ubcore_remove_tp_node(&dev->ht[UBCORE_HT_TP], tp_node);
		(void)ubcore_destroy_tp(tp);
		ubcore_log_err("Failed to enable tp");
		return NULL;
	}
	return tp;
}

/* Fill attr/mask to move a target tp to RTR from a remote create request,
 * negotiating MTU, PSN and congestion-control algorithm.
 */
static int ubcore_set_target_peer(const struct ubcore_tp *tp, struct ubcore_tp_attr *attr,
				  union ubcore_tp_attr_mask *mask,
				  const struct ubcore_nl_create_tp_req *create)
{
	int ret;

	mask->value = 0;
	mask->bs.peer_tpn = 1;
	mask->bs.mtu = 1;
	mask->bs.tx_psn = 1;
	mask->bs.state = 1;
	mask->bs.flag = 1;

	memset(attr, 0, sizeof(*attr));
	attr->peer_tpn = create->tpn;
	attr->mtu = min(tp->mtu, create->mtu);
	attr->tx_psn = create->rx_psn;
	attr->state = UBCORE_TP_STATE_RTR;

	/* Negotiate local and remote optimal algorithms */
	ret = ubcore_negotiate_optimal_cc_alg(tp->ub_dev->attr.dev_cap.congestion_ctrl_alg,
					      create->cfg.congestion_alg);
	if (ret == -1) {
		ubcore_log_err("No congestion control algorithm available");
		return -1;
	}
	attr->flag.value = tp->flag.value >> 1;
	attr->flag.bs.cc_alg = (enum ubcore_tp_cc_alg)ret;

	/* peer_ext already set (non-zero addr): do not overwrite it */
	if (tp->peer_ext.addr != 0)
		return 0;

	mask->bs.peer_ext = 1;
	return ubcore_set_tp_peer_ext(attr, create->ext_udrv, create->ext_len);
}

/* Build the netlink response for a destroy-tp request, carrying only ret. */
static struct ubcore_nlmsg *ubcore_get_destroy_tp_response(enum ubcore_nl_resp_status ret,
							   struct ubcore_nlmsg *req)
{
	struct ubcore_nl_destroy_tp_resp *destroy_resp;
	struct ubcore_nlmsg *resp = NULL;

	resp = ubcore_alloc_nlmsg(sizeof(struct ubcore_nl_destroy_tp_resp), &req->dst_eid,
				  &req->src_eid);
	if (resp == NULL) {
		ubcore_log_err("Failed to alloc destroy tp response");
		return NULL;
	}

	resp->msg_type = UBCORE_NL_DESTROY_TP_RESP;
	resp->nlmsg_seq = req->nlmsg_seq;
	resp->transport_type = req->transport_type;
	destroy_resp = (struct ubcore_nl_destroy_tp_resp *)resp->payload;
	destroy_resp->ret = ret;

	return resp;
}

/* Build the netlink response for a create-tp request. A NULL tp produces a
 * failure response; otherwise local tp parameters (tpn, mtu, rx_psn, driver
 * extension) are copied into the payload.
 */
static struct ubcore_nlmsg *ubcore_get_create_tp_response(struct ubcore_tp *tp,
							  struct ubcore_nlmsg *req)
{
	uint32_t payload_len =
		sizeof(struct ubcore_nl_create_tp_resp) + (tp == NULL ? 0 : tp->tp_ext.len);
	struct ubcore_nl_create_tp_resp *create_resp;
	struct ubcore_nlmsg *resp = NULL;

	resp = ubcore_alloc_nlmsg(payload_len, &req->dst_eid, &req->src_eid);
	if (resp == NULL) {
		ubcore_log_err("Failed to alloc create tp response");
		return NULL;
	}

	/* response type is defined as request type + 1 */
	resp->msg_type = req->msg_type + 1;
	resp->nlmsg_seq = req->nlmsg_seq;
	resp->transport_type = req->transport_type;
	create_resp = (struct ubcore_nl_create_tp_resp *)resp->payload;
	if (tp == NULL) {
		create_resp->ret = UBCORE_NL_RESP_FAIL;
		return resp;
	}

	create_resp->ret = UBCORE_NL_RESP_SUCCESS;
	create_resp->flag = tp->flag;
	create_resp->peer_tpn = tp->tpn;
	create_resp->peer_mtu = tp->mtu;
	create_resp->peer_rx_psn = tp->rx_psn;
	create_resp->peer_ext_len = tp->tp_ext.len;
	if (tp->tp_ext.len > 0)
		(void)memcpy(create_resp->peer_ext, (void *)tp->tp_ext.addr, tp->tp_ext.len);

	return resp;
}

/* Copy multipath (UDP spray/out-of-order) settings from the request into the
 * target tp cfg; RC and RM use distinct UDP port ranges.
 */
static void ubcore_set_multipath_target_tp_cfg(struct ubcore_tp_cfg *cfg,
					       enum ubcore_transport_mode trans_mode,
					       const struct ubcore_multipath_tp_cfg *tp_cfg)
{
	cfg->flag.bs.sr_en = tp_cfg->flag.bs.sr_en;
	cfg->flag.bs.oor_en = tp_cfg->flag.bs.oor_en;
	cfg->flag.bs.spray_en = tp_cfg->flag.bs.spray_en;
	cfg->flag.bs.cc_en = tp_cfg->flag.bs.cc_en;
	cfg->udp_range = tp_cfg->tp_range;
	if (trans_mode == UBCORE_TP_RC) {
		cfg->data_udp_start = tp_cfg->data_rctp_start;
		cfg->ack_udp_start = tp_cfg->ack_rctp_start;
	} else if (trans_mode == UBCORE_TP_RM) {
		cfg->data_udp_start = tp_cfg->data_rmtp_start;
		cfg->ack_udp_start = tp_cfg->ack_rmtp_start;
	}
}

/* Initialize a target-side tp cfg from an incoming create request. */
static int ubcore_set_target_tp_cfg(struct ubcore_tp_cfg *cfg, const struct ubcore_device *dev,
				    struct ubcore_nlmsg *req, struct ubcore_ta *ta)
{
	struct ubcore_nl_create_tp_req *create =
		(struct ubcore_nl_create_tp_req *)(void *)req->payload;

	/* set ubcore_ta */
	cfg->ta = ta;
	ubcore_set_multipath_target_tp_cfg(cfg, create->trans_mode, &create->cfg);
	cfg->flag.bs.target = !create->cfg.flag.bs.target;
	cfg->trans_mode = create->trans_mode;
	cfg->local_eid = dev->attr.eid; /* or req->dst_eid */
	cfg->peer_eid = req->src_eid;

	if (dev->netdev == NULL)
		ubcore_log_warn("Could not find netdev.\n");

	cfg->local_net_addr = create->peer_net_addr;
	if (dev->netdev != NULL && dev->netdev->dev_addr != NULL)
		(void)memcpy(cfg->local_net_addr.mac, dev->netdev->dev_addr, dev->netdev->addr_len);
	if (dev->netdev != NULL)
		cfg->local_net_addr.vlan = (uint64_t)dev->netdev->vlan_features;
	cfg->peer_net_addr = create->local_net_addr;

	/* set mtu to active mtu temperately */
	if (ubcore_get_active_mtu(dev, 0, &cfg->mtu) != 0) {
		ubcore_log_err("Failed to get active mtu");
		return -1;
	}
	cfg->mtu = min(cfg->mtu, create->mtu);
	/* set psn to 0 temperately */
	cfg->rx_psn = 0;
	/* todonext: set cc */
	return 0;
}

/* Create the target-side tp for an incoming create request; the requester's
 * driver-private data trails ext_udrv in the request payload.
 */
static struct ubcore_tp *ubcore_create_target_tp(struct ubcore_device *dev,
						 struct ubcore_nlmsg *req, struct ubcore_ta *ta)
{
	struct ubcore_nl_create_tp_req *create =
		(struct ubcore_nl_create_tp_req *)(void *)req->payload;
	/* create tp parameters */
	struct ubcore_udrv_priv udrv_data = { .in_addr = (uintptr_t)(create->ext_udrv +
								     create->ext_len),
					      .in_len = create->udrv_in_len,
					      .out_addr = 0,
					      .out_len = 0 };
	struct ubcore_udata udata = { .uctx = NULL, .udrv_data = &udrv_data };
	struct ubcore_tp_cfg cfg = { 0 };
	struct ubcore_tp *tp = NULL;

	if (ubcore_set_target_tp_cfg(&cfg, dev, req, ta) != 0) {
		ubcore_log_err("Failed to init tp cfg in create target tp.\n");
		return NULL;
	}

	tp = ubcore_create_tp(dev, &cfg, &udata);
	if (tp == NULL) {
		ubcore_log_err("Failed to create tp in create target tp.\n");
		return NULL;
	}

	return tp;
}

/* Advance a target tp's state machine for an incoming create request:
 * RESET -> RTR (then fall through), RTR -> RTS where the transport allows it,
 * RTS is reused as-is, ERROR fails. Serialized by tp_node->lock.
 */
static int ubcore_modify_target_tp(const struct ubcore_device *dev, struct ubcore_tp_node *tp_node,
				   const struct ubcore_nl_create_tp_req *create)
{
	struct ubcore_tp *tp = tp_node->tp;
	union ubcore_tp_attr_mask mask;
	struct ubcore_tp_attr attr;
	int ret = 0;

	mutex_lock(&tp_node->lock);

	switch (tp->state) {
	case UBCORE_TP_STATE_RTS:
		ubcore_log_info("Reuse existing tp with tpn %u", tp->tpn);
		break;
	case UBCORE_TP_STATE_RESET:
		/* Modify target tp to RTR */
		if (ubcore_set_target_peer(tp, &attr, &mask, create) != 0) {
			ubcore_log_err("Failed to set target peer");
			ret = -1;
			break;
		}
		if (dev->ops->modify_tp(tp, &attr, mask) != 0) {
			ubcore_unset_tp_peer_ext(&attr);
			ubcore_log_err("Failed to modify tp");
			ret = -1;
			break;
		}
		ubcore_modify_tp_attr(tp, &attr, mask);
		fallthrough;
	case UBCORE_TP_STATE_RTR:
		/* For RC target TP: modify to RTR only, to RTS when call bind_jetty;
		 * For IB RM target TP: modify to RTR only, to RTS when call advise_jetty
		 */
		if (tp->trans_mode == UBCORE_TP_RC || (dev->transport_type == UBCORE_TRANSPORT_IB))
			break;

		/* TRANSPORT_UB: modify target tp to RTS when receive ACK from intiator,
		 * currently, modify target tp to RTS immediately after target tp is modified to RTR
		 */
		ret = ubcore_modify_tp_to_rts(dev, tp);
		break;
	case UBCORE_TP_STATE_ERROR:
	default:
		ret = -1;
		break;
	}

	mutex_unlock(&tp_node->lock);
	return ret;
}

/* Look up or create the target tp for a create request, then drive its state
 * machine. On a lost insert race the freshly created tp is destroyed and the
 * winner's tp is used.
 */
static struct ubcore_tp *ubcore_accept_target_tp(struct ubcore_device *dev,
						 struct ubcore_nlmsg *req,
						 struct ubcore_tp_advice *advice)
{
	struct ubcore_nl_create_tp_req *create =
		(struct ubcore_nl_create_tp_req *)(void *)req->payload;
	struct ubcore_tp_meta *meta = &advice->meta;
	struct ubcore_tp *new_tp = NULL; /* new created target tp */
	struct ubcore_tp_node *tp_node;

	tp_node = ubcore_hash_table_lookup(meta->ht, meta->hash, &meta->key);
	if (tp_node == NULL) {
		new_tp = ubcore_create_target_tp(dev, req, &advice->ta);
		if (new_tp == NULL) {
			ubcore_log_err("Failed to create target tp towards remote eid %pI6c",
				       &req->src_eid);
			return NULL;
		}
		tp_node = ubcore_add_tp_node(meta->ht, meta->hash, &meta->key, new_tp, &advice->ta);
		if (tp_node == NULL) {
			(void)ubcore_destroy_tp(new_tp);
			ubcore_log_err(
				"Failed to add target tp towards remote eid %pI6c to the tp table",
				&req->src_eid);
			return NULL;
		}
		if (tp_node->tp != new_tp) {
			/* lost the insert race: keep the existing tp */
			(void)ubcore_destroy_tp(new_tp);
			new_tp = NULL;
		}
	}

	if (ubcore_modify_target_tp(dev, tp_node, create) != 0) {
		ubcore_abort_tp(new_tp, meta);
		ubcore_log_err("Failed to modify tp");
		return NULL;
	}
	return tp_node->tp;
}

/* Resolve the ta (jfr or jetty) referenced by a netlink request and fill the
 * tp-table meta (table ref, key, hash). Takes a tptable reference on success
 * when ht is set; caller must pair with ubcore_put_tptable.
 */
static int ubcore_parse_ta(struct ubcore_device *dev, struct ubcore_ta_data *ta_data,
			   struct ubcore_tp_advice *advice)
{
	struct ubcore_tp_meta *meta;
	struct ubcore_jetty *jetty;
	struct ubcore_jfr *jfr;

	(void)memset(advice, 0, sizeof(struct ubcore_tp_advice));
	meta = &advice->meta;
	advice->ta.type = ta_data->type;

	switch (ta_data->type) {
	case UBCORE_TA_JFS_TJFR:
		jfr = ubcore_find_jfr(dev, ta_data->tjetty_id.id);
		if (jfr != NULL) {
			meta->ht = ubcore_get_tptable(jfr->tptable);
			advice->ta.jfr = jfr;
			advice->ta.tjetty_id = ta_data->jetty_id;
		}
		break;
	case UBCORE_TA_JETTY_TJETTY:
		/* todonext: add kref to jetty, as it may be destroyed any time */
		jetty = ubcore_find_jetty(dev, ta_data->tjetty_id.id);
		if (jetty != NULL) {
			/* an RC jetty may be bound to at most one remote jetty */
			if (jetty->jetty_cfg.trans_mode == UBCORE_TP_RC &&
			    jetty->remote_jetty != NULL &&
			    memcmp(&jetty->remote_jetty->cfg.id, &ta_data->jetty_id,
				   sizeof(struct ubcore_jetty_id))) {
				ubcore_log_err(
					"the same jetty is binded with another remote jetty.\n");
				return -1;
			}
			meta->ht = ubcore_get_tptable(jetty->tptable);
			advice->ta.jetty = jetty;
			advice->ta.tjetty_id = ta_data->jetty_id;
		}
		break;
	case UBCORE_TA_NONE:
	case UBCORE_TA_VIRT:
	default:
		return -1;
	}
	ubcore_init_tp_key_jetty_id(&meta->key, &ta_data->jetty_id);

	/* jetty and jfs should be indexed consecutively */
	meta->hash = ubcore_get_jetty_hash(&ta_data->jetty_id);
	return 0;
}

/* Target side of advise_tp: accept a create request against the jfr/jetty
 * tp table referenced by the request's ta.
 */
static struct ubcore_tp *ubcore_advise_target_tp(struct ubcore_device *dev,
						 struct ubcore_nlmsg *req)
{
	struct ubcore_nl_create_tp_req *create =
		(struct ubcore_nl_create_tp_req *)(void *)req->payload;
	struct ubcore_tp_advice advice;
	struct ubcore_tp_meta *meta;
	struct ubcore_tp *tp;

	meta = &advice.meta;
	if (ubcore_parse_ta(dev, &create->ta, &advice) != 0) {
		ubcore_log_err("Failed to parse ta with type %u", create->ta.type);
		return NULL;
	} else if (meta->ht == NULL) {
		ubcore_log_err("tp table is already released");
		return NULL;
	}

	tp = ubcore_accept_target_tp(dev, req, &advice);
	/* pair with get_tptable in parse_ta */
	ubcore_put_tptable(meta->ht);
	return tp;
}

/* Target side of create_vtp: create the tp, index it by tpn in the device
 * table, and drive it through the target state machine.
 */
static struct ubcore_tp *ubcore_accept_target_vtp(struct ubcore_device *dev,
						  struct ubcore_nlmsg *req)
{
	struct ubcore_nl_create_tp_req *create =
		(struct ubcore_nl_create_tp_req *)(void *)req->payload;
	struct ubcore_tp_node *tp_node;
	struct ubcore_tp *tp = NULL;

	tp = ubcore_create_target_tp(dev, req, NULL);
	if (tp == NULL) {
		ubcore_log_err("Failed to create tp");
		return NULL;
	}

	tp_node = ubcore_add_tp_with_tpn(dev, tp);
	if (tp_node == NULL) {
		ubcore_log_err("Failed to add tp to the tp table in the device");
		goto destroy_tp;
	}

	if (ubcore_modify_target_tp(dev, tp_node, create) != 0) {
		ubcore_log_err("Failed to modify tp");
		goto remove_tp_node;
	}

	return tp;

remove_tp_node:
	ubcore_remove_tp_node(&dev->ht[UBCORE_HT_TP], tp_node);
destroy_tp:
	(void)ubcore_destroy_tp(tp);
	return NULL;
}

/* RC bind shares the advise path on the target side. */
static struct ubcore_tp *ubcore_bind_target_tp(struct ubcore_device *dev, struct ubcore_nlmsg *req)
{
	return ubcore_advise_target_tp(dev, req);
}

/* Netlink handler: accept a remote create-tp request and return the response
 * message describing the local target tp (or a failure response).
 */
struct ubcore_nlmsg *ubcore_handle_create_tp_req(struct ubcore_nlmsg *req)
{
	struct ubcore_nl_create_tp_req *create =
		(struct ubcore_nl_create_tp_req *)(void *)req->payload;
	struct ubcore_tp *tp = NULL;
	struct ubcore_device *dev;

	if (req->payload_len < sizeof(struct ubcore_nl_create_tp_req)) {
		ubcore_log_err("Invalid create req");
		return NULL;
	}

	dev = ubcore_find_device(&req->dst_eid, req->transport_type);
	if (dev == NULL || !ubcore_have_tp_ops(dev)) {
		if (dev != NULL)
			ubcore_put_device(dev);
		ubcore_log_err("Failed to find device or device ops invalid");
		return ubcore_get_create_tp_response(NULL, req);
	}

	if (create->ta.type == UBCORE_TA_VIRT) {
		tp = ubcore_accept_target_vtp(dev, req);
	} else if (create->trans_mode == UBCORE_TP_RC) {
		tp = ubcore_bind_target_tp(dev, req);
	} else if (create->trans_mode == UBCORE_TP_RM &&
		   dev->transport_type == UBCORE_TRANSPORT_IB) {
		tp = ubcore_advise_target_tp(dev, req);
	}

	if (tp == NULL)
		ubcore_log_err("Failed to create target tp towards remote eid %pI6c",
			       &req->src_eid);

	ubcore_put_device(dev);
	return ubcore_get_create_tp_response(tp, req);
}
EXPORT_SYMBOL(ubcore_handle_create_tp_req);

/* destroy target vtp created by ubcore_accept_target_vtp */
static int ubcore_unaccept_target_vtp(struct ubcore_device *dev,
				      struct ubcore_nl_destroy_tp_req *destroy)
{
	struct ubcore_tp *tp = ubcore_remove_tp_with_tpn(dev, destroy->peer_tpn);

	if (tp == NULL) {
		ubcore_log_warn("tp is not found or already destroyed %u", destroy->peer_tpn);
		return 0;
	}
	return ubcore_destroy_tp(tp);
}

/* destroy target RM tp created by ubcore_advise_target_tp */
static int ubcore_unadvise_target_tp(struct ubcore_device *dev,
				     struct ubcore_nl_destroy_tp_req *destroy)
{
	struct ubcore_tp_advice advice;
	struct ubcore_tp_meta *meta;
	struct ubcore_tp *tp = NULL;

	meta = &advice.meta;
	if (ubcore_parse_ta(dev, &destroy->ta, &advice) != 0) {
		ubcore_log_err("Failed to parse ta with type %u", destroy->ta.type);
		return -1;
	} else if (meta->ht == NULL) {
		ubcore_log_warn("tp table is already released");
		return 0;
	}

	tp = ubcore_find_remove_tp(meta->ht, meta->hash, &meta->key);
	/* pair with get_tptable in parse_ta */
	ubcore_put_tptable(meta->ht);
	if (tp == NULL) {
		ubcore_log_warn("tp is not found, already destroyed or under use %u",
				destroy->peer_tpn);
		return 0;
	}

	return ubcore_destroy_tp(tp);
}

/* destroy target RC tp created by ubcore_bind_target_tp */
static int ubcore_unbind_target_tp(struct ubcore_device *dev,
				   struct ubcore_nl_destroy_tp_req *destroy)
{
	return ubcore_unadvise_target_tp(dev, destroy);
}

/* Netlink handler: destroy the target tp named by a remote destroy request.
 * NOTE(review): ret (0/-1) is cast directly to enum ubcore_nl_resp_status —
 * presumably SUCCESS==0 and FAIL==-1; verify against the enum definition.
 */
struct ubcore_nlmsg *ubcore_handle_destroy_tp_req(struct ubcore_nlmsg *req)
{
	struct ubcore_nl_destroy_tp_req *destroy =
		(struct ubcore_nl_destroy_tp_req *)(void *)req->payload;
	struct ubcore_device *dev;
	int ret = -1;

	if (req->payload_len != sizeof(struct ubcore_nl_destroy_tp_req)) {
		ubcore_log_err("Invalid destroy req");
		return NULL;
	}

	dev = ubcore_find_device(&req->dst_eid, req->transport_type);
	if (dev == NULL || !ubcore_have_tp_ops(dev)) {
		if (dev != NULL)
			ubcore_put_device(dev);
		ubcore_log_err("Failed to find device or device ops invalid");
		return ubcore_get_destroy_tp_response(UBCORE_NL_RESP_FAIL, req);
	}

	if (destroy->ta.type == UBCORE_TA_VIRT) {
		ret = ubcore_unaccept_target_vtp(dev, destroy);
	} else if (destroy->trans_mode == UBCORE_TP_RC) {
		ret = ubcore_unbind_target_tp(dev, destroy);
	} else if (destroy->trans_mode == UBCORE_TP_RM &&
		   dev->transport_type == UBCORE_TRANSPORT_IB) {
		ret = ubcore_unadvise_target_tp(dev, destroy);
	}
	ubcore_put_device(dev);
	return ubcore_get_destroy_tp_response((enum ubcore_nl_resp_status)ret, req);
}
EXPORT_SYMBOL(ubcore_handle_destroy_tp_req);

/* Public API: create a vtp towards remote_eid. Only the IB (alpha) transport
 * with RM/RC modes is implemented; rejected inside a VM.
 */
struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, const union ubcore_eid *remote_eid,
				    enum ubcore_transport_mode trans_mode,
				    struct ubcore_udata *udata)
{
	if (dev == NULL || dev->attr.virtualization || remote_eid == NULL ||
	    !ubcore_have_tp_ops(dev)) {
		ubcore_log_err("Invalid parameter");
		return NULL;
	}

	switch (dev->transport_type) {
	case UBCORE_TRANSPORT_IB: /* alpha */
		if (trans_mode == UBCORE_TP_RM || trans_mode == UBCORE_TP_RC)
			return ubcore_connect_vtp(dev, remote_eid, trans_mode, udata);
		break;
	case UBCORE_TRANSPORT_UB: /* beta */
	case UBCORE_TRANSPORT_IP:
	case UBCORE_TRANSPORT_INVALID:
	case UBCORE_TRANSPORT_MAX:
	default:
		break;
	}
	return NULL;
}
EXPORT_SYMBOL(ubcore_create_vtp);

/* Public API: destroy a vtp created by ubcore_create_vtp. */
int ubcore_destroy_vtp(struct ubcore_tp *vtp)
{
	enum ubcore_transport_mode trans_mode;
	struct ubcore_device *dev;

	if (vtp == NULL || vtp->ub_dev == NULL || vtp->priv == NULL ||
	    vtp->ub_dev->attr.virtualization) {
		ubcore_log_err("Invalid para");
		return -1;
	}
	dev = vtp->ub_dev;
	trans_mode = vtp->trans_mode;
	switch (dev->transport_type) {
	case UBCORE_TRANSPORT_IB: /* alpha */
		if (trans_mode == UBCORE_TP_RM || trans_mode == UBCORE_TP_RC)
			return ubcore_disconnect_vtp(vtp);
		break;
	case UBCORE_TRANSPORT_UB: /* beta */
	case UBCORE_TRANSPORT_IP:
	case UBCORE_TRANSPORT_INVALID:
	case UBCORE_TRANSPORT_MAX:
	default:
		break;
	}
	return -1;
}
EXPORT_SYMBOL(ubcore_destroy_vtp);

/* Only the IB transport passes a ta down to the driver's create_tp. */
static inline void ubcore_set_ta_for_tp_cfg(struct ubcore_device *dev, struct ubcore_ta *ta,
					    struct ubcore_tp_cfg *cfg)
{
	if (dev->transport_type == UBCORE_TRANSPORT_IB)
		cfg->ta = ta;
	else
		cfg->ta = NULL;
}

/* Bind an RC jetty to a remote jetty: create/reuse the tp, record it on the
 * tjetty (under tjetty->lock, duplicate binds rejected), then enable it.
 */
int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty,
		   struct ubcore_tp_advice *advice, struct ubcore_udata *udata)
{
	struct ubcore_device *dev = jetty->ub_dev;
	struct ubcore_tp_cfg cfg = { 0 };
	struct ubcore_tp_node *tp_node;
	struct ubcore_tp *new_tp = NULL;

	if (ubcore_query_initiator_tp_cfg(&cfg, dev, (union ubcore_eid *)&tjetty->cfg.id.eid,
					  tjetty->cfg.trans_mode) != 0) {
		ubcore_log_err("Failed to init tp cfg.\n");
		return -1;
	}

	mutex_lock(&tjetty->lock);
	if (tjetty->tp != NULL) {
		mutex_unlock(&tjetty->lock);
		ubcore_log_err("The same tjetty, different jetty, prevent duplicate bind.\n");
		return -1;
	}

	ubcore_set_ta_for_tp_cfg(dev, &advice->ta, &cfg);

	/* driver gurantee to return the same tp if we have created it as a target */
	new_tp = ubcore_create_tp(dev, &cfg, udata);
	if (new_tp == NULL) {
		ubcore_log_err("Failed to create tp.\n");
		mutex_unlock(&tjetty->lock);
		return -1;
	}

	tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, &advice->meta.key, new_tp,
				     &advice->ta);
	if (tp_node == NULL) {
		(void)ubcore_destroy_tp(new_tp);
		mutex_unlock(&tjetty->lock);
		ubcore_log_err("Failed to find and add tp\n");
		return -1;
	} else if (tp_node != NULL && tp_node->tp != new_tp) {
		/* lost the insert race; NOTE(review): tp_node != NULL is redundant here */
		(void)ubcore_destroy_tp(new_tp);
		new_tp = NULL;
	}
	tjetty->tp = tp_node->tp;
	mutex_unlock(&tjetty->lock);

	/* send request to connection agent and set peer cfg and peer ext from response */
	if (ubcore_enable_tp(dev, tp_node, &advice->ta, udata) != 0) {
		mutex_lock(&tjetty->lock);
		tjetty->tp = NULL;
		mutex_unlock(&tjetty->lock);
		ubcore_abort_tp(new_tp, &advice->meta);
		ubcore_log_err("Failed to enable tp.\n");
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(ubcore_bind_tp);

/* Undo ubcore_bind_tp: unadvise the tp then clear the tjetty's reference. */
int ubcore_unbind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty,
		     struct ubcore_tp_advice *advice)
{
	if (tjetty->tp == NULL) {
		ubcore_log_warn("TP is not found, already removed or under use\n");
		return 0;
	}
	if (ubcore_unadvise_tp(jetty->ub_dev, advice) != 0) {
		ubcore_log_warn("failed to unbind tp\n");
		return -1;
	}
	mutex_lock(&tjetty->lock);
	tjetty->tp = NULL;
	mutex_unlock(&tjetty->lock);
	return 0;
}
EXPORT_SYMBOL(ubcore_unbind_tp);

/* Advise an RM tp towards remote_eid: reuse an existing initiator tp (bumping
 * use_cnt) or create, register and enable a new one.
 */
int ubcore_advise_tp(struct ubcore_device *dev, const union ubcore_eid *remote_eid,
		     struct ubcore_tp_advice *advice, struct ubcore_udata *udata)
{
	struct ubcore_tp_node *tp_node;
	struct ubcore_tp_cfg cfg = { 0 };
	struct ubcore_tp *new_tp;

	/* Must call driver->create_tp with udata if we are advising jetty */
	tp_node = ubcore_hash_table_lookup(advice->meta.ht, advice->meta.hash,
					   &advice->meta.key);
	if (tp_node != NULL && !tp_node->tp->flag.bs.target) {
		atomic_inc(&tp_node->tp->use_cnt);
		return 0;
	}

	if (ubcore_query_initiator_tp_cfg(&cfg, dev, remote_eid, UBCORE_TP_RM) != 0) {
		ubcore_log_err("Failed to init tp cfg");
		return -1;
	}

	ubcore_set_ta_for_tp_cfg(dev, &advice->ta, &cfg);

	/* driver gurantee to return the same tp if we have created it as a target */
	new_tp = ubcore_create_tp(dev, &cfg, udata);
	if (new_tp == NULL) {
		ubcore_log_err("Failed to create tp");
		return -1;
	}

	tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, &advice->meta.key, new_tp,
				     &advice->ta);
	if (tp_node == NULL) {
		(void)ubcore_destroy_tp(new_tp);
		ubcore_log_err("Failed to find and add tp\n");
		return -1;
	} else if (tp_node != NULL && tp_node->tp != new_tp) {
		/* lost the insert race; NOTE(review): tp_node != NULL is redundant here */
		(void)ubcore_destroy_tp(new_tp);
		new_tp = NULL;
	}

	if (ubcore_enable_tp(dev, tp_node, &advice->ta, udata) != 0) {
		ubcore_abort_tp(new_tp, &advice->meta);
		ubcore_log_err("Failed to enable tp");
		return -1;
	}

	if (new_tp == NULL)
		atomic_inc(&tp_node->tp->use_cnt);

	return 0;
}
EXPORT_SYMBOL(ubcore_advise_tp);

/* Remove the advised tp from its table and destroy it (local + peer side). */
int ubcore_unadvise_tp(struct ubcore_device *dev, struct ubcore_tp_advice *advice)
{
	struct ubcore_tp *tp =
		ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, &advice->meta.key);
	if (tp == NULL) {
		ubcore_log_warn("TP is not found, already removed or under use\n");
		return 0;
	}

	return ubcore_destroy_local_peer_tp(tp, &advice->ta);
}
EXPORT_SYMBOL(ubcore_unadvise_tp);

/* Recover the ta that was stored in the tp's private tp_node, if any. */
static void ubcore_get_ta_from_tp(struct ubcore_ta *ta, struct ubcore_tp *tp)
{
	struct ubcore_tp_node *tp_node = (struct ubcore_tp_node *)tp->priv;

	ta->type = UBCORE_TA_NONE;
	switch (tp->trans_mode) {
	case UBCORE_TP_RC:
	case UBCORE_TP_RM:
		/* ta is none for UB native device */
		if (tp_node != NULL)
			*ta = tp_node->ta;
		break;
	case UBCORE_TP_UM:
	default:
		break;
	}
}

/* Build the netlink restore-tp request for an errored tp, proposing a fresh
 * random rx_psn.
 */
static struct ubcore_nlmsg *ubcore_get_restore_tp_req(struct ubcore_tp *tp)
{
	uint32_t payload_len = sizeof(struct ubcore_nl_restore_tp_req);
	struct ubcore_nl_restore_tp_req *restore;
	struct ubcore_ta ta;
	struct ubcore_nlmsg *req;

	req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid);
	if (req == NULL)
		return NULL;

	req->transport_type = tp->ub_dev->transport_type;
	req->msg_type = UBCORE_NL_RESTORE_TP_REQ;
	restore = (struct ubcore_nl_restore_tp_req *)(void *)req->payload;
	restore->trans_mode = tp->trans_mode;
	restore->tpn = tp->tpn;
	restore->peer_tpn = tp->peer_tpn;
	restore->rx_psn = get_random_u32();

	ubcore_get_ta_from_tp(&ta, tp);
	ubcore_get_ta_data_from_ta(&ta, &restore->ta);

	return req;
}

/* Build the netlink restore-tp response. NOTE(review): on success,
 * restore_resp->ret is left as allocated (not explicitly set to SUCCESS) —
 * presumably ubcore_alloc_nlmsg zeroes the payload; verify.
 */
static struct ubcore_nlmsg *ubcore_get_restore_tp_response(struct ubcore_nlmsg *req,
							   struct ubcore_tp *tp)
{
	struct ubcore_nl_restore_tp_resp *restore_resp;
	struct ubcore_nlmsg *resp = NULL;

	resp = ubcore_alloc_nlmsg(sizeof(struct ubcore_nl_restore_tp_resp), &req->dst_eid,
				  &req->src_eid);
	if (resp == NULL) {
		ubcore_log_err("Failed to alloc restore tp response");
		return NULL;
	}

	resp->msg_type = UBCORE_NL_RESTORE_TP_RESP;
	resp->nlmsg_seq = req->nlmsg_seq;
	resp->transport_type = req->transport_type;
	restore_resp = (struct ubcore_nl_restore_tp_resp *)resp->payload;

	if (tp == NULL) {
		restore_resp->ret = UBCORE_NL_RESP_FAIL;
		return resp;
	}

	restore_resp->peer_rx_psn = tp->rx_psn;
	return resp;
}

/* Modify a tp back to RTS with the negotiated PSN pair, updating the cache
 * on success.
 */
static int ubcore_restore_tp_to_rts(const struct ubcore_device *dev, struct ubcore_tp *tp,
				    uint32_t rx_psn, uint32_t tx_psn)
{
	union ubcore_tp_attr_mask mask;
	struct ubcore_tp_attr attr;

	mask.value = 0;
	mask.bs.state = 1;
	mask.bs.rx_psn = 1;
	mask.bs.tx_psn = 1;

	attr.state = UBCORE_TP_STATE_RTS;
	attr.rx_psn = rx_psn;
	attr.tx_psn = tx_psn;

	if (dev->ops->modify_tp(tp, &attr, mask) != 0) {
		/* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */
		ubcore_log_err("Failed to modify tp");
		return -1;
	}

	tp->state = UBCORE_TP_STATE_RTS;
	tp->rx_psn = rx_psn;
	tp->tx_psn = tx_psn;

	return 0;
}

/* Initiator-side recovery of an errored tp: exchange fresh PSNs with the
 * remote side over netlink and move the tp back to RTS.
 */
void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp)
{
	struct ubcore_nlmsg *req_msg, *resp_msg;
	struct ubcore_nl_restore_tp_resp *resp;
	struct ubcore_nl_restore_tp_req *req;

	/* Currently, only try to restore tp in the UBCORE_TRANSPORT_IB device,
	 * Do not send retore tp req from target to inititor,
	 * Do not restore UM TP, as it is only visable by the driver
	 */
	if (dev->transport_type != UBCORE_TRANSPORT_IB || tp->flag.bs.target || tp->priv == NULL ||
	    tp->trans_mode == UBCORE_TP_UM || tp->state != UBCORE_TP_STATE_ERROR ||
	    !ubcore_have_tp_ops(dev))
		return;

	req_msg = ubcore_get_restore_tp_req(tp);
	if (req_msg == NULL) {
		ubcore_log_err("Failed to get restore tp req");
		return;
	}

	resp_msg = ubcore_nl_send_wait(req_msg);
	if (resp_msg == NULL) {
		ubcore_log_err("Failed to wait restore tp response %pI6c", &tp->peer_eid);
		kfree(req_msg);
		return;
	}

	req = (struct ubcore_nl_restore_tp_req *)(void *)req_msg->payload;
	resp = (struct ubcore_nl_restore_tp_resp *)(void *)resp_msg->payload;
	if (resp_msg->msg_type != req_msg->msg_type + 1 || resp == NULL ||
	    resp->ret != UBCORE_NL_RESP_SUCCESS) {
		ubcore_log_err("Restore tp request is rejected with type %d ret %d",
			       resp_msg->msg_type, (resp == NULL ? 1 : resp->ret));
		kfree(resp_msg);
		kfree(req_msg);
		return;
	}

	if (ubcore_restore_tp_to_rts(dev, tp, req->rx_psn, resp->peer_rx_psn) != 0)
		ubcore_log_err("Failed to restore tp with tpn %u", tp->tpn);

	kfree(req_msg);
	kfree(resp_msg);
	ubcore_log_info("Restored tp with tpn %u", tp->tpn);
}
EXPORT_SYMBOL(ubcore_restore_tp);

/* restore target RM tp created by ubcore_advise_target_tp */
static struct ubcore_tp *ubcore_restore_advised_target_tp(struct ubcore_device *dev,
							  struct ubcore_nl_restore_tp_req *restore)
{
	struct ubcore_tp_advice advice;
	struct ubcore_tp_node *tp_node;
	struct ubcore_tp_meta *meta;
	struct ubcore_tp *tp;

	meta = &advice.meta;
	if (ubcore_parse_ta(dev, &restore->ta, &advice) != 0) {
		ubcore_log_err("Failed to parse ta with type %u", restore->ta.type);
		return NULL;
	} else if (meta->ht == NULL) {
		ubcore_log_err("tp table is already released");
		return NULL;
	}

	tp_node = ubcore_hash_table_lookup(meta->ht, meta->hash, &meta->key);
	/* pair with get_tptable in parse_ta */
	ubcore_put_tptable(meta->ht);
	if (tp_node == NULL) {
		ubcore_log_err("tp is not found%u", restore->peer_tpn);
		return NULL;
	}

	tp = tp_node->tp;
	if (ubcore_restore_tp_to_rts(dev, tp, get_random_u32(), restore->rx_psn) != 0) {
		ubcore_log_err("Failed to modify tp to rts %u", restore->rx_psn);
		return NULL;
	}
	return tp;
}

/* RC restore shares the advised-target path. */
static struct ubcore_tp *ubcore_restore_bound_target_tp(struct ubcore_device *dev,
							struct ubcore_nl_restore_tp_req *restore)
{
	return ubcore_restore_advised_target_tp(dev, restore);
}

/* Dispatch a restore request by transport mode; only IB RM/RC with a real ta
 * are restorable.
 */
static struct ubcore_tp *ubcore_handle_restore_tp(struct ubcore_device *dev,
						  struct ubcore_nl_restore_tp_req *restore)
{
	if (dev->transport_type != UBCORE_TRANSPORT_IB || restore->trans_mode == UBCORE_TP_UM ||
	    restore->ta.type == UBCORE_TA_NONE || restore->ta.type >= UBCORE_TA_VIRT)
		return NULL;

	if (restore->trans_mode == UBCORE_TP_RM)
		return ubcore_restore_advised_target_tp(dev, restore);
	else
		return ubcore_restore_bound_target_tp(dev, restore);
}

/* Netlink handler: restore the target tp named by a remote restore request. */
struct ubcore_nlmsg *ubcore_handle_restore_tp_req(struct ubcore_nlmsg *req)
{
	struct ubcore_nl_restore_tp_req *restore =
		(struct ubcore_nl_restore_tp_req *)(void *)req->payload;
	struct ubcore_device *dev;
	struct ubcore_tp *tp;

	if (req->payload_len != sizeof(struct ubcore_nl_restore_tp_req)) {
		ubcore_log_err("Invalid restore req");
		return NULL;
	}

	dev = ubcore_find_device(&req->dst_eid, req->transport_type);
	if (dev == NULL || !ubcore_have_tp_ops(dev)) {
		if (dev != NULL)
			ubcore_put_device(dev);
		ubcore_log_err("Failed to find device or device ops invalid");
		return ubcore_get_restore_tp_response(req, NULL);
	}

	tp = ubcore_handle_restore_tp(dev, restore);
	if (tp == NULL)
		ubcore_log_err("Failed to restore target tp towards remote eid %pI6c",
			       &req->src_eid);

	ubcore_put_device(dev);
	return ubcore_get_restore_tp_response(req, tp);
}
EXPORT_SYMBOL(ubcore_handle_restore_tp_req);

/* Apply the given utp attributes to every utp under the device identified by
 * eid (looked up via the driver's query_res op). Not supported on IB.
 */
int ubcore_config_utp(struct ubcore_device *dev, const union ubcore_eid *eid,
		      const struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask mask)
{
	struct ubcore_res_dev_val dev_val = { 0 };
	struct ubcore_res_key key_val;
	struct ubcore_res_val val;
	uint32_t i;

	if (dev == NULL || eid == NULL || attr == NULL || dev->ops == NULL ||
	    dev->ops->query_res == NULL || dev->ops->config_utp == NULL) {
		ubcore_log_err("dev ops has a null pointer.\n");
		return -1;
	}
	if (dev->transport_type == UBCORE_TRANSPORT_IB) {
		ubcore_log_err(
			"The configuration modification of this version of utp is not supported.\n");
		return -1;
	}
	// Query the utp_list under the device
	val.addr = (uintptr_t)&dev_val;
	val.len = sizeof(struct ubcore_res_dev_val);
	key_val.type = UBCORE_RES_KEY_URMA_DEV;
	key_val.key = eid->in4.addr;
	if (dev->ops->query_res(dev, &key_val, &val) != 0) {
		ubcore_log_err("failed to query res.\n");
		return -1;
	}
	for (i = 0; dev_val.utp_list != NULL && i < dev_val.utp_cnt; i++) {
		if (dev->ops->config_utp(dev, dev_val.utp_list[i], attr, mask) != 0) {
			ubcore_log_err("failed to config utp.\n");
			return -1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ubcore_config_utp);

/* Log the configuration of every utp under the device identified by eid. */
int ubcore_show_utp(struct ubcore_device *dev, const union ubcore_eid *eid)
{
	struct ubcore_res_dev_val dev_val = { 0 };
	struct ubcore_res_utp_val utp_val = { 0 };
	struct ubcore_res_key key_val;
	struct ubcore_res_val val;
	uint32_t i;

	if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->query_res == NULL) {
		ubcore_log_err("dev ops has a null pointer.\n");
		return -1;
	}
	// Query the utp_list under the device
	val.addr = (uintptr_t)&dev_val;
	val.len = sizeof(struct ubcore_res_dev_val);
	key_val.type = UBCORE_RES_KEY_URMA_DEV;
	key_val.key = eid->in4.addr;
	if (dev->ops->query_res(dev, &key_val, &val) != 0) {
		ubcore_log_err("failed to query res.\n");
		return -1;
	}
	for (i = 0; dev_val.utp_list != NULL && i < dev_val.utp_cnt; i++) {
		// Query the utp_val under the utp list
		val.addr = (uintptr_t)&utp_val;
		val.len = sizeof(struct ubcore_res_utp_val);
		key_val.type = UBCORE_RES_KEY_UTP;
		key_val.key = dev_val.utp_list[i];
		if (dev->ops->query_res(dev, &key_val, &val) != 0) {
			ubcore_log_err("failed to query res.\n");
			return -1;
		}
		ubcore_log_info("-----------utp_info---------\n");
		ubcore_log_info("--utp_id: %d\n", (int)utp_val.utp_id);
		ubcore_log_info("--spray_en: %d\n", (int)utp_val.spray_en);
		ubcore_log_info("--data_udp_start: %d\n", (int)utp_val.data_udp_start);
		ubcore_log_info("--udp_range: %d\n", (int)utp_val.udp_range);
		ubcore_log_info("----------------------------\n");
	}
	return 0;
}
EXPORT_SYMBOL(ubcore_show_utp);
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Description: ubcore tp header
 * Author: Yan Fangfang
 * Create: 2022-09-08
 * Note:
 * History: 2022-09-08: Create file
 */

#ifndef UBCORE_TP_H
#define UBCORE_TP_H

#include /* NOTE(review): include target lost in patch mangling - restore the original <...> header */
#include "ubcore_netlink.h"
#include "ubcore_tp_table.h"

/* Lookup context for a TP table entry: the table to search plus the
 * pre-computed bucket hash and the entry key. */
struct ubcore_tp_meta {
	struct ubcore_hash_table *ht;
	uint32_t hash;
	struct ubcore_tp_key key;
};

/* Advice passed through the TP advise/bind paths: the transport actor (ta)
 * together with the table lookup meta for the TP entry. */
struct ubcore_tp_advice {
	struct ubcore_ta ta;
	struct ubcore_tp_meta meta;
};

/*
 * True iff the driver implements the full TP life cycle (create/modify/destroy).
 * NOTE(review): dev->ops is dereferenced without a NULL check here; drivers are
 * presumably required to register ops before any TP call - confirm.
 */
static inline bool ubcore_have_tp_ops(const struct ubcore_device *dev)
{
	return (dev != NULL && dev->ops->create_tp != NULL && dev->ops->modify_tp != NULL &&
		dev->ops->destroy_tp != NULL);
}

/* alpha */
/* Create (or reuse) a TP towards @remote_eid as described by @advice. */
int ubcore_advise_tp(struct ubcore_device *dev, const union ubcore_eid *remote_eid,
		     struct ubcore_tp_advice *advice, struct ubcore_udata *udata);
/* Reverse of ubcore_advise_tp for the same @advice. */
int ubcore_unadvise_tp(struct ubcore_device *dev, struct ubcore_tp_advice *advice);

/* Netlink handlers for peer TP requests; each returns the reply message
 * (ownership of the returned nlmsg presumably passes to the caller - confirm). */
struct ubcore_nlmsg *ubcore_handle_create_tp_req(struct ubcore_nlmsg *req);
struct ubcore_nlmsg *ubcore_handle_destroy_tp_req(struct ubcore_nlmsg *req);
struct ubcore_nlmsg *ubcore_handle_restore_tp_req(struct ubcore_nlmsg *req);

/* bind tp APIs */
int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty,
		   struct ubcore_tp_advice *advice, struct ubcore_udata *udata);
int ubcore_unbind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty,
		     struct ubcore_tp_advice *advice);
+/* Called when clear tp table */ +int ubcore_destroy_tp(struct ubcore_tp *tp); + +/* restore tp from error state */ +void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_tp_table.c b/drivers/ub/urma/ubcore/ubcore_tp_table.c new file mode 100644 index 0000000000000000000000000000000000000000..2b2a26acb4d5c22cda5039645a6c5969c2749f85 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp_table.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore tp table implementation + * Author: Yan Fangfang + * Create: 2023-02-09 + * Note: + * History: 2023-02-09: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_priv.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" + +void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, const struct ubcore_jetty_id *jetty_id) +{ + memset(key, 0, sizeof(struct ubcore_tp_key)); + key->key_type = UBCORE_TP_KEY_JETTY_ID; + key->jetty_id = *jetty_id; +} + +void ubcore_remove_tp_node(struct ubcore_hash_table *ht, struct ubcore_tp_node *tp_node) +{ + if (tp_node == NULL) + return; + + ubcore_hash_table_remove(ht, &tp_node->hnode); + kfree(tp_node); +} + +/* Find and remove the tp from table only if it is unreferenced */ +struct ubcore_tp *ubcore_find_remove_tp(struct ubcore_hash_table *ht, uint32_t hash, + const struct ubcore_tp_key *key) +{ + struct ubcore_tp_node *tp_node; + struct ubcore_tp *tp = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + tp_node = ubcore_hash_table_lookup_nolock(ht, hash, key); + if (tp_node == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + + if (atomic_dec_return(&tp_node->tp->use_cnt) == 0) { + tp = tp_node->tp; + hlist_del(&tp_node->hnode); + kfree(tp_node); + } + spin_unlock(&ht->lock); + return tp; +} + +struct ubcore_hash_table *ubcore_create_tptable(void) +{ + struct ubcore_ht_param p = { .size = UBCORE_HASH_TABLE_SIZE, + .node_offset = offsetof(struct ubcore_tp_node, hnode), + .key_offset = offsetof(struct ubcore_tp_node, key), + .key_size = sizeof(struct ubcore_tp_key), + .cmp_f = NULL, + .free_f = NULL }; + struct ubcore_hash_table *ht; + + ht = kcalloc(1, sizeof(struct ubcore_hash_table), GFP_KERNEL); + if (ht == NULL) + return NULL; + + if (ubcore_hash_table_alloc(ht, &p) != 0) { + kfree(ht); + ubcore_log_err("Failed to calloc jfs tp hash table"); + return NULL; + } + return ht; +} + +static void ubcore_free_tp_node(void *obj) 
+{ + struct ubcore_tp_node *tp_node = (struct ubcore_tp_node *)obj; + (void)ubcore_destroy_tp(tp_node->tp); + kfree(tp_node); +} + +static void ubcore_tptable_release(struct kref *kref) +{ + struct ubcore_hash_table *ht = container_of(kref, struct ubcore_hash_table, kref); + + kfree(ht); +} + +void ubcore_destroy_tptable(struct ubcore_hash_table **pp_ht) +{ + struct ubcore_hash_table *ht; + + if (pp_ht == NULL || *pp_ht == NULL) + return; + + ht = *pp_ht; + *pp_ht = NULL; + ubcore_hash_table_free_with_cb(ht, ubcore_free_tp_node); + /* pair with kref_init */ + (void)kref_put(&ht->kref, ubcore_tptable_release); +} + +struct ubcore_hash_table *ubcore_get_tptable(struct ubcore_hash_table *ht) +{ + if (ht == NULL) + return NULL; + + kref_get(&ht->kref); + return ht; +} + +void ubcore_put_tptable(struct ubcore_hash_table *ht) +{ + if (ht == NULL) + return; + + (void)kref_put(&ht->kref, ubcore_tptable_release); +} + +struct ubcore_tp_node *ubcore_add_tp_node(struct ubcore_hash_table *ht, uint32_t hash, + const struct ubcore_tp_key *key, struct ubcore_tp *tp, + struct ubcore_ta *ta) +{ + struct ubcore_tp_node *new_tp_node; + struct ubcore_tp_node *tp_node; + + new_tp_node = kzalloc(sizeof(struct ubcore_tp_node), GFP_KERNEL); + if (new_tp_node == NULL) + return NULL; + + + new_tp_node->key = *key; + new_tp_node->tp = tp; + new_tp_node->ta = *ta; + mutex_init(&new_tp_node->lock); + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + kfree(new_tp_node); + return NULL; + } + tp_node = ubcore_hash_table_lookup_nolock(ht, hash, key); + if (tp_node != NULL) { + spin_unlock(&ht->lock); + kfree(new_tp_node); + return tp_node; + } + + ubcore_hash_table_add_nolock(ht, &new_tp_node->hnode, hash); + /* set private data for tp restore */ + tp->priv = new_tp_node; + spin_unlock(&ht->lock); + return new_tp_node; +} + +struct ubcore_tp_node *ubcore_add_tp_with_tpn(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + struct ubcore_tp_node *tp_node; + int ret; + + 
tp_node = kzalloc(sizeof(struct ubcore_tp_node), GFP_KERNEL); + if (tp_node == NULL) + return NULL; + + tp_node->key.key_type = UBCORE_TP_KEY_TPN; + tp_node->key.tpn = tp->tpn; + tp_node->tp = tp; + mutex_init(&tp_node->lock); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_TP], &tp_node->hnode, tp->tpn); + if (ret != 0) { + ubcore_log_err("Failed to add tp with tpn %u to tp table", tp->tpn); + kfree(tp_node); + return NULL; + } + /* set private data to find tp node fast */ + tp->priv = tp_node; + return tp_node; +} + +struct ubcore_tp *ubcore_remove_tp_with_tpn(struct ubcore_device *dev, uint32_t tpn) +{ + struct ubcore_tp_key key; + + memset(&key, 0, sizeof(struct ubcore_tp_key)); + key.key_type = UBCORE_TP_KEY_TPN; + key.tpn = tpn; + return ubcore_find_remove_tp(&dev->ht[UBCORE_HT_TP], tpn, &key); +} diff --git a/drivers/ub/urma/ubcore/ubcore_tp_table.h b/drivers/ub/urma/ubcore/ubcore_tp_table.h new file mode 100644 index 0000000000000000000000000000000000000000..5aa0f70ab9ddfe3487dda8b9e842da14c81d8f36 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp_table.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
 *
 * Description: ubcore tp table header
 * Author: Yan Fangfang
 * Create: 2023-02-09
 * Note:
 * History: 2023-02-09: Create file
 */

#ifndef UBCORE_TP_TABLE_H
#define UBCORE_TP_TABLE_H

#include "ubcore_hash_table.h"
#include "ubcore_netlink.h"

/* How a TP table entry is keyed: by remote jetty id (initiator side)
 * or by tpn (target side). */
enum ubcore_tp_key_type { UBCORE_TP_KEY_JETTY_ID, UBCORE_TP_KEY_TPN };

/* __packed: the key is presumably hashed/compared as raw bytes (key_size is
 * passed to the hash table), so padding must not leak in - confirm. */
struct ubcore_tp_key {
	enum ubcore_tp_key_type key_type;
	union {
		struct ubcore_jetty_id jetty_id; /* for initiator tp towards target jfr or jetty */
		uint32_t tpn; /* for target tp */
	};
} __packed;

/* One TP table entry: key, the tp it tracks, its transport actor,
 * the bucket linkage and a per-entry lock. */
struct ubcore_tp_node {
	struct ubcore_tp_key key;
	struct ubcore_tp *tp;
	struct ubcore_ta ta;
	struct hlist_node hnode; /* linkage in a ubcore_hash_table bucket */
	struct mutex lock;
};

/* Fill @key to look up an initiator TP by @jetty_id. */
void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, const struct ubcore_jetty_id *jetty_id);

/* Return old tp node if key already exists */
struct ubcore_tp_node *ubcore_add_tp_node(struct ubcore_hash_table *ht, uint32_t hash,
					  const struct ubcore_tp_key *key, struct ubcore_tp *tp,
					  struct ubcore_ta *ta);
void ubcore_remove_tp_node(struct ubcore_hash_table *ht, struct ubcore_tp_node *tp_node);
/* Find and remove the tp from table only if it is unreferenced */
struct ubcore_tp *ubcore_find_remove_tp(struct ubcore_hash_table *ht, uint32_t hash,
					const struct ubcore_tp_key *key);

/* TP table ops for devices that do not natively support RM */
struct ubcore_hash_table *ubcore_create_tptable(void);
void ubcore_destroy_tptable(struct ubcore_hash_table **pp_ht);
struct ubcore_hash_table *ubcore_get_tptable(struct ubcore_hash_table *ht);
void ubcore_put_tptable(struct ubcore_hash_table *ht);

/* tpn-keyed entries in the device-wide table (target side). */
struct ubcore_tp_node *ubcore_add_tp_with_tpn(struct ubcore_device *dev, struct ubcore_tp *tp);
struct ubcore_tp *ubcore_remove_tp_with_tpn(struct ubcore_device *dev, uint32_t tpn);

#endif
diff --git a/drivers/ub/urma/uburma/Makefile b/drivers/ub/urma/uburma/Makefile index 72038c480241f9fbce89b4a2343c03355fe7a86d..4fea86fa790a0f91beb4726975d23c1eb75bebfe 100644
--- a/drivers/ub/urma/uburma/Makefile +++ b/drivers/ub/urma/uburma/Makefile @@ -4,6 +4,10 @@ # uburma-objs := uburma_main.o \ - uburma_cdev_file.o + uburma_dev_ops.o \ + uburma_cmd.o \ + uburma_cdev_file.o \ + uburma_event.o \ + uburma_uobj.o obj-$(CONFIG_UB) += uburma.o diff --git a/drivers/ub/urma/uburma/uburma_cdev_file.c b/drivers/ub/urma/uburma/uburma_cdev_file.c index cfc317dab628c173b90491f2bb7ebbd966daa946..700f8cfd1cd87d4d566cb43644d6f05c5af03816 100644 --- a/drivers/ub/urma/uburma/uburma_cdev_file.c +++ b/drivers/ub/urma/uburma/uburma_cdev_file.c @@ -35,6 +35,12 @@ /* callback information */ typedef ssize_t (*uburma_show_attr_cb)(const struct ubcore_device *ubc_dev, char *buf); typedef ssize_t (*uburma_store_attr_cb)(struct ubcore_device *ubc_dev, const char *buf, size_t len); +typedef ssize_t (*uburma_show_port_attr_cb)(const struct ubcore_device *ubc_dev, char *buf, + uint8_t port_num); +typedef ssize_t (*uburma_show_vf_attr_cb)(const struct ubcore_device *ubc_dev, char *buf, + uint16_t vf_num); +typedef ssize_t (*uburma_store_vf_attr_cb)(struct ubcore_device *ubc_dev, const char *buf, + size_t len, uint16_t vf_num); static ssize_t uburma_show_dev_attr(struct device *dev, struct device_attribute *attr, char *buf, uburma_show_attr_cb show_cb) @@ -127,7 +133,7 @@ static ssize_t eid_store_cb(struct ubcore_device *ubc_dev, const char *buf, size ssize_t ret; if (str_to_eid(buf, len, &eid) != 0) { - uburma_log_err("failed to str_to_eid: %s, %lu.\n", buf, len); + uburma_log_err("failed to str_to_eid: %s, %zu.\n", buf, len); return -EINVAL; } @@ -145,9 +151,459 @@ static ssize_t eid_store(struct device *dev, struct device_attribute *attr, cons static DEVICE_ATTR_RW(eid); // 0644 +static ssize_t guid_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%llu\n", ubc_dev->attr.guid); +} + +static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, 
attr, buf, guid_show_cb); +} + +static DEVICE_ATTR_RO(guid); + +static ssize_t max_upi_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.max_upi_cnt); +} + +static ssize_t max_upi_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_upi_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_upi_cnt); + +static ssize_t uburma_query_upi(const struct ubcore_device *ubc_dev, char *buf, uint16_t vf_id) +{ + struct ubcore_res_key key = { 0 }; + struct ubcore_res_val val = { 0 }; + uint32_t upi; + uint32_t i; + ssize_t ret; + + key.type = UBCORE_RES_KEY_UPI; + key.key = (uint32_t)vf_id; + + val.len = sizeof(uint32_t) * UBCORE_MAX_UPI_CNT; + val.addr = (uintptr_t)kcalloc(1, val.len, GFP_KERNEL); + if (val.addr == 0) { + uburma_log_err("kcalloc vf%u failed.\n", vf_id); + return -ENOMEM; + } + + if (ubcore_query_resource(ubc_dev, &key, &val) != 0) { + uburma_log_err("query vf%u resource failed.\n", vf_id); + kfree((void *)val.addr); + return -EPERM; + } + +#define UBURMA_UPI_STR_LEN (9) /* 2^20 <= 8bit, add 1 bit space */ + for (i = 0; i < (val.len / sizeof(upi)); i++) { + upi = *((uint32_t *)val.addr + i); + ret = snprintf(buf + (UBURMA_UPI_STR_LEN * i), UBURMA_UPI_STR_LEN + 1, "%8u ", upi); + if (ret <= 0) { + uburma_log_err("snprintf for vf%u upi failed %zd.\n", vf_id, ret); + kfree((void *)val.addr); + return ret; + } + } + + buf[(UBURMA_UPI_STR_LEN * i) - 1] = '\n'; + + kfree((void *)val.addr); + return (ssize_t)(UBURMA_UPI_STR_LEN * i); +} + +static int uburma_parse_upi_str(const char *buf, size_t len, uint16_t *idx, uint32_t *upi) +{ + int ret; + + ret = sscanf(buf, "%hu=%u", idx, upi); + if (ret <= 1) // ret must be equal to 2 + return -1; + + return 0; +} + +static ssize_t uburma_upi_store(struct ubcore_device *ubc_dev, const char *buf, size_t len, + uint16_t vf_id) +{ + ssize_t ret = -ENODEV; + uint16_t idx; + uint32_t upi; + 
+ ret = uburma_parse_upi_str(buf, len, &idx, &upi); + if (ret != 0) { + uburma_log_err("parse vf%u upi str:%s failed %zd.\n", vf_id, buf, ret); + return -EINVAL; + } + + if (ubcore_set_upi(ubc_dev, vf_id, idx, upi) != 0) { + uburma_log_err("set vf%u idx:%u upi:%u failed.\n", vf_id, idx, upi); + return -EPERM; + } + return (ssize_t)len; // len is required for success return. +} + +static ssize_t upi_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return uburma_query_upi(ubc_dev, buf, UBCORE_OWN_VF_ID); +} + +static ssize_t upi_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, upi_show_cb); +} + +static ssize_t upi_store_cb(struct ubcore_device *ubc_dev, const char *buf, size_t len) +{ + return uburma_upi_store(ubc_dev, buf, len, UBCORE_OWN_VF_ID); +} + +static ssize_t upi_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t len) +{ + return uburma_store_dev_attr(dev, attr, buf, len, upi_store_cb); +} + +static DEVICE_ATTR_RW(upi); + +static ssize_t feature_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "0x%x\n", ubc_dev->attr.dev_cap.feature.value); +} + +static ssize_t feature_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, feature_show_cb); +} + +static DEVICE_ATTR_RO(feature); + +static ssize_t max_jfc_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfc); +} + +static ssize_t max_jfc_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfc_show_cb); +} + +static DEVICE_ATTR_RO(max_jfc); + +static ssize_t max_jfs_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs); +} + +static ssize_t 
max_jfs_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfs_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs); + +static ssize_t max_jfr_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfr); +} + +static ssize_t max_jfr_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfr_show_cb); +} + +static DEVICE_ATTR_RO(max_jfr); + +static ssize_t max_jetty_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jetty); +} + +static ssize_t max_jetty_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jetty_show_cb); +} + +static DEVICE_ATTR_RO(max_jetty); + +static ssize_t max_jfc_depth_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfc_depth); +} + +static ssize_t max_jfc_depth_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfc_depth_show_cb); +} + +static DEVICE_ATTR_RO(max_jfc_depth); + +static ssize_t max_jfs_depth_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs_depth); +} + +static ssize_t max_jfs_depth_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfs_depth_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs_depth); + +static ssize_t max_jfr_depth_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfr_depth); +} + +static ssize_t max_jfr_depth_show(struct device *dev, struct device_attribute 
*attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfr_depth_show_cb); +} + +static DEVICE_ATTR_RO(max_jfr_depth); + +static ssize_t show_max_jfs_inline_size_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + ubc_dev->attr.dev_cap.max_jfs_inline_size); +} + +static ssize_t max_jfs_inline_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, show_max_jfs_inline_size_cb); +} + +static DEVICE_ATTR_RO(max_jfs_inline_size); + +static ssize_t max_jfs_sge_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs_sge); +} + +static ssize_t max_jfs_sge_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfs_sge_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs_sge); + +static ssize_t max_jfs_rsge_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfs_rsge); +} + +static ssize_t max_jfs_rsge_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfs_rsge_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs_rsge); + +static ssize_t max_jfr_sge_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.max_jfr_sge); +} + +static ssize_t max_jfr_sge_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_jfr_sge_show_cb); +} + +static DEVICE_ATTR_RO(max_jfr_sge); + +static ssize_t max_msg_size_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%llu\n", ubc_dev->attr.dev_cap.max_msg_size); +} + +static ssize_t max_msg_size_show(struct device *dev, struct 
device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_msg_size_show_cb); +} + +static DEVICE_ATTR_RO(max_msg_size); + +static ssize_t max_rc_outstd_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%llu\n", + ubc_dev->attr.dev_cap.max_rc_outstd_cnt); +} + +static ssize_t max_rc_outstd_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, max_rc_outstd_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_rc_outstd_cnt); + +static ssize_t trans_mode_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.trans_mode); +} + +static ssize_t trans_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, trans_mode_show_cb); +} + +static DEVICE_ATTR_RO(trans_mode); + +static ssize_t congestion_ctrl_alg_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + ubc_dev->attr.dev_cap.congestion_ctrl_alg); +} + +static ssize_t congestion_ctrl_alg_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, congestion_ctrl_alg_show_cb); +} + +static ssize_t congestion_ctrl_alg_store_cb(struct ubcore_device *ubc_dev, const char *buf, + size_t len) +{ + uint16_t value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret != 0) + return -EINVAL; + + ubc_dev->attr.dev_cap.congestion_ctrl_alg = value; + return (ssize_t)len; +} + +static ssize_t congestion_ctrl_alg_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + return uburma_store_dev_attr(dev, attr, buf, len, congestion_ctrl_alg_store_cb); +} + +static DEVICE_ATTR_RW(congestion_ctrl_alg); // 0644 + +static ssize_t comp_vector_cnt_show_cb(const struct ubcore_device *ubc_dev, char 
*buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.comp_vector_cnt); +} + +static ssize_t comp_vector_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, comp_vector_cnt_show_cb); +} + +static DEVICE_ATTR_RO(comp_vector_cnt); + +static ssize_t utp_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.dev_cap.utp_cnt); +} + +static ssize_t utp_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, utp_cnt_show_cb); +} + +static DEVICE_ATTR_RO(utp_cnt); + +static ssize_t port_count_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.port_cnt); +} + +static ssize_t port_count_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, port_count_show_cb); +} + +static DEVICE_ATTR_RO(port_count); + +static ssize_t virtualization_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%s\n", + ubc_dev->attr.virtualization ? 
"true" : "false"); +} + +static ssize_t virtualization_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, virtualization_show_cb); +} + +static DEVICE_ATTR_RO(virtualization); + +static ssize_t vf_cnt_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", ubc_dev->attr.vf_cnt); +} + +static ssize_t vf_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, vf_cnt_show_cb); +} + +static DEVICE_ATTR_RO(vf_cnt); + +static ssize_t transport_type_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%d\n", (int)ubc_dev->transport_type); +} + +static ssize_t transport_type_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, transport_type_show_cb); +} + +static DEVICE_ATTR_RO(transport_type); + +static ssize_t driver_name_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + if (ubc_dev->ops == NULL) + return -EINVAL; + + return snprintf(buf, UBCORE_MAX_DRIVER_NAME, "%s\n", ubc_dev->ops->driver_name); +} + +static ssize_t driver_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, driver_name_show_cb); +} + +static DEVICE_ATTR_RO(driver_name); + static struct attribute *uburma_dev_attrs[] = { &dev_attr_ubdev.attr, &dev_attr_eid.attr, + &dev_attr_guid.attr, + &dev_attr_max_upi_cnt.attr, + &dev_attr_upi.attr, + &dev_attr_feature.attr, + &dev_attr_max_jfc.attr, + &dev_attr_max_jfs.attr, + &dev_attr_max_jfr.attr, + &dev_attr_max_jetty.attr, + &dev_attr_max_jfc_depth.attr, + &dev_attr_max_jfs_depth.attr, + &dev_attr_max_jfr_depth.attr, + &dev_attr_max_jfs_inline_size.attr, + &dev_attr_max_jfs_sge.attr, + &dev_attr_max_jfs_rsge.attr, + &dev_attr_max_jfr_sge.attr, + &dev_attr_max_msg_size.attr, + 
&dev_attr_max_rc_outstd_cnt.attr, + &dev_attr_trans_mode.attr, + &dev_attr_congestion_ctrl_alg.attr, + &dev_attr_comp_vector_cnt.attr, + &dev_attr_utp_cnt.attr, + &dev_attr_port_count.attr, + &dev_attr_vf_cnt.attr, + &dev_attr_virtualization.attr, + &dev_attr_transport_type.attr, + &dev_attr_driver_name.attr, NULL, }; @@ -155,6 +611,318 @@ static const struct attribute_group uburma_dev_attr_group = { .attrs = uburma_dev_attrs, }; +static ssize_t uburma_show_port_attr(struct uburma_port *p, struct uburma_port_attribute *attr, + char *buf, uburma_show_port_attr_cb show_cb) +{ + struct uburma_device *ubu_dev = p->ubu_dev; + struct ubcore_device *ubc_dev; + ssize_t ret = -ENODEV; + int srcu_idx; + + if (!ubu_dev || !buf) { + uburma_log_err("Invalid argument.\n"); + return -EINVAL; + } + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev == NULL) { + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return -ENODEV; + } + + ret = show_cb(ubc_dev, buf, p->port_num); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return ret; +} + +static ssize_t max_mtu_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) +{ + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%d\n", + (int)ubc_dev->attr.port_attr[port_num].max_mtu); +} + +static ssize_t max_mtu_show(struct uburma_port *p, struct uburma_port_attribute *attr, char *buf) +{ + return uburma_show_port_attr(p, attr, buf, max_mtu_show_cb); +} + +static PORT_ATTR_RO(max_mtu); + +static ssize_t state_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(ubc_dev, &status) != 0) { + uburma_log_err("query device status for state failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + (uint32_t)status.port_status[port_num].state); +} + +static ssize_t state_show(struct uburma_port *p, struct 
uburma_port_attribute *attr, char *buf) +{ + return uburma_show_port_attr(p, attr, buf, state_show_cb); +} + +static PORT_ATTR_RO(state); + +static ssize_t active_speed_show_cb(const struct ubcore_device *ubc_dev, char *buf, + uint8_t port_num) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(ubc_dev, &status) != 0) { + uburma_log_err("query device status for active speed failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + status.port_status[port_num].active_speed); +} + +static ssize_t active_speed_show(struct uburma_port *p, struct uburma_port_attribute *attr, + char *buf) +{ + return uburma_show_port_attr(p, attr, buf, active_speed_show_cb); +} + +static PORT_ATTR_RO(active_speed); + +static ssize_t active_width_show_cb(const struct ubcore_device *ubc_dev, char *buf, + uint8_t port_num) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(ubc_dev, &status) != 0) { + uburma_log_err("query device status for active width failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + status.port_status[port_num].active_width); +} + +static ssize_t active_width_show(struct uburma_port *p, struct uburma_port_attribute *attr, + char *buf) +{ + return uburma_show_port_attr(p, attr, buf, active_width_show_cb); +} + +static PORT_ATTR_RO(active_width); + +static ssize_t active_mtu_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint8_t port_num) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(ubc_dev, &status) != 0) { + uburma_log_err("query device status for active mtu failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBURMA_MAX_VALUE_LEN, "%u\n", + (uint32_t)status.port_status[port_num].active_mtu); +} + +static ssize_t active_mtu_show(struct uburma_port *p, struct uburma_port_attribute *attr, char *buf) +{ + return uburma_show_port_attr(p, attr, buf, active_mtu_show_cb); +} + +static PORT_ATTR_RO(active_mtu); + 
static struct attribute *uburma_port_attrs[] = {
	&port_attr_max_mtu.attr, &port_attr_state.attr, &port_attr_active_speed.attr,
	&port_attr_active_width.attr, &port_attr_active_mtu.attr, NULL,
};

/* sysfs dispatch: route a kobject attribute read to the typed port show op. */
static ssize_t uburma_port_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct uburma_port_attribute *port_attr =
		container_of(attr, struct uburma_port_attribute, attr);
	struct uburma_port *p = container_of(kobj, struct uburma_port, kobj);

	if (!port_attr->show)
		return -EIO;

	return port_attr->show(p, port_attr, buf);
}

/* sysfs dispatch: route a kobject attribute write to the typed port store op. */
static ssize_t uburma_port_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf,
				      size_t count)
{
	struct uburma_port_attribute *port_attr =
		container_of(attr, struct uburma_port_attribute, attr);
	struct uburma_port *p = container_of(kobj, struct uburma_port, kobj);

	if (!port_attr->store)
		return -EIO;

	return port_attr->store(p, port_attr, buf, count);
}

static const struct sysfs_ops uburma_port_sysfs_ops = { .show = uburma_port_attr_show,
							.store = uburma_port_attr_store };

/* Empty release: the port kobject is embedded in uburma_device, whose
 * lifetime presumably covers every kobject reference - confirm. */
static void uburma_port_release(struct kobject *kobj)
{
}

/* NOTE(review): defined but never referenced here - uburma_port_type uses
 * .default_attrs instead. On kernels where default_attrs was removed this
 * group looks intended for .default_groups; confirm and wire up or drop. */
static const struct attribute_group uburma_port_groups = {
	.attrs = uburma_port_attrs,
};

static struct kobj_type uburma_port_type = { .release = uburma_port_release,
					     .sysfs_ops = &uburma_port_sysfs_ops,
					     .default_attrs = uburma_port_attrs
};

/* Run a VF show callback under SRCU so the ubcore device cannot be
 * unregistered while the attribute is being read. */
static ssize_t uburma_show_vf_attr(struct uburma_vf *vf, struct uburma_vf_attribute *attr,
				   char *buf, uburma_show_vf_attr_cb show_cb)
{
	struct uburma_device *ubu_dev = vf->ubu_dev;
	struct ubcore_device *ubc_dev;
	int srcu_idx;
	ssize_t ret;

	if (!ubu_dev) {
		uburma_log_err("Invalid argument in show_vf_attr.\n");
		return -EINVAL;
	}

	srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu);
	ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu);
	if (ubc_dev == NULL) {
		/* device already detached from the char device */
		srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx);
		return -ENODEV;
	}

	ret = show_cb(ubc_dev, buf, vf->vf_idx);
	srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx);
	return ret;
}

/* Run a VF store callback under SRCU; mirror of uburma_show_vf_attr. */
static ssize_t uburma_store_vf_attr(struct uburma_vf *vf, struct uburma_vf_attribute *attr,
				    const char *buf, size_t len, uburma_store_vf_attr_cb store_cb)
{
	struct uburma_device *ubu_dev = vf->ubu_dev;
	struct ubcore_device *ubc_dev;
	int srcu_idx;
	ssize_t ret;

	if (!ubu_dev) {
		uburma_log_err("Invalid argument in store_vf_attr.\n");
		return -EINVAL;
	}

	srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu);
	ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu);
	if (ubc_dev == NULL) {
		srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx);
		return -ENODEV;
	}

	ret = store_cb(ubc_dev, buf, len, vf->vf_idx);
	srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx);
	return ret;
}

/* Show the UPI table for this VF. */
static ssize_t vf_upi_show_cb(const struct ubcore_device *ubc_dev, char *buf, uint16_t vf_id)
{
	return uburma_query_upi(ubc_dev, buf, vf_id);
}

static ssize_t vf_upi_show(struct uburma_vf *vf, struct uburma_vf_attribute *attr, char *buf)
{
	return uburma_show_vf_attr(vf, attr, buf, vf_upi_show_cb);
}

/* Store one "<idx>=<upi>" entry for this VF. */
static ssize_t vf_upi_store_cb(struct ubcore_device *ubc_dev, const char *buf, size_t len,
			       uint16_t vf_id)
{
	if (ubc_dev == NULL || buf == NULL)
		return -EINVAL;

	return uburma_upi_store(ubc_dev, buf, len, vf_id);
}

static ssize_t vf_upi_store(struct uburma_vf *vf, struct uburma_vf_attribute *attr, const char *buf,
			    size_t len)
{
	return uburma_store_vf_attr(vf, attr, buf, len, vf_upi_store_cb);
}

static VF_ATTR(upi, 0644, vf_upi_show, vf_upi_store);

static struct attribute *uburma_vf_attrs[] = {
	&vf_attr_upi.attr,
	NULL,
};

/* sysfs dispatch for VF attributes; mirrors the port dispatchers above. */
static ssize_t uburma_vf_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct uburma_vf_attribute *vf_attr = container_of(attr, struct uburma_vf_attribute, attr);
	struct uburma_vf *vf = container_of(kobj, struct uburma_vf, kobj);

	if (!vf_attr->show)
		return -EIO;

	return vf_attr->show(vf, vf_attr, buf);
}

static ssize_t uburma_vf_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf,
				    size_t count)
{
	struct uburma_vf_attribute *vf_attr = container_of(attr, struct uburma_vf_attribute, attr);
	struct uburma_vf *vf = container_of(kobj, struct uburma_vf, kobj);

	if (!vf_attr->store)
		return -EIO;
	return vf_attr->store(vf, vf_attr, buf, count);
}

static const struct sysfs_ops uburma_vf_sysfs_ops = { .show = uburma_vf_attr_show,
						      .store = uburma_vf_attr_store };

/* Empty release: the vf kobject is embedded in uburma_device (see port note). */
static void uburma_vf_release(struct kobject *kobj)
{
}

/* NOTE(review): unused, same situation as uburma_port_groups above. */
static const struct attribute_group uburma_vf_groups = {
	.attrs = uburma_vf_attrs,
};

static struct kobj_type uburma_vf_type = { .release = uburma_vf_release,
					   .sysfs_ops = &uburma_vf_sysfs_ops,
					   .default_attrs = uburma_vf_attrs
};

/* Create /sys/.../port<N> attribute directory for @port_num.
 * NOTE(review): on kobject_init_and_add failure the caller must still
 * kobject_put the kobject per kernel kobject rules - confirm callers do. */
int uburma_create_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num)
{
	struct uburma_port *p;

	p = &ubu_dev->port[port_num];
	p->ubu_dev = ubu_dev;
	p->port_num = port_num;

	return kobject_init_and_add(&p->kobj, &uburma_port_type, &ubu_dev->dev->kobj, "port%hhu",
				    port_num);
}

/* Create /sys/.../vf<N> attribute directory for @vf_num (same caveat as ports). */
int uburma_create_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num)
{
	struct uburma_vf *vf;

	vf = &ubu_dev->vf[vf_num];
	vf->ubu_dev = ubu_dev;
	vf->vf_idx = vf_num;

	return kobject_init_and_add(&vf->kobj, &uburma_vf_type, &ubu_dev->dev->kobj, "vf%u",
				    vf_num);
}

int uburma_create_dev_attr_files(struct uburma_device *ubu_dev)
{
	int ret;
	/* ... middle of function elided by patch context ... */
	return 0;
}

/* Tear down the port<N> sysfs directory; drops the kobject reference. */
void uburma_remove_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num)
{
	kobject_put(&ubu_dev->port[port_num].kobj);
}

/* Tear down the vf<N> sysfs directory; drops the kobject reference. */
void uburma_remove_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num)
{
	kobject_put(&ubu_dev->vf[vf_num].kobj);
}

void uburma_remove_dev_attr_files(struct uburma_device *ubu_dev)
{
sysfs_remove_group(&ubu_dev->dev->kobj, &uburma_dev_attr_group); diff --git a/drivers/ub/urma/uburma/uburma_cdev_file.h b/drivers/ub/urma/uburma/uburma_cdev_file.h index 4207358f1f9a525947f6e3ad0718aac987f84905..c0a4483ce2e76477ab676f8feb0b56cca93d1e28 100644 --- a/drivers/ub/urma/uburma/uburma_cdev_file.h +++ b/drivers/ub/urma/uburma/uburma_cdev_file.h @@ -23,7 +23,35 @@ #include "uburma_types.h" +struct uburma_port_attribute { + struct attribute attr; + ssize_t (*show)(struct uburma_port *p, struct uburma_port_attribute *attr, char *buf); + ssize_t (*store)(struct uburma_port *p, struct uburma_port_attribute *attr, const char *buf, + size_t count); +}; + +#define PORT_ATTR(_name, _mode, _show, _store) \ + struct uburma_port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store) + +#define PORT_ATTR_RO(_name) struct uburma_port_attribute port_attr_##_name = __ATTR_RO(_name) + +struct uburma_vf_attribute { + struct attribute attr; + ssize_t (*show)(struct uburma_vf *vf, struct uburma_vf_attribute *attr, char *buf); + ssize_t (*store)(struct uburma_vf *vf, struct uburma_vf_attribute *attr, const char *buf, + size_t count); +}; + +#define VF_ATTR(_name, _mode, _show, _store) \ + struct uburma_vf_attribute vf_attr_##_name = __ATTR(_name, _mode, _show, _store) + +#define VF_ATTR_RO(_name) struct uburma_vf_attribute vf_attr_##_name = __ATTR_RO(_name) + +int uburma_create_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num); +int uburma_create_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num); int uburma_create_dev_attr_files(struct uburma_device *ubu_dev); +void uburma_remove_port_attr_files(struct uburma_device *ubu_dev, uint8_t port_num); +void uburma_remove_vf_attr_files(struct uburma_device *ubu_dev, uint32_t vf_num); void uburma_remove_dev_attr_files(struct uburma_device *ubu_dev); #endif /* UBURMA_CDEV_FILE_H */ diff --git a/drivers/ub/urma/uburma/uburma_cmd.c b/drivers/ub/urma/uburma/uburma_cmd.c new file mode 100644 index 
0000000000000000000000000000000000000000..af21dc76fc3f133b7a7294495a6ff21ce54fb602 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cmd.c @@ -0,0 +1,1595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma cmd implementation + * Author: Qian Guoxin + * Create: 2021-08-04 + * Note: + * History: 2021-08-04: Create file + * History: 2022-07-25: Yan Fangfang Change the prefix uburma_ioctl_ to uburma_cmd_ + */ + +#include +#include +#include +#include +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_event.h" +#include "uburma_file_ops.h" +#include "uburma_uobj.h" +#include "uburma_cmd.h" + +#define UBURMA_INVALID_TPN UINT_MAX + +void uburma_cmd_inc(struct uburma_device *ubu_dev) +{ + atomic_inc(&ubu_dev->cmdcnt); +} + +void uburma_cmd_dec(struct uburma_device *ubu_dev) +{ + if (atomic_dec_and_test(&ubu_dev->cmdcnt)) + complete(&ubu_dev->cmddone); +} + +void uburma_cmd_flush(struct uburma_device *ubu_dev) +{ + uburma_cmd_dec(ubu_dev); + wait_for_completion(&ubu_dev->cmddone); +} + +static inline void fill_udata(struct ubcore_udata *out, struct ubcore_ucontext *ctx, + struct uburma_cmd_udrv_priv *udata) +{ + out->uctx = ctx; + out->udrv_data = (struct ubcore_udrv_priv *)(void *)udata; +} + +static int uburma_cmd_create_ctx(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_ucontext *ucontext; + struct uburma_cmd_create_ctx arg; + struct uburma_uobj 
*uobj; + struct uburma_jfae_uobj *jfae; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_create_ctx)); + if (ret != 0) + return ret; + + mutex_lock(&file->mutex); + + ucontext = ubcore_alloc_ucontext(ubc_dev, arg.in.uasid, + (struct ubcore_udrv_priv *)(void *)&arg.udata); + if (IS_ERR_OR_NULL(ucontext)) { + mutex_unlock(&file->mutex); + return -EPERM; + } + + uobj = uobj_alloc(UOBJ_CLASS_JFAE, file); + if (IS_ERR(uobj)) { + ret = PTR_ERR(uobj); + goto free_ctx; + } + + jfae = container_of(uobj, struct uburma_jfae_uobj, uobj); + uburma_init_jfae(jfae, ubc_dev); + ucontext->jfae = uobj; + arg.out.async_fd = uobj->id; + file->ucontext = ucontext; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_ctx)); + if (ret != 0) + goto free_jfae; + + uobj_alloc_commit(uobj); + mutex_unlock(&file->mutex); + uburma_log_info("uburma create context success.\n"); + return ret; + +free_jfae: + uobj_alloc_abort(uobj); +free_ctx: + ubcore_free_ucontext(ubc_dev, ucontext); + mutex_unlock(&file->mutex); + return ret; +} + +static int uburma_cmd_destroy_ctx(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + mutex_lock(&file->mutex); + if (file->ucontext == NULL) { + mutex_unlock(&file->mutex); + return -EINVAL; + } + uburma_cleanup_uobjs(file, UBURMA_REMOVE_CLOSE); + ubcore_free_ucontext(ubc_dev, file->ucontext); + file->ucontext = NULL; + uburma_log_info("uburma destroy context success.\n"); + mutex_unlock(&file->mutex); + return 0; +} + +static void uburma_fill_attr(struct ubcore_seg_cfg *cfg, struct uburma_cmd_register_seg *arg) +{ + cfg->va = arg->in.va; + cfg->len = arg->in.len; + cfg->flag.value = arg->in.flag; + cfg->ukey.key = arg->in.key; + cfg->iova = arg->in.va; +} + +static int uburma_cmd_alloc_key_id(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct 
uburma_cmd_alloc_key_id arg; + struct ubcore_udata udata = { 0 }; + struct ubcore_key_id *key; + struct uburma_uobj *uobj; + + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_alloc_key_id)); + if (ret != 0) + return ret; + + fill_udata(&udata, file->ucontext, &arg.udata); + uobj = uobj_alloc(UOBJ_CLASS_KEY, file); + if (IS_ERR(uobj)) { + uburma_log_err("UOBJ_CLASS_KEY alloc fail!\n"); + return -ENOMEM; + } + + key = ubcore_alloc_key_id(ubc_dev, &udata); + if (IS_ERR_OR_NULL(key)) { + uburma_log_err("ubcore alloc key id failed.\n"); + ret = -EPERM; + goto err_free_uobj; + } + uobj->object = key; + arg.out.key_id = key->key_id; + arg.out.handle = uobj->id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_alloc_key_id)); + if (ret != 0) + goto err_free_key; + + return uobj_alloc_commit(uobj); + +err_free_key: + (void)ubcore_free_key_id(key); +err_free_uobj: + uobj_alloc_abort(uobj); + return ret; +} + +static int uburma_cmd_free_key_id(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_free_key_id arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_free_key_id)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_KEY, (int)arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find key id.\n"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore remove commit keyid failed.\n"); + return ret; +} + +static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_register_seg arg; + struct ubcore_seg_cfg cfg = { 0 }; + struct ubcore_target_seg *seg; + struct ubcore_udata udata = { 0 }; + struct uburma_uobj *uobj; + struct uburma_uobj 
*keyid_uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_register_seg)); + if (ret != 0) + return ret; + + keyid_uobj = uobj_get_read(UOBJ_CLASS_KEY, (int)arg.in.keyid_handle, file); + if (!IS_ERR(keyid_uobj)) + cfg.keyid = (struct ubcore_key_id *)keyid_uobj->object; + + uburma_fill_attr(&cfg, &arg); + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_alloc(UOBJ_CLASS_SEG, file); + if (IS_ERR(uobj)) { + uburma_log_err("UOBJ_CLASS_SEG alloc fail!\n"); + ret = -ENOMEM; + goto err_put_keyid; + } + + seg = ubcore_register_seg(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(seg)) { + uburma_log_err("ubcore_register_seg failed.\n"); + ret = -EPERM; + goto err_free_uobj; + } + uobj->object = seg; + arg.out.key_id = seg->seg.key_id; + arg.out.handle = uobj->id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_register_seg)); + if (ret != 0) + goto err_delete_seg; + + if (!IS_ERR(keyid_uobj)) + uobj_put_read(keyid_uobj); + uobj_alloc_commit(uobj); + return 0; + +err_delete_seg: + ubcore_unregister_seg(seg); +err_free_uobj: + uobj_alloc_abort(uobj); +err_put_keyid: + if (!IS_ERR(keyid_uobj)) + uobj_put_read(keyid_uobj); + return ret; +} + +static int uburma_cmd_unregister_seg(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unregister_seg arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unregister_seg)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_SEG, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find registered seg.\n"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore_unregister_seg failed.\n"); + return ret; +} + +static void uburma_write_async_event(struct ubcore_ucontext *ctx, 
uint64_t event_data, + uint32_t event_type, struct list_head *obj_event_list, + uint32_t *counter) +{ + struct uburma_jfae_uobj *jfae; + + rcu_read_lock(); + jfae = rcu_dereference(ctx->jfae); + if (jfae == NULL) { + rcu_read_unlock(); + return; + } + uburma_write_event(&jfae->jfe, event_data, event_type, obj_event_list, counter); + rcu_read_unlock(); +} + +void uburma_jfc_event_cb(struct ubcore_event *event, struct ubcore_ucontext *ctx) +{ + struct uburma_jfc_uobj *jfc_uobj; + + if (event->element.jfc == NULL) + return; + + jfc_uobj = (struct uburma_jfc_uobj *)event->element.jfc->jfc_cfg.jfc_context; + uburma_write_async_event(ctx, event->element.jfc->urma_jfc, event->event_type, + &jfc_uobj->async_event_list, &jfc_uobj->async_events_reported); +} + +void uburma_jfs_event_cb(struct ubcore_event *event, struct ubcore_ucontext *ctx) +{ + struct uburma_jfs_uobj *jfs_uobj; + + if (event->element.jfs == NULL) + return; + + jfs_uobj = (struct uburma_jfs_uobj *)event->element.jfs->jfs_cfg.jfs_context; + uburma_write_async_event(ctx, event->element.jfs->urma_jfs, event->event_type, + &jfs_uobj->async_event_list, &jfs_uobj->async_events_reported); +} + +void uburma_jfr_event_cb(struct ubcore_event *event, struct ubcore_ucontext *ctx) +{ + struct uburma_jfr_uobj *jfr_uobj; + + if (event->element.jfr == NULL) + return; + + jfr_uobj = (struct uburma_jfr_uobj *)event->element.jfr->jfr_cfg.jfr_context; + uburma_write_async_event(ctx, event->element.jfr->urma_jfr, event->event_type, + &jfr_uobj->async_event_list, &jfr_uobj->async_events_reported); +} + +void uburma_jetty_event_cb(struct ubcore_event *event, struct ubcore_ucontext *ctx) +{ + struct uburma_jetty_uobj *jetty_uobj; + + if (event->element.jetty == NULL) + return; + + jetty_uobj = (struct uburma_jetty_uobj *)event->element.jetty->jetty_cfg.jetty_context; + uburma_write_async_event(ctx, event->element.jetty->urma_jetty, event->event_type, + &jetty_uobj->async_event_list, &jetty_uobj->async_events_reported); +} + 
+static int uburma_cmd_create_jfs(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfs arg; + struct ubcore_jfs_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct uburma_jfs_uobj *jfs_uobj; + struct uburma_uobj *jfc_uobj; + struct ubcore_jfs *jfs; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_create_jfs)); + if (ret != 0) + return ret; + + cfg.depth = arg.in.depth; + cfg.flag.value = arg.in.flag; + cfg.trans_mode = arg.in.trans_mode; + cfg.max_sge = arg.in.max_sge; + cfg.max_rsge = arg.in.max_rsge; + cfg.max_inline_data = arg.in.max_inline_data; + cfg.retry_cnt = arg.in.retry_cnt; + cfg.rnr_retry = arg.in.rnr_retry; + cfg.err_timeout = arg.in.err_timeout; + cfg.priority = arg.in.priority; + + jfs_uobj = (struct uburma_jfs_uobj *)uobj_alloc(UOBJ_CLASS_JFS, file); + if (IS_ERR(jfs_uobj)) { + uburma_log_err("UOBJ_CLASS_JFS alloc fail!\n"); + return -ENOMEM; + } + jfs_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jfs_uobj->async_event_list); + cfg.jfs_context = jfs_uobj; + + jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.jfc_handle, file); + if (IS_ERR(jfc_uobj)) { + uburma_log_err("failed to find jfc, jfc_handle:%llu.\n", arg.in.jfc_handle); + ret = -EINVAL; + goto err_alloc_abort; + } + cfg.jfc = jfc_uobj->object; + fill_udata(&udata, file->ucontext, &arg.udata); + + jfs = ubcore_create_jfs(ubc_dev, &cfg, uburma_jfs_event_cb, &udata); + if (IS_ERR_OR_NULL(jfs)) { + uburma_log_err("create jfs or get jfs_id failed.\n"); + ret = -EPERM; + goto err_put_jfc; + } + jfs_uobj->uobj.object = jfs; + jfs->urma_jfs = arg.in.urma_jfs; + + /* Do not release jfae fd until jfs is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jfs; + + arg.out.id = jfs->id; + arg.out.depth = jfs->jfs_cfg.depth; + arg.out.max_sge = jfs->jfs_cfg.max_sge; + arg.out.max_rsge = jfs->jfs_cfg.max_rsge; + arg.out.max_inline_data = 
jfs->jfs_cfg.max_inline_data; + arg.out.handle = jfs_uobj->uobj.id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_jfs)); + if (ret != 0) + goto err_put_jfae; + + uobj_put_read(jfc_uobj); + uobj_alloc_commit(&jfs_uobj->uobj); + return 0; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jfs: + ubcore_delete_jfs(jfs); +err_put_jfc: + uobj_put_read(jfc_uobj); +err_alloc_abort: + uobj_alloc_abort(&jfs_uobj->uobj); + return ret; +} + +static int uburma_cmd_delete_jfs(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jfs arg; + struct uburma_jfs_uobj *jfs_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_delete_jfs)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JFS, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfs"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. 
*/ + uobj_get(uobj); + jfs_uobj = container_of(uobj, struct uburma_jfs_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jfs failed, ret:%d.\n", ret); + uobj_put(uobj); + return ret; + } + + arg.out.async_events_reported = jfs_uobj->async_events_reported; + uobj_put(uobj); + return uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_delete_jfs)); +} + +static int uburma_cmd_import_seg(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_seg arg; + struct ubcore_target_seg_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_target_seg *tseg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_import_seg)); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_SEG, file); + if (IS_ERR(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JFR alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.seg.ubva.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.seg.ubva.uasid = arg.in.uasid; + cfg.seg.ubva.va = arg.in.va; + cfg.seg.len = arg.in.len; + cfg.seg.attr.value = arg.in.flag; + cfg.seg.key_id = arg.in.key_id; + fill_udata(&udata, file->ucontext, &arg.udata); + + tseg = ubcore_import_seg(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(tseg)) { + uburma_log_err("import seg failed.\n"); + uobj_alloc_abort(uobj); + return -EPERM; + } + + uobj->object = tseg; + arg.out.handle = uobj->id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_import_seg)); + if (ret != 0) { + (void)ubcore_unimport_seg(tseg); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return ret; +} + +static int uburma_cmd_unimport_seg(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_seg arg; + struct 
uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unimport_seg)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_TARGET_SEG, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find imported target seg.\n"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("unimport seg failed.\n"); + + return ret; +} + +static int uburma_cmd_create_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfr arg; + struct uburma_uobj *jfc_uobj; + struct uburma_jfr_uobj *jfr_uobj; + struct ubcore_jfr_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfr *jfr; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_create_jfr)); + if (ret != 0) + return ret; + + cfg.id = arg.in.id; + cfg.flag.value = arg.in.flag; + cfg.trans_mode = arg.in.trans_mode; + cfg.depth = arg.in.depth; + cfg.max_sge = arg.in.max_sge; + cfg.min_rnr_timer = arg.in.min_rnr_timer; + cfg.ukey.key = arg.in.key; + fill_udata(&udata, file->ucontext, &arg.udata); + + jfr_uobj = (struct uburma_jfr_uobj *)uobj_alloc(UOBJ_CLASS_JFR, file); + if (IS_ERR(jfr_uobj)) { + uburma_log_err("UOBJ_CLASS_JFR alloc fail!\n"); + return -ENOMEM; + } + jfr_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jfr_uobj->async_event_list); + cfg.jfr_context = jfr_uobj; + + jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.jfc_handle, file); + if (IS_ERR(jfc_uobj)) { + uburma_log_err("failed to find jfc, jfc_handle:%llu.\n", arg.in.jfc_handle); + ret = -EINVAL; + goto err_alloc_abort; + } + cfg.jfc = jfc_uobj->object; + + jfr = ubcore_create_jfr(ubc_dev, &cfg, uburma_jfr_event_cb, &udata); + if (IS_ERR_OR_NULL(jfr)) { + uburma_log_err("create jfr or get jfr_id failed.\n"); + ret = -EPERM; + goto err_put_jfc; + } + jfr_uobj->uobj.object = jfr; + 
jfr->urma_jfr = arg.in.urma_jfr; + + /* Do not release jfae fd until jfr is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jfr; + + arg.out.id = jfr->id; + arg.out.depth = jfr->jfr_cfg.depth; + arg.out.max_sge = jfr->jfr_cfg.max_sge; + arg.out.handle = jfr_uobj->uobj.id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_jfr)); + if (ret != 0) + goto err_put_jfae; + + uobj_put_read(jfc_uobj); + uobj_alloc_commit(&jfr_uobj->uobj); + return ret; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jfr: + (void)ubcore_delete_jfr(jfr); +err_put_jfc: + uobj_put_read(jfc_uobj); +err_alloc_abort: + uobj_alloc_abort(&jfr_uobj->uobj); + return ret; +} + +static int uburma_cmd_modify_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jfr arg; + struct uburma_uobj *uobj; + struct ubcore_jfr_attr attr = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfr *jfr; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_modify_jfr)); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.rx_threshold = arg.in.rx_threshold; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JFR, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfr.\n"); + return -EINVAL; + } + + jfr = (struct ubcore_jfr *)uobj->object; + ret = ubcore_modify_jfr(jfr, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jfr failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_modify_jfr)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jfr arg; + 
struct uburma_jfr_uobj *jfr_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_delete_jfr)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JFR, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfr"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. */ + uobj_get(uobj); + jfr_uobj = container_of(uobj, struct uburma_jfr_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jfr failed, ret:%d.\n", ret); + uobj_put(uobj); + return ret; + } + + arg.out.async_events_reported = jfr_uobj->async_events_reported; + uobj_put(uobj); + return uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_delete_jfr)); +} + +static int uburma_cmd_create_jfc(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfc arg; + struct uburma_jfc_uobj *jfc_uobj; + struct uburma_jfce_uobj *jfce; + struct ubcore_jfc_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfc *jfc; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_create_jfc)); + if (ret != 0) + return ret; + + cfg.depth = arg.in.depth; + cfg.flag.value = arg.in.flag; + + /* jfce may be ERR_PTR */ + jfce = uburma_get_jfce_uobj(arg.in.jfce_fd, file); + if (arg.in.jfce_fd >= 0 && IS_ERR(jfce)) { + uburma_log_err("Failed to get jfce.\n"); + return -EINVAL; + } + + fill_udata(&udata, file->ucontext, &arg.udata); + + jfc_uobj = (struct uburma_jfc_uobj *)uobj_alloc(UOBJ_CLASS_JFC, file); + if (IS_ERR(jfc_uobj)) { + uburma_log_err("UOBJ_CLASS_JFC alloc fail!\n"); + ret = -1; + goto err_put_jfce; + } + jfc_uobj->comp_events_reported = 0; + jfc_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jfc_uobj->comp_event_list); + 
INIT_LIST_HEAD(&jfc_uobj->async_event_list); + cfg.jfc_context = jfc_uobj; + + jfc = ubcore_create_jfc(ubc_dev, &cfg, uburma_jfce_handler, uburma_jfc_event_cb, &udata); + if (IS_ERR_OR_NULL(jfc)) { + uburma_log_err("create jfc or get jfc_id failed.\n"); + ret = -EPERM; + goto err_alloc_abort; + } + + jfc_uobj->jfce = (struct uburma_uobj *)jfce; + jfc_uobj->uobj.object = jfc; + jfc->urma_jfc = arg.in.urma_jfc; + + /* Do not release jfae fd until jfc is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jfc; + + arg.out.id = jfc->id; + arg.out.depth = jfc->jfc_cfg.depth; + arg.out.handle = jfc_uobj->uobj.id; + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_jfc)); + if (ret != 0) + goto err_put_jfae; + + uobj_alloc_commit(&jfc_uobj->uobj); + return 0; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jfc: + (void)ubcore_delete_jfc(jfc); +err_alloc_abort: + uobj_alloc_abort(&jfc_uobj->uobj); +err_put_jfce: + if (!IS_ERR(jfce)) + uobj_put(&jfce->uobj); + return ret; +} + +static int uburma_cmd_modify_jfc(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jfc arg; + struct uburma_uobj *uobj; + struct ubcore_jfc_attr attr = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfc *jfc; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_modify_jfc)); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.moderate_count = arg.in.moderate_count; + attr.moderate_period = arg.in.moderate_period; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JFC, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfc.\n"); + return -EINVAL; + } + + jfc = (struct ubcore_jfc *)uobj->object; + ret = ubcore_modify_jfc(jfc, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + 
uburma_log_err("modify jfc failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_modify_jfc)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jfc(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jfc arg; + struct uburma_uobj *uobj; + struct uburma_jfc_uobj *jfc_uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_delete_jfc)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JFC, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfc.\n"); + return -EINVAL; + } + + /* To get events_reported after obj removed. */ + uobj_get(uobj); + jfc_uobj = container_of(uobj, struct uburma_jfc_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jfc failed, ret:%d.\n", ret); + uobj_put(uobj); + return ret; + } + + arg.out.comp_events_reported = jfc_uobj->comp_events_reported; + arg.out.async_events_reported = jfc_uobj->async_events_reported; + uobj_put(uobj); + return uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_delete_jfc)); +} + +static void fill_create_jetty_attr(struct ubcore_jetty_cfg *cfg, + const struct uburma_cmd_create_jetty *arg) +{ + cfg->id = arg->in.id; + cfg->jfs_depth = arg->in.jfs_depth; + cfg->jfr_depth = arg->in.jfr_depth; + cfg->flag.value = arg->in.flag; + cfg->trans_mode = arg->in.trans_mode; + cfg->max_send_sge = arg->in.max_send_sge; + cfg->max_send_rsge = arg->in.max_send_rsge; + cfg->max_recv_sge = arg->in.max_recv_sge; + cfg->max_inline_data = arg->in.max_inline_data; + cfg->priority = arg->in.priority; + cfg->retry_cnt = arg->in.retry_cnt; + cfg->rnr_retry = arg->in.rnr_retry; + cfg->err_timeout = arg->in.err_timeout; + cfg->min_rnr_timer = arg->in.min_rnr_timer; +} + 
+static void fill_create_jetty_out(struct uburma_cmd_create_jetty *arg, + const struct ubcore_jetty *jetty) +{ + arg->out.id = jetty->id; + arg->out.jfs_depth = jetty->jetty_cfg.jfs_depth; + arg->out.jfr_depth = jetty->jetty_cfg.jfr_depth; + arg->out.max_send_sge = jetty->jetty_cfg.max_send_sge; + arg->out.max_send_rsge = jetty->jetty_cfg.max_send_rsge; + arg->out.max_recv_sge = jetty->jetty_cfg.max_recv_sge; + arg->out.max_inline_data = jetty->jetty_cfg.max_inline_data; +} + +static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jetty arg; + struct uburma_uobj *send_jfc_uobj = ERR_PTR(-ENOENT); + struct uburma_uobj *recv_jfc_uobj = ERR_PTR(-ENOENT); + struct uburma_uobj *jfr_uobj = ERR_PTR(-ENOENT); + struct ubcore_jetty_cfg cfg = { 0 }; + struct uburma_jetty_uobj *jetty_uobj; + struct ubcore_udata udata; + struct ubcore_jetty *jetty; + int ret = 0; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_create_jetty)); + if (ret != 0) + return ret; + + jetty_uobj = (struct uburma_jetty_uobj *)uobj_alloc(UOBJ_CLASS_JETTY, file); + if (IS_ERR(jetty_uobj)) { + uburma_log_err("UOBJ_CLASS_JETTY alloc fail!\n"); + return -ENOMEM; + } + jetty_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jetty_uobj->async_event_list); + cfg.jetty_context = jetty_uobj; + + fill_create_jetty_attr(&cfg, &arg); + send_jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.send_jfc_handle, file); + recv_jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.recv_jfc_handle, file); + if (IS_ERR(send_jfc_uobj) || IS_ERR(recv_jfc_uobj)) { + uburma_log_err("failed to find send %llu or recv jfc %llu.\n", + arg.in.send_jfc_handle, arg.in.recv_jfc_handle); + ret = -EINVAL; + goto err_put; + } + cfg.send_jfc = send_jfc_uobj->object; + cfg.recv_jfc = recv_jfc_uobj->object; + if (cfg.flag.bs.share_jfr != 0) { + jfr_uobj = uobj_get_read(UOBJ_CLASS_JFR, 
arg.in.jfr_handle, file); + if (IS_ERR(jfr_uobj)) { + uburma_log_err("failed to find jfr, jfr_handle:%llu.\n", arg.in.jfr_handle); + ret = -EINVAL; + goto err_put; + } + cfg.jfr = jfr_uobj->object; + } + cfg.ukey.key = arg.in.key; + fill_udata(&udata, file->ucontext, &arg.udata); + + jetty = ubcore_create_jetty(ubc_dev, &cfg, uburma_jetty_event_cb, &udata); + if (IS_ERR_OR_NULL(jetty)) { + uburma_log_err("create jetty or get jetty_id failed.\n"); + ret = -EPERM; + goto err_put; + } + + jetty_uobj->uobj.object = jetty; + jetty->urma_jetty = arg.in.urma_jetty; + /* Do not release jfae fd until jetty is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jetty; + + fill_create_jetty_out(&arg, jetty); + arg.out.handle = jetty_uobj->uobj.id; + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_jetty)); + if (ret != 0) + goto err_put_jfae; + + if (cfg.jfr) + uobj_put_read(jfr_uobj); + uobj_put_read(send_jfc_uobj); + uobj_put_read(recv_jfc_uobj); + uobj_alloc_commit(&jetty_uobj->uobj); + return 0; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jetty: + (void)ubcore_delete_jetty(jetty); +err_put: + if (!IS_ERR(jfr_uobj)) + uobj_put_read(jfr_uobj); + if (!IS_ERR(recv_jfc_uobj)) + uobj_put_read(recv_jfc_uobj); + if (!IS_ERR(send_jfc_uobj)) + uobj_put_read(send_jfc_uobj); + uobj_alloc_abort(&jetty_uobj->uobj); + return ret; +} + +static int uburma_cmd_modify_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jetty arg; + struct uburma_uobj *uobj; + struct ubcore_jetty_attr attr = { 0 }; + struct ubcore_jetty *jetty; + struct ubcore_udata udata; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_modify_jetty)); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.rx_threshold = arg.in.rx_threshold; + fill_udata(&udata, file->ucontext, 
&arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JETTY, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jetty.\n"); + return -EINVAL; + } + + jetty = (struct ubcore_jetty *)uobj->object; + ret = ubcore_modify_jetty(jetty, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jetty failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_modify_jetty)); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jetty arg; + struct uburma_jetty_uobj *jetty_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_delete_jetty)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JETTY, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jetty"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. 
*/ + uobj_get(uobj); + jetty_uobj = container_of(uobj, struct uburma_jetty_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jetty failed, ret:%d.\n", ret); + uobj_put(uobj); + return ret; + } + + arg.out.async_events_reported = jetty_uobj->async_events_reported; + uobj_put(uobj); + return uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_delete_jetty)); +} + +static int uburma_cmd_create_jfce(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfce arg; + struct uburma_jfce_uobj *jfce; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_create_jfce)); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_JFCE, file); + if (IS_ERR(uobj)) + return PTR_ERR(uobj); + + jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + uburma_init_jfe(&jfce->jfe); + + arg.out.fd = uobj->id; /* should get fd before commit uobj */ + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_create_jfce)); + if (ret != 0) { + uobj_alloc_abort(uobj); + return ret; + } + + uobj_alloc_commit(uobj); + return ret; +} + +static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_jfr arg; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_tjetty *tjfr; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_import_jfr)); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JFR, file); + if (IS_ERR(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JFR alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.id.uasid = 
arg.in.uasid; + cfg.id.id = arg.in.id; + cfg.ukey.key = arg.in.key; + cfg.trans_mode = arg.in.trans_mode; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjfr = ubcore_import_jfr(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(tjfr)) { + uburma_log_err("ubcore_import_jfr failed.\n"); + uobj_alloc_abort(uobj); + return -EPERM; + } + + uobj->object = tjfr; + arg.out.handle = uobj->id; + if (tjfr->tp != NULL) { + arg.out.tp_type = 1; + arg.out.tpn = tjfr->tp->tpn; + } else { + arg.out.tpn = UBURMA_INVALID_TPN; + } + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_import_jfr)); + if (ret != 0) { + ubcore_unimport_jfr(tjfr); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return 0; +} + +static int uburma_cmd_unimport_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_jfr arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unimport_jfr)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_TARGET_JFR, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find tjfr"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore_unimport_jfr failed.\n"); + return ret; +} + +static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_jetty arg; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_tjetty *tjetty; + struct ubcore_udata udata; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_import_jetty)); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JETTY, file); + if (IS_ERR(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JETTY alloc fail!\n"); + 
return -ENOMEM; + } + + (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.id.uasid = arg.in.uasid; + cfg.id.id = arg.in.id; + cfg.ukey.key = arg.in.key; + cfg.trans_mode = (enum ubcore_transport_mode)arg.in.trans_mode; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjetty = ubcore_import_jetty(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(tjetty)) { + uburma_log_err("ubcore_import_jetty failed.\n"); + uobj_alloc_abort(uobj); + return -EPERM; + } + + uobj->object = tjetty; + arg.out.handle = uobj->id; + if (tjetty->tp != NULL) { + arg.out.tp_type = 1; + arg.out.tpn = tjetty->tp->tpn; + } else { + arg.out.tpn = UBURMA_INVALID_TPN; + } + + ret = uburma_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct uburma_cmd_import_jetty)); + if (ret != 0) { + (void)ubcore_unimport_jetty(tjetty); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return 0; +} + +static int uburma_cmd_unimport_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_jetty arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unimport_jetty)); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_TARGET_JETTY, arg.in.handle, file); + if (IS_ERR(uobj)) { + uburma_log_err("failed to find tjetty"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore_unimport_jetty failed.\n"); + return ret; +} + +static int uburma_get_jetty_tjetty_objs(struct uburma_file *file, uint64_t jetty_handle, + uint64_t tjetty_handle, struct uburma_uobj **jetty_uobj, + struct uburma_uobj **tjetty_uobj) +{ + *jetty_uobj = uobj_get_read(UOBJ_CLASS_JETTY, jetty_handle, file); + if (IS_ERR(*jetty_uobj)) { + uburma_log_err("failed to find jetty with handle %llu", jetty_handle); + return -EINVAL; + } + + *tjetty_uobj = 
uobj_get_read(UOBJ_CLASS_TARGET_JETTY, tjetty_handle, file); + if (IS_ERR(*tjetty_uobj)) { + uobj_put_read(*jetty_uobj); + uburma_log_err("failed to find target jetty with handle %llu", tjetty_handle); + return -EINVAL; + } + return 0; +} + +static inline void uburma_put_jetty_tjetty_objs(struct uburma_uobj *jetty_uobj, + struct uburma_uobj *tjetty_uobj) +{ + uobj_put_read(jetty_uobj); + uobj_put_read(tjetty_uobj); +} + +static int uburma_get_jfs_tjfr_objs(struct uburma_file *file, uint64_t jetty_handle, + uint64_t tjetty_handle, struct uburma_uobj **jetty_uobj, + struct uburma_uobj **tjetty_uobj) +{ + *jetty_uobj = uobj_get_read(UOBJ_CLASS_JFS, jetty_handle, file); + if (IS_ERR(*jetty_uobj)) { + uburma_log_err("failed to find jfs with handle %llu", jetty_handle); + return -EINVAL; + } + + *tjetty_uobj = uobj_get_read(UOBJ_CLASS_TARGET_JFR, tjetty_handle, file); + if (IS_ERR(*tjetty_uobj)) { + uobj_put_read(*jetty_uobj); + uburma_log_err("failed to find target jfr with handle %llu", tjetty_handle); + return -EINVAL; + } + return 0; +} + +static inline void uburma_put_jfs_tjfr_objs(struct uburma_uobj *jetty_uobj, + struct uburma_uobj *tjetty_uobj) +{ + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); +} + +static int uburma_cmd_advise_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_advise_jetty arg; + struct uburma_uobj *tjfr_uobj; + struct uburma_uobj *jfs_uobj; + struct ubcore_udata udata; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_advise_jetty)); + if (ret != 0) + return ret; + + if (uburma_get_jfs_tjfr_objs(file, arg.in.jetty_handle, arg.in.tjetty_handle, &jfs_uobj, + &tjfr_uobj)) + return -EINVAL; + + fill_udata(&udata, file->ucontext, &arg.udata); + + ret = ubcore_advise_jfr(jfs_uobj->object, tjfr_uobj->object, &udata); + if (ret != 0) + uburma_log_err("advise jfr failed.\n"); + + uburma_put_jfs_tjfr_objs(jfs_uobj, 
tjfr_uobj); + return ret; +} + +static int uburma_cmd_unadvise_jfr(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + struct uburma_uobj *tjfr_uobj; + struct uburma_uobj *jfs_uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unadvise_jetty)); + if (ret != 0) + return ret; + + if (uburma_get_jfs_tjfr_objs(file, arg.in.jetty_handle, arg.in.tjetty_handle, &jfs_uobj, + &tjfr_uobj)) + return -EINVAL; + + ret = ubcore_unadvise_jfr(jfs_uobj->object, tjfr_uobj->object); + if (ret != 0) + uburma_log_err("failed to unadvise jfr.\n"); + + uburma_put_jfs_tjfr_objs(jfs_uobj, tjfr_uobj); + return ret; +} + +static int uburma_cmd_advise_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_advise_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + struct ubcore_udata udata; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_advise_jetty)); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, arg.in.tjetty_handle, + &jetty_uobj, &tjetty_uobj)) + return -EINVAL; + + fill_udata(&udata, file->ucontext, &arg.udata); + + ret = ubcore_advise_jetty(jetty_uobj->object, tjetty_uobj->object, &udata); + if (ret != 0) + uburma_log_err("advise_jetty failed.\n"); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_unadvise_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unadvise_jetty)); + if (ret != 0) + return ret; + + if 
(uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, arg.in.tjetty_handle, + &jetty_uobj, &tjetty_uobj)) + return -EINVAL; + + ret = ubcore_unadvise_jetty(jetty_uobj->object, tjetty_uobj->object); + if (ret != 0) + uburma_log_err("failed to unadvise jetty, ret: %d.\n", ret); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_bind_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_advise_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + struct ubcore_udata udata; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_advise_jetty)); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, arg.in.tjetty_handle, + &jetty_uobj, &tjetty_uobj)) + return -EINVAL; + + fill_udata(&udata, file->ucontext, &arg.udata); + + ret = ubcore_bind_jetty(jetty_uobj->object, tjetty_uobj->object, &udata); + if (ret != 0) + uburma_log_err("bind jetty failed.\n"); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_unbind_jetty(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + int ret; + + ret = uburma_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_unadvise_jetty)); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, arg.in.tjetty_handle, + &jetty_uobj, &tjetty_uobj)) + return -EINVAL; + + ret = ubcore_unbind_jetty(jetty_uobj->object, tjetty_uobj->object); + if (ret != 0) + uburma_log_err("failed to unbind jetty, ret: %d.\n", ret); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_fill_user_ctl_info(struct 
ubcore_ucontext *ctx, + struct uburma_cmd_user_ctl *user_ctl, + struct ubcore_user_ctl *k_user_ctl) +{ + if (ctx == NULL) { + uburma_log_err("parameter invalid with ctx nullptr.\n"); + return -EINVAL; + } + + k_user_ctl->uctx = ctx; + k_user_ctl->in.addr = user_ctl->in.addr; + k_user_ctl->in.len = user_ctl->in.len; + k_user_ctl->in.opcode = user_ctl->in.opcode; + + k_user_ctl->out.addr = user_ctl->out.addr; + k_user_ctl->out.len = user_ctl->out.len; + + k_user_ctl->udrv_data.in_addr = user_ctl->udrv.in_addr; + k_user_ctl->udrv_data.in_len = user_ctl->udrv.in_len; + k_user_ctl->udrv_data.out_addr = user_ctl->udrv.out_addr; + k_user_ctl->udrv_data.out_len = user_ctl->udrv.out_len; + + return 0; +} + +static int uburma_cmd_user_ctl(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_user_ctl k_user_ctl = { 0 }; + struct uburma_cmd_user_ctl user_ctl; + int ret; + + ret = uburma_copy_from_user(&user_ctl, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct uburma_cmd_user_ctl)); + if (ret != 0) + return ret; + + ret = uburma_fill_user_ctl_info(file->ucontext, &user_ctl, &k_user_ctl); + if (ret != 0) + return ret; + + ret = ubcore_user_control(&k_user_ctl); + if (ret != 0) + return ret; + + return 0; +} + +typedef int (*uburma_cmd_handler)(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr); + +static uburma_cmd_handler g_uburma_cmd_handlers[] = { + [0] = NULL, + [UBURMA_CMD_CREATE_CTX] = uburma_cmd_create_ctx, + [UBURMA_CMD_DESTROY_CTX] = uburma_cmd_destroy_ctx, + [UBURMA_CMD_ALLOC_KEY_ID] = uburma_cmd_alloc_key_id, + [UBURMA_CMD_FREE_KEY_ID] = uburma_cmd_free_key_id, + [UBURMA_CMD_REGISTER_SEG] = uburma_cmd_register_seg, + [UBURMA_CMD_UNREGISTER_SEG] = uburma_cmd_unregister_seg, + [UBURMA_CMD_IMPORT_SEG] = uburma_cmd_import_seg, + [UBURMA_CMD_UNIMPORT_SEG] = uburma_cmd_unimport_seg, + [UBURMA_CMD_CREATE_JFR] = uburma_cmd_create_jfr, + [UBURMA_CMD_MODIFY_JFR] = 
uburma_cmd_modify_jfr, + [UBURMA_CMD_DELETE_JFR] = uburma_cmd_delete_jfr, + [UBURMA_CMD_CREATE_JFS] = uburma_cmd_create_jfs, + [UBURMA_CMD_DELETE_JFS] = uburma_cmd_delete_jfs, + [UBURMA_CMD_CREATE_JFC] = uburma_cmd_create_jfc, + [UBURMA_CMD_MODIFY_JFC] = uburma_cmd_modify_jfc, + [UBURMA_CMD_DELETE_JFC] = uburma_cmd_delete_jfc, + [UBURMA_CMD_CREATE_JFCE] = uburma_cmd_create_jfce, + [UBURMA_CMD_IMPORT_JFR] = uburma_cmd_import_jfr, + [UBURMA_CMD_UNIMPORT_JFR] = uburma_cmd_unimport_jfr, + [UBURMA_CMD_CREATE_JETTY] = uburma_cmd_create_jetty, + [UBURMA_CMD_MODIFY_JETTY] = uburma_cmd_modify_jetty, + [UBURMA_CMD_DELETE_JETTY] = uburma_cmd_delete_jetty, + [UBURMA_CMD_IMPORT_JETTY] = uburma_cmd_import_jetty, + [UBURMA_CMD_UNIMPORT_JETTY] = uburma_cmd_unimport_jetty, + [UBURMA_CMD_ADVISE_JFR] = uburma_cmd_advise_jfr, + [UBURMA_CMD_UNADVISE_JFR] = uburma_cmd_unadvise_jfr, + [UBURMA_CMD_ADVISE_JETTY] = uburma_cmd_advise_jetty, + [UBURMA_CMD_UNADVISE_JETTY] = uburma_cmd_unadvise_jetty, + [UBURMA_CMD_BIND_JETTY] = uburma_cmd_bind_jetty, + [UBURMA_CMD_UNBIND_JETTY] = uburma_cmd_unbind_jetty, + [UBURMA_CMD_USER_CTL] = uburma_cmd_user_ctl +}; + +static int uburma_cmd_parse(struct ubcore_device *ubc_dev, struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + if (hdr->command < UBURMA_CMD_CREATE_CTX || hdr->command > UBURMA_CMD_USER_CTL || + g_uburma_cmd_handlers[hdr->command] == NULL) { + uburma_log_err("bad uburma command: %d.\n", (int)hdr->command); + return -EINVAL; + } + return g_uburma_cmd_handlers[hdr->command](ubc_dev, file, hdr); +} + +long uburma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct uburma_cmd_hdr *user_hdr = (struct uburma_cmd_hdr *)arg; + struct uburma_file *file = filp->private_data; + struct uburma_device *ubu_dev = file->ubu_dev; + struct ubcore_device *ubc_dev; + struct uburma_cmd_hdr hdr; + int srcu_idx; + long ret; + + uburma_cmd_inc(ubu_dev); + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = 
srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu);
	if (!ubc_dev) {
		uburma_log_err("can not find ubcore device.\n");
		ret = -EIO;
		goto srcu_unlock;
	}

	if (cmd == UBURMA_CMD) {
		ret = (long)copy_from_user(&hdr, user_hdr, sizeof(struct uburma_cmd_hdr));
		/* Reject oversized argument buffers, and any command other than
		 * CREATE_CTX issued before a ucontext has been established.
		 */
		if ((ret != 0) || (hdr.args_len > UBURMA_CMD_MAX_ARGS_SIZE) ||
		    (hdr.command > UBURMA_CMD_CREATE_CTX && file->ucontext == NULL)) {
			uburma_log_err(
				"invalid input, hdr.command: %d, ret:%ld, hdr.args_len: %d\n",
				hdr.command, ret, hdr.args_len);
			ret = -EINVAL;
		} else {
			ret = (long)uburma_cmd_parse(ubc_dev, file, &hdr);
		}
	} else {
		uburma_log_err("bad ioctl command.\n");
		ret = -ENOIOCTLCMD;
	}

srcu_unlock:
	srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx);
	uburma_cmd_dec(ubu_dev);
	return ret;
}
diff --git a/drivers/ub/urma/uburma/uburma_cmd.h b/drivers/ub/urma/uburma/uburma_cmd.h
new file mode 100644
index 0000000000000000000000000000000000000000..631ba2dd103d0c9271e894182233c34c8a84f79e
--- /dev/null
+++ b/drivers/ub/urma/uburma/uburma_cmd.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ * + * Description: uburma cmd header file + * Author: Qian Guoxin + * Create: 2023-2-28 + * Note: + * History: 2023-2-28: Create file + */ + +#ifndef UBURMA_CMD_H +#define UBURMA_CMD_H +#include +#include + +#include "uburma_types.h" + +struct uburma_cmd_hdr { + uint32_t command; + uint32_t args_len; + uint64_t args_addr; +}; + +#define UBURMA_CMD_MAX_ARGS_SIZE 4096 + +/* only for uburma device ioctl */ +#define UBURMA_CMD_MAGIC 'U' +#define UBURMA_CMD _IOWR(UBURMA_CMD_MAGIC, 1, struct uburma_cmd_hdr) + +enum uburma_cmd { + UBURMA_CMD_CREATE_CTX = 1, + UBURMA_CMD_DESTROY_CTX, + UBURMA_CMD_ALLOC_KEY_ID, + UBURMA_CMD_FREE_KEY_ID, + UBURMA_CMD_REGISTER_SEG, + UBURMA_CMD_UNREGISTER_SEG, + UBURMA_CMD_IMPORT_SEG, + UBURMA_CMD_UNIMPORT_SEG, + UBURMA_CMD_CREATE_JFS, + UBURMA_CMD_DELETE_JFS, + UBURMA_CMD_CREATE_JFR, + UBURMA_CMD_MODIFY_JFR, + UBURMA_CMD_DELETE_JFR, + UBURMA_CMD_CREATE_JFC, + UBURMA_CMD_MODIFY_JFC, + UBURMA_CMD_DELETE_JFC, + UBURMA_CMD_CREATE_JFCE, + UBURMA_CMD_IMPORT_JFR, + UBURMA_CMD_UNIMPORT_JFR, + UBURMA_CMD_CREATE_JETTY, + UBURMA_CMD_MODIFY_JETTY, + UBURMA_CMD_DELETE_JETTY, + UBURMA_CMD_IMPORT_JETTY, + UBURMA_CMD_UNIMPORT_JETTY, + UBURMA_CMD_ADVISE_JFR, + UBURMA_CMD_UNADVISE_JFR, + UBURMA_CMD_ADVISE_JETTY, + UBURMA_CMD_UNADVISE_JETTY, + UBURMA_CMD_BIND_JETTY, + UBURMA_CMD_UNBIND_JETTY, + UBURMA_CMD_USER_CTL +}; + +struct uburma_cmd_udrv_priv { + uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; +}; + +struct uburma_cmd_create_ctx { + struct { + uint32_t uasid; + } in; + struct { + int async_fd; + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_alloc_key_id { + struct { + uint32_t key_id; + uint64_t handle; /* handle of the allocated key_id obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_free_key_id { + struct { + uint64_t handle; /* handle of the allocated key_id obj in kernel */ + } in; +}; + +struct uburma_cmd_register_seg { + struct { + uint64_t va; + uint64_t 
len; + uint32_t key_id; + uint64_t keyid_handle; + uint32_t key; + uint32_t flag; + } in; + struct { + uint32_t key_id; + uint64_t handle; /* handle of the allocated seg obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unregister_seg { + struct { + uint64_t handle; /* handle of seg, used to find seg obj in kernel */ + } in; +}; + +struct uburma_cmd_import_seg { + struct { + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t uasid; + uint64_t va; + uint64_t len; + uint32_t flag; + uint32_t key; + uint32_t key_id; + uint64_t mva; + } in; + struct { + uint64_t handle; /* handle of the allocated tseg obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unimport_seg { + struct { + uint64_t handle; /* handle of the seg to be unimported */ + } in; +}; + +struct uburma_cmd_create_jfr { + struct { + uint32_t depth; /* in terms of WQEBB */ + uint32_t flag; + uint32_t trans_mode; + uint8_t max_sge; + uint8_t min_rnr_timer; + uint32_t jfc_id; + uint64_t jfc_handle; + uint32_t key; + uint32_t id; + uint64_t urma_jfr; /* urma jfr pointer */ + } in; + struct { + uint32_t id; + uint32_t depth; + uint8_t max_sge; + uint64_t handle; /* handle of the allocated jfr obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jfr { + struct { + uint64_t handle; /* handle of jfr, used to find jfr obj in kernel */ + uint32_t mask; /* see urma_jfr_attr_mask_t */ + uint32_t rx_threshold; + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jfr { + struct { + uint64_t handle; /* handle of jfr, used to find jfr obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_create_jfs { + struct { + uint32_t depth; /* in terms of WQEBB */ + uint32_t flag; + uint32_t trans_mode; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + 
uint32_t jfc_id; + uint64_t jfc_handle; + uint64_t urma_jfs; /* urma jfs pointer */ + } in; + struct { + uint32_t id; + uint32_t depth; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint64_t handle; /* handle of the allocated jfs obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jfs { + struct { + uint64_t handle; /* handle of jfs, used to find jfs obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_create_jfc { + struct { + uint32_t depth; /* in terms of CQEBB */ + uint32_t flag; + int jfce_fd; + uint64_t urma_jfc; /* urma jfc pointer */ + } in; + struct { + uint32_t id; + uint32_t depth; + uint64_t handle; /* handle of the allocated jfc obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jfc { + struct { + uint64_t handle; /* handle of jfc, used to find jfc obj in kernel */ + uint32_t mask; /* see urma_jfc_attr_mask_t */ + uint16_t moderate_count; + uint16_t moderate_period; /* in micro seconds */ + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jfc { + struct { + uint64_t handle; /* handle of jfc, used to find jfc obj in kernel */ + } in; + struct { + uint32_t comp_events_reported; + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_create_jfce { + struct { + int fd; + } out; +}; + +struct uburma_cmd_import_jfr { + struct { + /* correspond to urma_jfr_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t uasid; + uint32_t id; + /* correspond to urma_key_t */ + uint32_t key; + uint32_t trans_mode; + } in; + struct { + uint8_t tp_type; /* TP or TPG */ + uint32_t tpn; + uint64_t handle; /* handle of the allocated tjfr obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unimport_jfr { + struct { + uint64_t handle; /* handle of tjfr, used to find tjfr obj in kernel */ + } in; +}; + +struct uburma_cmd_create_jetty { + struct { + 
uint32_t id; /* user may assign id */ + uint32_t jfs_depth; + uint32_t jfr_depth; + uint32_t flag; + uint32_t trans_mode; + uint32_t send_jfc_id; + uint32_t recv_jfc_id; + uint32_t jfr_id; /* shared jfr */ + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint8_t max_recv_sge; + uint32_t max_inline_data; + uint8_t priority; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + uint8_t min_rnr_timer; + uint32_t key; + uint64_t send_jfc_handle; /* handle of the related send jfc */ + uint64_t recv_jfc_handle; /* handle of the related recv jfc */ + uint64_t jfr_handle; /* handle of the shared jfr */ + uint64_t urma_jetty; /* urma jetty pointer */ + } in; + struct { + uint32_t id; /* jetty id allocated by ubcore */ + uint64_t handle; /* handle of the allocated jetty obj in kernel */ + uint32_t jfs_depth; + uint32_t jfr_depth; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint8_t max_recv_sge; + uint32_t max_inline_data; + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jetty { + struct { + uint64_t handle; /* handle of jetty, used to find jetty obj in kernel */ + uint32_t mask; /* see urma_jetty_attr_mask_t */ + uint32_t rx_threshold; + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jetty { + struct { + uint64_t handle; /* handle of jetty, used to find jetty obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_import_jetty { + struct { + /* correspond to urma_jetty_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t uasid; + uint32_t id; + uint32_t flag; + /* correspond to urma_key_t */ + uint32_t key; + uint32_t trans_mode; + } in; + struct { + uint8_t tp_type; /* TP or TPG */ + uint32_t tpn; + uint64_t handle; /* handle of the allocated tjetty obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unimport_jetty { + struct { + uint64_t handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; +}; + 
+struct uburma_cmd_advise_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unadvise_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; +}; + +struct uburma_cmd_user_ctl { + struct { + uint64_t addr; + uint32_t len; + uint32_t opcode; + } in; /* struct [in] should be consistent with [urma_user_ctl_in_t] */ + struct { + uint64_t addr; + uint32_t len; + uint32_t rsv; + } out; /* struct [out] should be consistent with [urma_user_ctl_out_t] */ + struct { + uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; + } udrv; /* struct [udrv] should be consistent with [urma_udrv_t] */ +}; + +/* only for event ioctl */ +#define MAX_JFCE_EVENT_CNT 16 +#define UBURMA_EVENT_CMD_MAGIC 'E' +#define JFCE_CMD_WAIT_EVENT 0 +#define JFAE_CMD_GET_ASYNC_EVENT 0 +#define UBURMA_CMD_WAIT_JFC \ + _IOWR(UBURMA_EVENT_CMD_MAGIC, JFCE_CMD_WAIT_EVENT, struct uburma_cmd_jfce_wait) +#define UBURMA_CMD_GET_ASYNC_EVENT \ + _IOWR(UBURMA_EVENT_CMD_MAGIC, JFAE_CMD_GET_ASYNC_EVENT, struct uburma_cmd_async_event) + +struct uburma_cmd_jfce_wait { + struct { + uint32_t max_event_cnt; + int time_out; + } in; + struct { + uint32_t event_cnt; + uint64_t event_data[MAX_JFCE_EVENT_CNT]; + } out; +}; + +struct uburma_cmd_async_event { + uint32_t event_type; + uint64_t event_data; + uint32_t pad; +}; + +/* copy from user_space addr to kernel args */ +static inline int uburma_copy_from_user(void *args, const void *args_addr, unsigned long args_size) +{ + int ret = (int)copy_from_user(args, args_addr, args_size); + + if (ret != 0) { + uburma_log_err("copy from user failed, ret:%d.\n", ret); + return -EFAULT; + } + return 0; +} + +/* copy kernel args to 
user_space addr */ +static inline int uburma_copy_to_user(void *args_addr, const void *args, unsigned long args_size) +{ + int ret = (int)copy_to_user(args_addr, args, args_size); + + if (ret != 0) { + uburma_log_err("copy to user failed ret:%d.\n", ret); + return -EFAULT; + } + return 0; +} + +void uburma_cmd_inc(struct uburma_device *ubu_dev); +void uburma_cmd_dec(struct uburma_device *ubu_dev); +void uburma_cmd_flush(struct uburma_device *ubu_dev); + +#endif /* UBURMA_CMD_H */ diff --git a/drivers/ub/urma/uburma/uburma_dev_ops.c b/drivers/ub/urma/uburma/uburma_dev_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..625193d2a04abc43eea34fd2e83f4c24fbceff5f --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_dev_ops.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma device ops file + * Author: Qian Guoxin + * Create: 2021-08-04 + * Note: + * History: 2021-08-04: Create file + */ + +#include +#include +#include + +#include +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_uobj.h" +#include "uburma_cmd.h" + +int uburma_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct uburma_file *file = filp->private_data; + struct uburma_device *ubu_dev; + struct ubcore_device *ubc_dev; + int srcu_idx; + int ret; + + if (file == NULL || file->ucontext == NULL) { + uburma_log_err("can not find ucontext.\n"); + return -EINVAL; + } + + ubu_dev = file->ubu_dev; + uburma_cmd_inc(ubu_dev); + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev == NULL || ubc_dev->ops == NULL || ubc_dev->ops->mmap == NULL) { + uburma_log_err("can not find ubcore device.\n"); + ret = -ENODEV; + goto out; + } + + ret = ubc_dev->ops->mmap(file->ucontext, vma); + +out: + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + uburma_cmd_dec(ubu_dev); + return ret; +} + +void uburma_release_file(struct kref *ref) +{ + struct uburma_file *file = container_of(ref, struct uburma_file, ref); + int srcu_idx; + + srcu_idx = srcu_read_lock(&file->ubu_dev->ubc_dev_srcu); + srcu_dereference(file->ubu_dev->ubc_dev, &file->ubu_dev->ubc_dev_srcu); + + srcu_read_unlock(&file->ubu_dev->ubc_dev_srcu, srcu_idx); + + if (atomic_dec_and_test(&file->ubu_dev->refcnt)) + complete(&file->ubu_dev->comp); + + kobject_put(&file->ubu_dev->kobj); + kfree(file); +} + +int uburma_open(struct inode *inode, struct file *filp) +{ + struct uburma_device *ubu_dev; + struct ubcore_device *ubc_dev; + struct uburma_file *file; + int srcu_idx; + int ret; + + ubu_dev = container_of(inode->i_cdev, struct uburma_device, cdev); + if (!atomic_inc_not_zero(&ubu_dev->refcnt)) { + uburma_log_err("device was not ready.\n"); + return -ENXIO; + } + + srcu_idx = 
srcu_read_lock(&ubu_dev->ubc_dev_srcu); + mutex_lock(&ubu_dev->lists_mutex); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev == NULL) { + uburma_log_err("can not find ubcore device.\n"); + ret = EIO; + goto err; + } + + file = kzalloc(sizeof(struct uburma_file), GFP_KERNEL); + if (!file) { + ret = -ENOMEM; + uburma_log_err("can not alloc memory.\n"); + goto err; + } + + file->ubu_dev = ubu_dev; + file->ucontext = NULL; + kref_init(&file->ref); + mutex_init(&file->mutex); + uburma_init_uobj_context(file); + filp->private_data = file; + + list_add_tail(&file->list, &ubu_dev->uburma_file_list); + kobject_get(&ubu_dev->kobj); // Increase reference count for file. + + mutex_unlock(&ubu_dev->lists_mutex); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + + uburma_log_info("device: %s open succeed.\n", ubc_dev->dev_name); + return nonseekable_open(inode, filp); + +err: + mutex_unlock(&ubu_dev->lists_mutex); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + if (atomic_dec_and_test(&ubu_dev->refcnt)) + complete(&ubu_dev->comp); + return ret; +} + +int uburma_close(struct inode *inode, struct file *filp) +{ + struct uburma_file *file = filp->private_data; + + mutex_lock(&file->mutex); + uburma_cleanup_uobjs(file, UBURMA_REMOVE_CLOSE); + if (file->ucontext) { + ubcore_free_ucontext(file->ubu_dev->ubc_dev, file->ucontext); + file->ucontext = NULL; + } + mutex_unlock(&file->mutex); + + mutex_lock(&file->ubu_dev->lists_mutex); + if (file->is_closed == 0) { + list_del(&file->list); + file->is_closed = 1; + } + mutex_unlock(&file->ubu_dev->lists_mutex); + + kref_put(&file->ref, uburma_release_file); + + return 0; +} diff --git a/drivers/ub/urma/uburma/uburma_event.c b/drivers/ub/urma/uburma/uburma_event.c new file mode 100644 index 0000000000000000000000000000000000000000..b8ef510968e86258ad3d73f75dd844c4b245711e --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_event.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma event implementation + * Author: Yan Fangfang + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_cmd.h" +#include "uburma_uobj.h" +#include "uburma_event.h" + +#define UBURMA_JFCE_DELETE_EVENT 0 +struct uburma_jfe_event { + struct list_head node; + uint32_t event_type; /* support async event */ + uint64_t event_data; + struct list_head obj_node; + uint32_t *counter; +}; + +struct uburma_jfce_uobj *uburma_get_jfce_uobj(int fd, struct uburma_file *ufile) +{ + struct uburma_uobj *uobj; + struct uburma_jfce_uobj *jfce; + + if (fd < 0) + return ERR_PTR(-ENOENT); + + uobj = uobj_get_read(UOBJ_CLASS_JFCE, fd, ufile); + if (IS_ERR(uobj)) { + uburma_log_err("get jfce uobj fail with fd %d\n", fd); + return (void *)uobj; + } + + jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + uobj_get(uobj); // To keep the event file until jfce destroy. 
+ uobj_put_read(uobj); + return jfce; +} + +void uburma_write_event(struct uburma_jfe *jfe, uint64_t event_data, uint32_t event_type, + struct list_head *obj_event_list, uint32_t *counter) +{ + struct uburma_jfe_event *event; + unsigned long flags; + + spin_lock_irqsave(&jfe->lock, flags); + if (jfe->deleting) { + spin_unlock_irqrestore(&jfe->lock, flags); + return; + } + event = kmalloc(sizeof(struct uburma_jfe_event), GFP_ATOMIC); + if (event == NULL) { + spin_unlock_irqrestore(&jfe->lock, flags); + return; + } + event->event_data = event_data; + event->event_type = event_type; + event->counter = counter; + + list_add_tail(&event->node, &jfe->event_list); + if (obj_event_list) + list_add_tail(&event->obj_node, obj_event_list); + spin_unlock_irqrestore(&jfe->lock, flags); + wake_up_interruptible(&jfe->poll_wait); +} + +void uburma_jfce_handler(struct ubcore_jfc *jfc) +{ + struct uburma_jfc_uobj *jfc_uobj; + struct uburma_jfce_uobj *jfce; + + if (jfc == NULL) + return; + + rcu_read_lock(); + jfc_uobj = rcu_dereference(jfc->jfc_cfg.jfc_context); + if (jfc_uobj != NULL && !IS_ERR(jfc_uobj) && !IS_ERR(jfc_uobj->jfce)) { + jfce = container_of(jfc_uobj->jfce, struct uburma_jfce_uobj, uobj); + uburma_write_event(&jfce->jfe, jfc->urma_jfc, 0, &jfc_uobj->comp_event_list, + &jfc_uobj->comp_events_reported); + } + + rcu_read_unlock(); +} + +void uburma_uninit_jfe(struct uburma_jfe *jfe) +{ + struct list_head *p, *next; + struct uburma_jfe_event *event; + + spin_lock_irq(&jfe->lock); + list_for_each_safe(p, next, &jfe->event_list) { + event = list_entry(p, struct uburma_jfe_event, node); + if (event->counter) + list_del(&event->obj_node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); +} + +static int uburma_delete_jfce(struct inode *inode, struct file *filp) +{ + struct uburma_uobj *uobj = filp->private_data; + + uobj_get(uobj); + /* will call uburma_hot_unplug_jfce if clean up is not going on */ + uburma_close_uobj_fd(filp); + uobj_put(uobj); + return 0; +} + +/* Read 
up to event_cnt events from jfe */ +static uint32_t uburma_read_jfe_event(struct uburma_jfe *jfe, uint32_t event_cnt, + struct list_head *event_list) +{ + struct list_head *p, *next; + struct uburma_jfe_event *event; + uint32_t cnt = 0; + + spin_lock_irq(&jfe->lock); + if (jfe->deleting) { + spin_unlock_irq(&jfe->lock); + return 0; + } + list_for_each_safe(p, next, &jfe->event_list) { + if (cnt == event_cnt) + break; + event = list_entry(p, struct uburma_jfe_event, node); + if (event->counter) { + ++(*event->counter); + list_del(&event->obj_node); + } + list_del(p); + list_add_tail(p, event_list); + cnt++; + } + spin_unlock_irq(&jfe->lock); + return cnt; +} + +static int uburma_wait_event_timeout(struct uburma_jfe *jfe, unsigned long max_timeout, + uint32_t max_event_cnt, uint32_t *event_cnt, + struct list_head *event_list) +{ + long timeout = (long)max_timeout; + + *event_cnt = 0; + while (!jfe->deleting) { + asm volatile("" : : : "memory"); + *event_cnt = uburma_read_jfe_event(jfe, max_event_cnt, event_list); + /* Stop waiting once we have read at least one event */ + if (jfe->deleting) + return -EIO; + else if (*event_cnt > 0) + break; + /* + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * the remaining jiffies (at least 1) if the @condition evaluated to true + * before the @timeout elapsed, + * or -%ERESTARTSYS if it was interrupted by a signal. 
+ */ + timeout = wait_event_interruptible_timeout( + jfe->poll_wait, (!list_empty(&jfe->event_list) || jfe->deleting), timeout); + if (timeout <= 0) + return timeout; + } + + return 0; +} + +static int uburma_wait_event(struct uburma_jfe *jfe, bool nonblock, uint32_t max_event_cnt, + uint32_t *event_cnt, struct list_head *event_list) +{ + int ret; + + *event_cnt = 0; + while (!jfe->deleting) { + asm volatile("" : : : "memory"); + *event_cnt = uburma_read_jfe_event(jfe, max_event_cnt, event_list); + /* Stop waiting once we have read at least one event */ + if (jfe->deleting) + return -EIO; + else if (nonblock && *event_cnt == 0) + return 0; + else if (*event_cnt > 0) + break; + /* The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. + */ + ret = wait_event_interruptible(jfe->poll_wait, + (!list_empty(&jfe->event_list) || jfe->deleting)); + if (ret != 0) + return ret; + } + return 0; +} + +static __poll_t uburma_jfe_poll(struct uburma_jfe *jfe, struct file *filp, + struct poll_table_struct *wait) +{ + __poll_t flag = 0; + + poll_wait(filp, &jfe->poll_wait, wait); + + spin_lock_irq(&jfe->lock); + if (!list_empty(&jfe->event_list)) + flag = EPOLLIN | EPOLLRDNORM; + + spin_unlock_irq(&jfe->lock); + + return flag; +} + +static __poll_t uburma_jfce_poll(struct file *filp, struct poll_table_struct *wait) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfce_uobj *jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + + return uburma_jfe_poll(&jfce->jfe, filp, wait); +} + +static int uburma_jfce_wait(struct uburma_jfce_uobj *jfce, struct file *filp, unsigned long arg) +{ + struct uburma_cmd_jfce_wait we; + struct list_head event_list; + struct uburma_jfe_event *event; + uint32_t max_event_cnt; + uint32_t i = 0; + struct list_head *p, *next; + int ret; + + if (arg == 0) + return -EINVAL; + + if (copy_from_user(&we, (const void __user *)(uintptr_t)arg, + sizeof(struct uburma_cmd_jfce_wait)) 
!= 0) + return -EFAULT; + + /* urma lib ensures that max_event_cnt > 0 */ + max_event_cnt = (we.in.max_event_cnt < MAX_JFCE_EVENT_CNT ? we.in.max_event_cnt : + MAX_JFCE_EVENT_CNT); + INIT_LIST_HEAD(&event_list); + if (we.in.time_out <= 0) { + ret = uburma_wait_event(&jfce->jfe, + (filp->f_flags & O_NONBLOCK) | (we.in.time_out == 0), + max_event_cnt, &we.out.event_cnt, &event_list); + } else { + ret = uburma_wait_event_timeout(&jfce->jfe, msecs_to_jiffies(we.in.time_out), + max_event_cnt, &we.out.event_cnt, &event_list); + } + + if (ret < 0) { + uburma_log_err("Failed to wait jfce event"); + return ret; + } + + list_for_each_safe(p, next, &event_list) { + event = list_entry(p, struct uburma_jfe_event, node); + we.out.event_data[i++] = event->event_data; + list_del(p); + kfree(event); + } + + if (we.out.event_cnt > 0 && copy_to_user((void *)arg, &we, sizeof(we))) + return -EFAULT; + + return 0; +} + +static long uburma_jfce_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + unsigned int nr; + int ret; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfce_uobj *jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + + if (_IOC_TYPE(cmd) != UBURMA_EVENT_CMD_MAGIC) + return -EINVAL; + + nr = (unsigned int)_IOC_NR(cmd); + switch (nr) { + case JFCE_CMD_WAIT_EVENT: + ret = uburma_jfce_wait(jfce, filp, arg); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return (long)ret; +} + +const struct file_operations uburma_jfce_fops = { + .owner = THIS_MODULE, + .poll = uburma_jfce_poll, + .release = uburma_delete_jfce, + .unlocked_ioctl = uburma_jfce_ioctl, +}; + +void uburma_init_jfe(struct uburma_jfe *jfe) +{ + spin_lock_init(&jfe->lock); + INIT_LIST_HEAD(&jfe->event_list); + init_waitqueue_head(&jfe->poll_wait); +} + +static int uburma_delete_jfae(struct inode *inode, struct file *filp) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = container_of(uobj, struct uburma_jfae_uobj, uobj); + + /* 
todonext: handle uobj == NULL */ + uobj_get(uobj); + /* call uburma_hot_unplug_jfae when cleanup is not going on */ + uburma_close_uobj_fd(filp); + uburma_uninit_jfe(&jfae->jfe); + uobj_put(uobj); + return 0; +} + +static __poll_t uburma_jfae_poll(struct file *filp, struct poll_table_struct *wait) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = container_of(uobj, struct uburma_jfae_uobj, uobj); + + return uburma_jfe_poll(&jfae->jfe, filp, wait); +} + +static inline void uburma_set_async_event(struct uburma_cmd_async_event *async_event, + const struct uburma_jfe_event *event) +{ + async_event->event_data = event->event_data; + async_event->event_type = event->event_type; +} + +static int uburma_get_async_event(struct uburma_jfae_uobj *jfae, struct file *filp, + unsigned long arg) +{ + struct uburma_cmd_async_event async_event = { 0 }; + struct list_head event_list; + struct uburma_jfe_event *event; + uint32_t event_cnt; + int ret; + + if (arg == 0) + return -EINVAL; + + INIT_LIST_HEAD(&event_list); + ret = uburma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, &event_cnt, &event_list); + if (ret < 0) + return ret; + + event = list_first_entry(&event_list, struct uburma_jfe_event, node); + uburma_set_async_event(&async_event, event); + list_del(&event->node); + kfree(event); + + if (event_cnt > 0 && copy_to_user((void *)arg, &async_event, sizeof(async_event))) + return -EFAULT; + + return 0; +} + +static long uburma_jfae_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = container_of(uobj, struct uburma_jfae_uobj, uobj); + unsigned int nr; + int ret; + + if (_IOC_TYPE(cmd) != UBURMA_EVENT_CMD_MAGIC) + return -EINVAL; + + nr = (unsigned int)_IOC_NR(cmd); + switch (nr) { + case JFAE_CMD_GET_ASYNC_EVENT: + ret = uburma_get_async_event(jfae, filp, arg); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return (long)ret; +} + 
+const struct file_operations uburma_jfae_fops = { + .owner = THIS_MODULE, + .poll = uburma_jfae_poll, + .release = uburma_delete_jfae, + .unlocked_ioctl = uburma_jfae_ioctl, +}; + +static void uburma_async_event_callback(struct ubcore_event *event, + struct ubcore_event_handler *handler) +{ + struct uburma_jfae_uobj *jfae = + container_of(handler, struct uburma_jfae_uobj, event_handler); + + if (WARN_ON(IS_ERR_OR_NULL(jfae))) + return; + + uburma_write_event(&jfae->jfe, event->element.port_id, event->event_type, NULL, NULL); +} + + +static inline void uburma_init_jfae_handler(struct ubcore_event_handler *handler) +{ + INIT_LIST_HEAD(&handler->node); + handler->event_callback = uburma_async_event_callback; +} + + +void uburma_init_jfae(struct uburma_jfae_uobj *jfae, struct ubcore_device *ubc_dev) +{ + uburma_init_jfe(&jfae->jfe); + uburma_init_jfae_handler(&jfae->event_handler); + ubcore_register_event_handler(ubc_dev, &jfae->event_handler); + jfae->dev = ubc_dev; +} + +void uburma_release_comp_event(struct uburma_jfce_uobj *jfce, struct list_head *event_list) +{ + struct uburma_jfe *jfe = &jfce->jfe; + struct uburma_jfe_event *event, *tmp; + + spin_lock_irq(&jfe->lock); + list_for_each_entry_safe(event, tmp, event_list, obj_node) { + list_del(&event->node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); +} + +void uburma_release_async_event(struct uburma_file *ufile, struct list_head *event_list) +{ + struct uburma_jfae_uobj *jfae = ufile->ucontext->jfae; + struct uburma_jfe *jfe = &jfae->jfe; + struct uburma_jfe_event *event, *tmp; + + spin_lock_irq(&jfe->lock); + list_for_each_entry_safe(event, tmp, event_list, obj_node) { + list_del(&event->node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); + uburma_put_jfae(ufile); +} + +int uburma_get_jfae(struct uburma_file *ufile) +{ + struct uburma_jfae_uobj *jfae; + + if (ufile->ucontext == NULL) { + uburma_log_err("ucontext is NULL"); + return -ENODEV; + } + + jfae = ufile->ucontext->jfae; + if 
(IS_ERR_OR_NULL(jfae)) { + uburma_log_err("Failed to get jfae"); + return -EINVAL; + } + + uobj_get(&jfae->uobj); + return 0; +} + +void uburma_put_jfae(struct uburma_file *ufile) +{ + struct uburma_jfae_uobj *jfae; + + if (ufile->ucontext == NULL) + return; + + jfae = ufile->ucontext->jfae; + if (IS_ERR_OR_NULL(jfae)) + return; + + uobj_put(&jfae->uobj); +} diff --git a/drivers/ub/urma/uburma/uburma_event.h b/drivers/ub/urma/uburma/uburma_event.h new file mode 100644 index 0000000000000000000000000000000000000000..96d92a1d0e5a09c2b2d21446fc442197caeca1ed --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_event.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma event header + * Author: Yan Fangfang + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#ifndef UBURMA_EVENT_H +#define UBURMA_EVENT_H + +#include +#include "uburma_uobj.h" + +void uburma_init_jfe(struct uburma_jfe *jfe); +void uburma_uninit_jfe(struct uburma_jfe *jfe); +void uburma_write_event(struct uburma_jfe *jfe, uint64_t event_data, uint32_t event_type, + struct list_head *obj_event_list, uint32_t *counter); + +struct uburma_jfce_uobj *uburma_get_jfce_uobj(int fd, struct uburma_file *ufile); +void uburma_jfce_handler(struct ubcore_jfc *jfc); +void uburma_release_comp_event(struct uburma_jfce_uobj *jfce, struct list_head *event_list); + +void uburma_init_jfae(struct uburma_jfae_uobj *jfae, struct ubcore_device *ubc_dev); +void uburma_release_async_event(struct uburma_file *ufile, struct list_head *event_list); +int uburma_get_jfae(struct uburma_file *ufile); +void uburma_put_jfae(struct uburma_file *ufile); +#endif /* UBURMA_EVENT_H */ diff --git a/drivers/ub/urma/uburma/uburma_file_ops.h b/drivers/ub/urma/uburma/uburma_file_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..62a1a10399bf763e717a3929daea01e9a1d182cb --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_file_ops.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma device file ops file + * Author: Qian Guoxin + * Create: 2021-8-4 + * Note: + * History: 2021-8-4: Create file + */ + +#ifndef UBURMA_FILE_OPS_H +#define UBURMA_FILE_OPS_H + +#include +#include +#include + +void uburma_release_file(struct kref *ref); +int uburma_mmap(struct file *filp, struct vm_area_struct *vma); +int uburma_open(struct inode *inode, struct file *filp); +int uburma_close(struct inode *inode, struct file *filp); +long uburma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#endif /* UBURMA_FILE_OPS_H */ diff --git a/drivers/ub/urma/uburma/uburma_main.c b/drivers/ub/urma/uburma/uburma_main.c index 06c0c0c5b0393ea8a3f39597d60278b9b4369426..ce013da5e8ab9cb96558f98b94a3760c40051a32 100644 --- a/drivers/ub/urma/uburma/uburma_main.c +++ b/drivers/ub/urma/uburma/uburma_main.c @@ -34,7 +34,10 @@ #include "uburma_log.h" #include "uburma_types.h" +#include "uburma_file_ops.h" #include "uburma_cdev_file.h" +#include "uburma_uobj.h" +#include "uburma_cmd.h" #define UBURMA_MAX_DEVICE 1024 #define UBURMA_DYNAMIC_MINOR_NUM UBURMA_MAX_DEVICE @@ -49,7 +52,12 @@ static struct class *g_uburma_class; static const struct file_operations g_uburma_fops = { .owner = THIS_MODULE, // .write = uburma_write, + .mmap = uburma_mmap, + .open = uburma_open, + .release = uburma_close, .llseek = no_llseek, + .unlocked_ioctl = uburma_ioctl, + .compat_ioctl = uburma_ioctl, }; static int uburma_add_device(struct ubcore_device *ubc_dev); @@ -88,6 +96,8 @@ static int uburma_get_devt(dev_t *devt) static int uburma_device_create(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) { + uint8_t i, j, k; + /* create /dev/uburma/dev_name> */ ubu_dev->dev = device_create(g_uburma_class, ubc_dev->dev.parent, ubu_dev->cdev.dev, ubu_dev, "%s", ubc_dev->dev_name); @@ -101,8 +111,28 @@ static int uburma_device_create(struct uburma_device *ubu_dev, struct ubcore_dev goto destroy_dev; } + /* create /dev/uburma/dev_name>/port* */ + for (i = 0; i < 
ubc_dev->attr.port_cnt; i++) { + if (uburma_create_port_attr_files(ubu_dev, i) != 0) + goto err_port_attr; + } + + /* create /dev/uburma/dev_name>/vf* */ + for (k = 0; k < ubc_dev->attr.vf_cnt; k++) { + if (uburma_create_vf_attr_files(ubu_dev, k) != 0) + goto err_vf_attr; + } + return 0; +err_vf_attr: + for (j = 0; j < k; j++) + uburma_remove_vf_attr_files(ubu_dev, j); +err_port_attr: + for (j = 0; j < i; j++) + uburma_remove_port_attr_files(ubu_dev, j); + + uburma_remove_dev_attr_files(ubu_dev); destroy_dev: device_destroy(g_uburma_class, ubu_dev->cdev.dev); return -EPERM; @@ -111,6 +141,15 @@ static int uburma_device_create(struct uburma_device *ubu_dev, struct ubcore_dev static void uburma_device_destroy(struct uburma_device *ubu_dev, const struct ubcore_device *ubc_dev) { + uint8_t i; + + for (i = 0; i < ubc_dev->attr.vf_cnt; i++) + uburma_remove_vf_attr_files(ubu_dev, i); + + for (i = 0; i < ubc_dev->attr.port_cnt; i++) + uburma_remove_port_attr_files(ubu_dev, i); + + uburma_remove_dev_attr_files(ubu_dev); device_destroy(g_uburma_class, ubu_dev->cdev.dev); } @@ -193,6 +232,38 @@ static int uburma_add_device(struct ubcore_device *ubc_dev) return -EPERM; } +static void uburma_free_ucontext(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) +{ + struct uburma_file *file; + + rcu_assign_pointer(ubu_dev->ubc_dev, NULL); + synchronize_srcu(&ubu_dev->ubc_dev_srcu); + + mutex_lock(&ubu_dev->lists_mutex); + while (list_empty(&ubu_dev->uburma_file_list) == false) { + struct ubcore_ucontext *ucontext; + + file = list_first_entry(&ubu_dev->uburma_file_list, struct uburma_file, list); + file->is_closed = true; + list_del(&file->list); + kref_get(&file->ref); + mutex_unlock(&ubu_dev->lists_mutex); + + mutex_lock(&file->mutex); + uburma_cleanup_uobjs(file, UBURMA_REMOVE_DRIVER_REMOVE); + ucontext = file->ucontext; + file->ucontext = NULL; + if (ucontext != NULL) + ubcore_free_ucontext(ubc_dev, ucontext); + + mutex_unlock(&file->mutex); + + 
mutex_lock(&ubu_dev->lists_mutex); + (void)kref_put(&file->ref, uburma_release_file); + } + mutex_unlock(&ubu_dev->lists_mutex); +} + static void uburma_remove_device(struct ubcore_device *ubc_dev, void *client_ctx) { struct uburma_device *ubu_dev = client_ctx; @@ -204,10 +275,13 @@ static void uburma_remove_device(struct ubcore_device *ubc_dev, void *client_ctx cdev_del(&ubu_dev->cdev); clear_bit(ubu_dev->devnum, g_dev_bitmap); + uburma_free_ucontext(ubu_dev, ubc_dev); + if (atomic_dec_and_test(&ubu_dev->refcnt)) complete(&ubu_dev->comp); /* do not wait_for_completion(&ubu_dev->comp) */ + uburma_cmd_flush(ubu_dev); kobject_put(&ubu_dev->kobj); } diff --git a/drivers/ub/urma/uburma/uburma_types.h b/drivers/ub/urma/uburma/uburma_types.h index fb691e2b14d8cd399e43282063f19e9709f3a176..4cbaa5c1677679efd446079c08756a4c26baa000 100644 --- a/drivers/ub/urma/uburma/uburma_types.h +++ b/drivers/ub/urma/uburma/uburma_types.h @@ -30,6 +30,48 @@ #include +enum uburma_remove_reason { + /* Userspace requested uobject deletion. Call could fail */ + UBURMA_REMOVE_DESTROY, + /* Context deletion. This call should delete the actual object itself */ + UBURMA_REMOVE_CLOSE, + /* Driver is being hot-unplugged. 
This call should delete the actual object itself */ + UBURMA_REMOVE_DRIVER_REMOVE, + /* Context is being cleaned-up, but commit was just completed */ + UBURMA_REMOVE_DURING_CLEANUP +}; + +struct uburma_file { + struct kref ref; + struct mutex mutex; + struct uburma_device *ubu_dev; + struct ubcore_ucontext *ucontext; + + /* uobj */ + struct mutex uobjects_lock; + struct list_head uobjects; + struct idr idr; + spinlock_t idr_lock; + struct rw_semaphore cleanup_rwsem; + enum uburma_remove_reason cleanup_reason; + + struct list_head list; + int is_closed; +}; + +struct uburma_port { + struct kobject kobj; + struct uburma_device *ubu_dev; + uint8_t port_num; +}; + +struct uburma_vf { + struct kobject kobj; + struct uburma_device *ubu_dev; + uint32_t vf_idx; +}; + + struct uburma_device { atomic_t refcnt; struct completion comp; /* When refcnt becomes 0, it will wake up */ @@ -39,6 +81,8 @@ struct uburma_device { unsigned int devnum; struct cdev cdev; struct device *dev; + struct uburma_port port[UBCORE_MAX_PORT_CNT]; + struct uburma_vf vf[UBCORE_MAX_VF_CNT]; struct ubcore_device *__rcu ubc_dev; struct srcu_struct ubc_dev_srcu; /* protect ubc_dev */ struct kobject kobj; /* when equal to 0 , free uburma_device. */ diff --git a/drivers/ub/urma/uburma/uburma_uobj.c b/drivers/ub/urma/uburma/uburma_uobj.c new file mode 100644 index 0000000000000000000000000000000000000000..72bf6ead5d7707060dd0df06fe7c4f7663398c58 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_uobj.c @@ -0,0 +1,699 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uobj framework in uburma + * Author: Chen yujie + * Create: 2022-09-07 + * Note: + * History: 2022-09-07: create file + */ + +#include +#include +#include +#include + +#include +#include + +#include "uburma_types.h" +#include "uburma_file_ops.h" +#include "uburma_log.h" +#include "uburma_event.h" +#include "uburma_uobj.h" + +static void uobj_free(struct kref *ref) +{ + kfree_rcu(container_of(ref, struct uburma_uobj, ref), rcu); +} + +struct uburma_uobj *uobj_alloc_begin(const struct uobj_type *type, struct uburma_file *ufile) +{ + struct uburma_uobj *uobj; + + /* Cleanup is running. Calling this should have been impossible */ + if (!down_read_trylock(&ufile->cleanup_rwsem)) { + uburma_log_warn("uobj: cleanup is running while allocating an uobject\n"); + return ERR_PTR(-EIO); + } + uobj = type->type_class->alloc_begin(type, ufile); + if (IS_ERR(uobj)) + up_read(&ufile->cleanup_rwsem); + return uobj; +} + +int uobj_alloc_commit(struct uburma_uobj *uobj) +{ + /* relase write lock */ + atomic_set(&uobj->rcnt, 0); + + /* add uobj to list */ + mutex_lock(&uobj->ufile->uobjects_lock); + list_add(&uobj->list, &uobj->ufile->uobjects); + mutex_unlock(&uobj->ufile->uobjects_lock); + + uobj->type->type_class->alloc_commit(uobj); + + up_read(&uobj->ufile->cleanup_rwsem); + return 0; +} + +void uobj_alloc_abort(struct uburma_uobj *uobj) +{ + uburma_log_info("%s.\n", __func__); + uobj->type->type_class->alloc_abort(uobj); + up_read(&uobj->ufile->cleanup_rwsem); +} + +void uobj_get(struct uburma_uobj *uobj) +{ + kref_get(&uobj->ref); +} + +void uobj_put(struct uburma_uobj *uobj) +{ + kref_put(&uobj->ref, uobj_free); +} + +/* Alloc buffer and init params. 
*/ +static struct uburma_uobj *alloc_uobj(struct uburma_file *ufile, const struct uobj_type *type) +{ + struct ubcore_device *ubc_dev; + struct uburma_uobj *uobj; + + /* block read and write uobj if we are removing device */ + ubc_dev = srcu_dereference(ufile->ubu_dev->ubc_dev, &ufile->ubu_dev->ubc_dev_srcu); + if (!ubc_dev) + return ERR_PTR(-EIO); + + uobj = kzalloc(type->obj_size, GFP_KERNEL); + if (uobj == NULL) + return ERR_PTR(-ENOMEM); + + uobj->ufile = ufile; + uobj->type = type; + + atomic_set(&uobj->rcnt, -1); + kref_init(&uobj->ref); + + return uobj; +} + +static int uobj_alloc_idr(struct uburma_uobj *uobj) +{ + int ret; + + idr_preload(GFP_KERNEL); + spin_lock(&uobj->ufile->idr_lock); + + /* Alloc idr pointing to NULL. Will replace it once we commit. */ + ret = idr_alloc(&uobj->ufile->idr, NULL, 0, min_t(unsigned long, U32_MAX - 1U, INT_MAX), + GFP_NOWAIT); + if (ret >= 0) + uobj->id = ret; + + spin_unlock(&uobj->ufile->idr_lock); + idr_preload_end(); + + return ret < 0 ? ret : 0; +} + +static void uobj_remove_idr(struct uburma_uobj *uobj) +{ + spin_lock(&uobj->ufile->idr_lock); + idr_remove(&uobj->ufile->idr, uobj->id); + spin_unlock(&uobj->ufile->idr_lock); +} + +static int uobj_try_lock(struct uburma_uobj *uobj, bool exclusive) +{ + /* + * When a shared access is required, we use a positive counter. Each + * shared access request checks that the value != -1 and increment it. + * Exclusive access is required for operations like write or destroy. + * In exclusive access mode, we check that the counter is zero (nobody + * claimed this object) and we set it to -1. Releasing a shared access + * lock is done simply by decreasing the counter. As for exclusive + * access locks, since only a single one of them is allowed + * concurrently, setting the counter to zero is enough for releasing + * this lock. + */ + if (!exclusive) + return atomic_add_unless(&uobj->rcnt, 1, -1) ? 
0 : -EBUSY; + + /* lock is either WRITE or DESTROY - should be exclusive */ + return atomic_cmpxchg(&uobj->rcnt, 0, -1) == 0 ? 0 : -EBUSY; +} + +static void uobj_unlock(struct uburma_uobj *uobj, bool exclusive) +{ + /* + * In order to unlock an object, either decrease its rcnt for + * read access or zero it in case of exclusive access. See + * uverbs_try_lock_object for locking schema information. + */ + if (!exclusive) + atomic_dec(&uobj->rcnt); + else + atomic_set(&uobj->rcnt, 0); +} + +static int __must_check uobj_remove_commit_internal(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_file *ufile = uobj->ufile; + int ret; + + ret = uobj->type->type_class->remove_commit(uobj, why); + if (ret && why == UBURMA_REMOVE_DESTROY) { + /* We couldn't remove the object, so just unlock the uobject */ + atomic_set(&uobj->rcnt, 0); + uobj->type->type_class->lookup_put(uobj, true); + } else if (!list_empty(&uobj->list)) { + mutex_lock(&ufile->uobjects_lock); + list_del_init(&uobj->list); + mutex_unlock(&ufile->uobjects_lock); + /* put the ref we took when we created the object */ + uobj_put(uobj); + } + + return ret; +} + +static struct uburma_uobj *uobj_idr_alloc_begin(const struct uobj_type *type, + struct uburma_file *ufile) +{ + struct uburma_uobj *uobj; + int ret; + + uobj = alloc_uobj(ufile, type); + if (IS_ERR(uobj)) + return uobj; + + ret = uobj_alloc_idr(uobj); + if (ret) { + uobj_put(uobj); + return ERR_PTR(ret); + } + + return uobj; +} + +static void uobj_idr_alloc_commit(struct uburma_uobj *uobj) +{ + spin_lock(&uobj->ufile->idr_lock); + WARN_ON(idr_replace(&uobj->ufile->idr, uobj, uobj->id)); + spin_unlock(&uobj->ufile->idr_lock); +} + +static void uobj_idr_alloc_abort(struct uburma_uobj *uobj) +{ + uobj_remove_idr(uobj); + uobj_put(uobj); +} + +static struct uburma_uobj *uobj_idr_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, + enum uobj_access flag) +{ + struct uburma_uobj *uobj = NULL; + + 
rcu_read_lock(); + /* Object won't be released as we're protected in rcu. */ + uobj = idr_find(&ufile->idr, id); + if (uobj == NULL) { + uobj = ERR_PTR(-ENOENT); + goto free; + } + + /* Object associated with uobj may have been released. */ + if (!kref_get_unless_zero(&uobj->ref)) + uobj = ERR_PTR(-ENOENT); + +free: + rcu_read_unlock(); + return uobj; +} + +static void uobj_idr_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag) +{ + /* Empty for now. */ +} + +static int __must_check uobj_idr_remove_commit(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + const struct uobj_idr_type *idr_type = container_of(uobj->type, struct uobj_idr_type, type); + /* Call object destroy function. */ + int ret = idr_type->destroy_func(uobj, why); + + /* Only user req destroy may fail. */ + if (why == UBURMA_REMOVE_DESTROY && ret) + return ret; + + uobj_remove_idr(uobj); + return ret; +} + +static struct uburma_uobj *uobj_fd_alloc_begin(const struct uobj_type *type, + struct uburma_file *ufile) +{ + const struct uobj_fd_type *fd_type = container_of(type, struct uobj_fd_type, type); + struct uburma_uobj *uobj; + struct file *filp; + int new_fd; + + new_fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (new_fd < 0) + return ERR_PTR(new_fd); + + uobj = alloc_uobj(ufile, type); + if (IS_ERR(uobj)) { + put_unused_fd(new_fd); + return uobj; + } + + filp = anon_inode_getfile(fd_type->name, fd_type->fops, uobj, fd_type->flags); + if (IS_ERR(filp)) { + put_unused_fd(new_fd); + uobj_put(uobj); + return (void *)filp; + } + + uobj->id = new_fd; + uobj->object = filp; + + kref_get(&ufile->ref); + + return uobj; +} + +static void uobj_fd_alloc_commit(struct uburma_uobj *uobj) +{ + struct file *filp = (struct file *)uobj->object; + + fd_install(uobj->id, filp); + + /* Do not set uobj->id = 0 as it may be read when remove uobj */ + + /* Get another reference as we export this to the fops */ + uobj_get(uobj); +} + +static void uobj_fd_alloc_abort(struct uburma_uobj *uobj) +{ + 
struct file *filp = uobj->object; + + /* Unsuccessful NEW */ + fput(filp); + put_unused_fd(uobj->id); +} + +static struct uburma_uobj *uobj_fd_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, + enum uobj_access flag) +{ + const struct uobj_fd_type *fd_type = container_of(type, struct uobj_fd_type, type); + struct uburma_uobj *uobj; + struct file *f; + + if (flag != UOBJ_ACCESS_READ) + return ERR_PTR(-EOPNOTSUPP); + + f = fget(id); + if (f == NULL) + return ERR_PTR(-EBADF); + + uobj = f->private_data; + /* + * fget(id) ensures we are not currently running close_fd, + * and the caller is expected to ensure that close_fd is never + * done while a call to lookup is possible. + */ + if (f->f_op != fd_type->fops) { + fput(f); + return ERR_PTR(-EBADF); + } + + uobj_get(uobj); + return uobj; +} + +static void uobj_fd_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag) +{ + struct file *filp = uobj->object; + + WARN_ON(flag != UOBJ_ACCESS_READ); + /* This indirectly calls close_fd and frees the object */ + fput(filp); +} + +static int __must_check uobj_fd_remove_commit(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + const struct uobj_fd_type *fd_type = container_of(uobj->type, struct uobj_fd_type, type); + /* Call user close function. 
*/ + int ret = fd_type->context_closed(uobj, why); + + if (why == UBURMA_REMOVE_DESTROY && ret) + return ret; + + if (why == UBURMA_REMOVE_DURING_CLEANUP) { + uobj_fd_alloc_abort(uobj); + return ret; + } + + return ret; +} + +struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, struct uburma_file *ufile, int id, + enum uobj_access flag) +{ + struct ubcore_device *ubc_dev; + struct uburma_uobj *uobj; + int ret; + + uobj = type->type_class->lookup_get(type, ufile, id, flag); + if (IS_ERR(uobj)) + return uobj; + + if (uobj->type != type) { + ret = -EINVAL; + goto free; + } + + /* block read and write uobj if we are removing device */ + ubc_dev = srcu_dereference(ufile->ubu_dev->ubc_dev, &ufile->ubu_dev->ubc_dev_srcu); + if (!ubc_dev) { + ret = -EIO; + goto free; + } + + if (flag == UOBJ_ACCESS_NOLOCK) + return uobj; + + ret = uobj_try_lock(uobj, flag == UOBJ_ACCESS_WRITE); + if (ret) { + WARN(ufile->cleanup_reason, "uburma: Trying to lookup_get while cleanup context\n"); + goto free; + } + + return uobj; +free: + uobj->type->type_class->lookup_put(uobj, flag); + /* pair with uobj_get in uobj_fd_lookup_get */ + uobj_put(uobj); + return ERR_PTR(ret); +} + +void uobj_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag) +{ + uobj->type->type_class->lookup_put(uobj, flag); + + if (flag != UOBJ_ACCESS_NOLOCK) + uobj_unlock(uobj, flag == UOBJ_ACCESS_WRITE); /* match with uobj_try_lock */ + + uobj_put(uobj); +} + +int __must_check uobj_remove_commit(struct uburma_uobj *uobj) +{ + struct uburma_file *ufile = uobj->ufile; + int ret; + + /* put the ref count we took at lookup_get */ + uobj_put(uobj); + + down_read(&ufile->cleanup_rwsem); + /* try Lock uobj for write with cleanup_rwsem locked */ + ret = uobj_try_lock(uobj, true); + if (ret) { + /* Do not rollback uobj_put here */ + up_read(&ufile->cleanup_rwsem); + uburma_log_warn("Failed to lock uobj\n"); + return ret; + } + + ret = uobj_remove_commit_internal(uobj, UBURMA_REMOVE_DESTROY); + + 
up_read(&ufile->cleanup_rwsem); + return ret; +} + +void uburma_init_uobj_context(struct uburma_file *ufile) +{ + ufile->cleanup_reason = 0; + idr_init(&ufile->idr); + spin_lock_init(&ufile->idr_lock); + INIT_LIST_HEAD(&ufile->uobjects); + mutex_init(&ufile->uobjects_lock); + init_rwsem(&ufile->cleanup_rwsem); +} + +void uburma_cleanup_uobjs(struct uburma_file *ufile, enum uburma_remove_reason why) +{ + unsigned int cur_order = 0; + + ufile->cleanup_reason = why; + down_write(&ufile->cleanup_rwsem); + + while (!list_empty(&ufile->uobjects)) { + struct uburma_uobj *obj, *next_obj; + unsigned int next_order = UINT_MAX; + + mutex_lock(&ufile->uobjects_lock); + list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) { + if (obj->type->destroy_order == cur_order) { + int ret; + /* if we hit this WARN_ON, + * that means we are racing with a lookup_get. + */ + WARN_ON(uobj_try_lock(obj, true)); + ret = obj->type->type_class->remove_commit(obj, why); + if (ret) + pr_warn("uburma: failed to remove uobject id %d order %u\n", + obj->id, cur_order); + + list_del_init(&obj->list); + + /* uburma_close_uobj_fd will also try lock the uobj for write */ + if (uobj_type_is_fd(obj)) + uobj_unlock(obj, true); /* match with uobj_try_lock */ + + /* put the ref we took when we created the object */ + uobj_put(obj); + } else { + next_order = min(next_order, obj->type->destroy_order); + } + } + mutex_unlock(&ufile->uobjects_lock); + cur_order = next_order; + } + + up_write(&ufile->cleanup_rwsem); +} + +static int uburma_free_key(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + return ubcore_free_key_id((struct ubcore_key_id *)uobj->object); +} + +static int uburma_free_seg(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + return ubcore_unregister_seg((struct ubcore_target_seg *)uobj->object); +} + +static int uburma_free_jfc(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jfc_uobj *jfc_uobj = container_of(uobj, struct 
uburma_jfc_uobj, uobj); + struct ubcore_jfc *jfc = (struct ubcore_jfc *)uobj->object; + struct uburma_jfce_uobj *jfce_uobj; + int ret = ubcore_delete_jfc(jfc); + + if (ret) + return ret; + + if (!IS_ERR(jfc_uobj->jfce)) { + jfce_uobj = container_of(jfc_uobj->jfce, struct uburma_jfce_uobj, uobj); + uburma_release_comp_event(jfce_uobj, &jfc_uobj->comp_event_list); + uobj_put(jfc_uobj->jfce); + } + + uburma_release_async_event(uobj->ufile, &jfc_uobj->async_event_list); + return ret; +} + +static int uburma_free_jfs(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jfs_uobj *jfs_uobj = container_of(uobj, struct uburma_jfs_uobj, uobj); + int ret = ubcore_delete_jfs((struct ubcore_jfs *)uobj->object); + + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jfs_uobj->async_event_list); + return ret; +} + +static int uburma_free_jfr(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jfr_uobj *jfr_uobj = container_of(uobj, struct uburma_jfr_uobj, uobj); + int ret = ubcore_delete_jfr((struct ubcore_jfr *)uobj->object); + + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jfr_uobj->async_event_list); + return ret; +} + +static int uburma_free_jetty(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jetty_uobj *jetty_uobj = container_of(uobj, struct uburma_jetty_uobj, uobj); + int ret = ubcore_delete_jetty((struct ubcore_jetty *)uobj->object); + + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jetty_uobj->async_event_list); + return ret; +} + +static int uburma_free_tjfr(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + return ubcore_unimport_jfr((struct ubcore_tjetty *)uobj->object); +} + +static int uburma_free_tjetty(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + return ubcore_unimport_jetty((struct ubcore_tjetty *)uobj->object); +} + +static int uburma_free_tseg(struct uburma_uobj *uobj, enum uburma_remove_reason 
why) +{ + return ubcore_unimport_seg((struct ubcore_target_seg *)uobj->object); +} + +void uburma_close_uobj_fd(struct file *f) +{ + struct uburma_uobj *uobj = f->private_data; + struct uburma_file *ufile = uobj->ufile; + int ret; + + if (down_read_trylock(&ufile->cleanup_rwsem)) { + /* + * uobj_fd_lookup_get holds the kref on the struct file any + * time a FD uobj is locked, which prevents this release + * method from being invoked. Meaning we can always get the + * write lock here, or we have a kernel bug. + */ + WARN_ON(uobj_try_lock(uobj, true)); + ret = uobj_remove_commit_internal(uobj, UBURMA_REMOVE_CLOSE); + up_read(&ufile->cleanup_rwsem); + if (ret) + pr_warn("uburma: unable to clean up uobj file.\n"); + } + + /* Matches the get in uobj_fd_alloc_begin */ + kref_put(&ufile->ref, uburma_release_file); + + /* Pairs with filp->private_data in uobj_fd_alloc_begin */ + uobj_put(uobj); +} + +static int uburma_hot_unplug_jfce(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jfce_uobj *jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + struct uburma_jfe *jfe = &jfce->jfe; + + spin_lock_irq(&jfe->lock); + if (jfe->deleting == true) { + spin_unlock_irq(&jfe->lock); + return 0; + } + jfe->deleting = true; + spin_unlock_irq(&jfe->lock); + + if (why == UBURMA_REMOVE_DRIVER_REMOVE) + wake_up_interruptible(&jfe->poll_wait); + + uburma_uninit_jfe(jfe); + return 0; +} + +static int uburma_hot_unplug_jfae(struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_jfae_uobj *jfae = container_of(uobj, struct uburma_jfae_uobj, uobj); + struct uburma_jfe *jfe = &jfae->jfe; + + spin_lock_irq(&jfe->lock); + if (jfe->deleting == true) { + spin_unlock_irq(&jfe->lock); + return 0; + } + jfe->deleting = true; + spin_unlock_irq(&jfe->lock); + + ubcore_unregister_event_handler(jfae->dev, &jfae->event_handler); + + if (why == UBURMA_REMOVE_DRIVER_REMOVE) + uburma_write_event(&jfae->jfe, 0, UBCORE_EVENT_DEV_FATAL, NULL, NULL); + + 
return 0; +} + +const struct uobj_type_class uobj_idr_type_class = { + .alloc_begin = uobj_idr_alloc_begin, + .alloc_commit = uobj_idr_alloc_commit, + .alloc_abort = uobj_idr_alloc_abort, + .lookup_get = uobj_idr_lookup_get, + .lookup_put = uobj_idr_lookup_put, + .remove_commit = uobj_idr_remove_commit, +}; + +const struct uobj_type_class uobj_fd_type_class = { + .alloc_begin = uobj_fd_alloc_begin, + .alloc_commit = uobj_fd_alloc_commit, + .alloc_abort = uobj_fd_alloc_abort, + .lookup_get = uobj_fd_lookup_get, + .lookup_put = uobj_fd_lookup_put, + .remove_commit = uobj_fd_remove_commit, +}; + +/* The destroy process start from order 0. */ +declare_uobj_class(UOBJ_CLASS_JFCE, + &uobj_type_alloc_fd(3, sizeof(struct uburma_jfce_uobj), uburma_hot_unplug_jfce, + &uburma_jfce_fops, "[jfce]", O_RDWR | O_CLOEXEC)); + +declare_uobj_class(UOBJ_CLASS_JFAE, + &uobj_type_alloc_fd(3, sizeof(struct uburma_jfae_uobj), uburma_hot_unplug_jfae, + &uburma_jfae_fops, "[jfae]", O_RDWR | O_CLOEXEC)); + +declare_uobj_class(UOBJ_CLASS_JFC, + &uobj_type_alloc_idr(sizeof(struct uburma_jfc_uobj), 2, uburma_free_jfc)); +declare_uobj_class(UOBJ_CLASS_KEY, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, uburma_free_key)); +declare_uobj_class(UOBJ_CLASS_SEG, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, uburma_free_seg)); +declare_uobj_class(UOBJ_CLASS_JFS, + &uobj_type_alloc_idr(sizeof(struct uburma_jfs_uobj), 1, uburma_free_jfs)); +declare_uobj_class(UOBJ_CLASS_JFR, + &uobj_type_alloc_idr(sizeof(struct uburma_jfr_uobj), 1, uburma_free_jfr)); +declare_uobj_class(UOBJ_CLASS_JETTY, + &uobj_type_alloc_idr(sizeof(struct uburma_jetty_uobj), 1, uburma_free_jetty)); +declare_uobj_class(UOBJ_CLASS_TARGET_JFR, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, uburma_free_tjfr)); +declare_uobj_class(UOBJ_CLASS_TARGET_JETTY, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, uburma_free_tjetty)); +declare_uobj_class(UOBJ_CLASS_TARGET_SEG, + &uobj_type_alloc_idr(sizeof(struct 
uburma_uobj), 0, uburma_free_tseg)); diff --git a/drivers/ub/urma/uburma/uburma_uobj.h b/drivers/ub/urma/uburma/uburma_uobj.h new file mode 100644 index 0000000000000000000000000000000000000000..5a92c0025f2cf3a1450e48fe0f2348e2771fa7d0 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_uobj.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uobj framework in uburma + * Author: Chen yujie + * Create: 2022-8-11 + * Note: + * History: 2022-8-11: Create file + */ + +#ifndef UBURMA_UOBJ_H +#define UBURMA_UOBJ_H + +#include + +enum UOBJ_CLASS_ID { + UOBJ_CLASS_ROOT, /* used by framework */ + UOBJ_CLASS_KEY, + UOBJ_CLASS_SEG, + UOBJ_CLASS_TARGET_SEG, + UOBJ_CLASS_JFR, + UOBJ_CLASS_JFS, + UOBJ_CLASS_JFC, + UOBJ_CLASS_JFCE, + UOBJ_CLASS_JFAE, + UOBJ_CLASS_TARGET_JFR, + UOBJ_CLASS_JETTY, + UOBJ_CLASS_TARGET_JETTY +}; + +enum uobj_access { + UOBJ_ACCESS_NOLOCK, + UOBJ_ACCESS_READ, /* LOCK READ */ + UOBJ_ACCESS_WRITE /* LOCK WRITE */ +}; + +struct uburma_uobj { + struct uburma_file *ufile; /* associated uburma file */ + void *object; /* containing object */ + struct list_head list; /* link to context's list */ + int id; /* index into kernel idr */ + struct kref ref; /* ref of object associated with uobj */ + atomic_t rcnt; /* protects exclusive access */ + struct rcu_head rcu; /* kfree_rcu() overhead */ + + const struct uobj_type *type; +}; + +struct uobj_type { + const struct uobj_type_class *const type_class; + 
size_t obj_size; + unsigned int destroy_order; +}; + +struct uobj_type_class { + struct uburma_uobj *(*alloc_begin)(const struct uobj_type *type, struct uburma_file *ufile); + void (*alloc_commit)(struct uburma_uobj *uobj); + void (*alloc_abort)(struct uburma_uobj *uobj); + struct uburma_uobj *(*lookup_get)(const struct uobj_type *type, struct uburma_file *ufile, + int id, enum uobj_access flag); + void (*lookup_put)(struct uburma_uobj *uobj, enum uobj_access flag); + int __must_check (*remove_commit)(struct uburma_uobj *uobj, enum uburma_remove_reason why); +}; + +struct uobj_idr_type { + struct uobj_type type; + int __must_check (*destroy_func)(struct uburma_uobj *uobj, enum uburma_remove_reason why); +}; + +struct uobj_fd_type { + struct uobj_type type; + const char *name; + const struct file_operations *fops; + int flags; + int (*context_closed)(struct uburma_uobj *uobj, enum uburma_remove_reason why); +}; + +struct uobj_class_def { + uint16_t id; + const struct uobj_type *type_attrs; +}; + +struct uburma_jfe { + spinlock_t lock; + struct list_head event_list; + wait_queue_head_t poll_wait; + + bool deleting; +}; + +struct uburma_jfce_uobj { + struct uburma_uobj uobj; + struct uburma_jfe jfe; +}; + +struct uburma_jfc_uobj { + struct uburma_uobj uobj; /* base uobj struct */ + struct uburma_uobj *jfce; /* associated jfce uobj */ + struct list_head comp_event_list; + struct list_head async_event_list; + uint32_t comp_events_reported; + uint32_t async_events_reported; +}; + +struct uburma_jfs_uobj { + struct uburma_uobj uobj; /* base uobj struct */ + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_jfr_uobj { + struct uburma_uobj uobj; /* base uobj struct */ + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_jetty_uobj { + struct uburma_uobj uobj; /* base uobj struct */ + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_jfae_uobj { + struct 
uburma_uobj uobj; + struct uburma_jfe jfe; + struct ubcore_event_handler event_handler; + struct ubcore_device *dev; +}; + +extern const struct uobj_type_class uobj_idr_type_class; +extern const struct uobj_type_class uobj_fd_type_class; + +/* uobj base ops */ +struct uburma_uobj *uobj_alloc_begin(const struct uobj_type *type, struct uburma_file *ufile); +int uobj_alloc_commit(struct uburma_uobj *uobj); +void uobj_alloc_abort(struct uburma_uobj *uobj); +struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, struct uburma_file *ufile, int id, + enum uobj_access flag); +void uobj_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag); +int __must_check uobj_remove_commit(struct uburma_uobj *uobj); +void uobj_get(struct uburma_uobj *uobj); +void uobj_put(struct uburma_uobj *uobj); + +/* internal api */ +void uburma_init_uobj_context(struct uburma_file *ufile); +void uburma_cleanup_uobjs(struct uburma_file *ufile, enum uburma_remove_reason why); + +void uburma_close_uobj_fd(struct file *f); + +#define uobj_class_name(class_id) uobj_class_##class_id + +#define uobj_get_type(class_id) uobj_class_name(class_id).type_attrs + +#define _uobj_class_set(_id, _type_attrs) \ + ((const struct uobj_class_def){ .id = (_id), .type_attrs = (_type_attrs) }) + +#define _declare_uobj_class(_name, _id, _type_attrs) \ + const struct uobj_class_def _name = _uobj_class_set(_id, _type_attrs) + +#define declare_uobj_class(class_id, ...) 
\ + _declare_uobj_class(uobj_class_name(class_id), class_id, ##__VA_ARGS__) + + +#define uobj_type_alloc_idr(_size, _order, _destroy_func) \ + ((&((const struct uobj_idr_type) { \ + .type = { \ + .type_class = &uobj_idr_type_class, \ + .obj_size = (_size), \ + .destroy_order = (_order), \ + }, \ + .destroy_func = (_destroy_func), \ + }))->type) + +#define uobj_type_alloc_fd(_order, _obj_size, _context_closed, _fops, _name, _flags) \ + ((&((const struct uobj_fd_type) { \ + .type = { \ + .destroy_order = (_order), \ + .type_class = &uobj_fd_type_class, \ + .obj_size = (_obj_size), \ + }, \ + .context_closed = (_context_closed), \ + .fops = (_fops), \ + .name = (_name), \ + .flags = (_flags) \ + }))->type) + +static inline bool uobj_type_is_fd(const struct uburma_uobj *uobj) +{ + return uobj->type->type_class == &uobj_fd_type_class; +} + +#define uobj_alloc(class_id, ufile) uobj_alloc_begin(uobj_get_type(class_id), ufile) + +#define uobj_get_read(class_id, _id, ufile) \ + uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_READ) + +#define uobj_put_read(uobj) uobj_lookup_put(uobj, UOBJ_ACCESS_READ) + +#define uobj_get_write(class_id, _id, ufile) \ + uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_WRITE) + +#define uobj_put_write(uobj) uobj_lookup_put(uobj, UOBJ_ACCESS_WRITE) + +/* Do not lock uobj without cleanup_rwsem locked */ +#define uobj_get_del(class_id, _id, ufile) \ + uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_NOLOCK) + +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_KEY; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_SEG; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFCE; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFAE; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFC; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFR; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFS; +extern const struct uobj_class_def 
uobj_class_UOBJ_CLASS_JETTY; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_JFR; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_SEG; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_JETTY; + +extern const struct file_operations uburma_jfce_fops; +extern const struct file_operations uburma_jfae_fops; + +#endif /* UBURMA_UOBJ_H */ diff --git a/include/urma/ubcore_types.h b/include/urma/ubcore_types.h index 73bae89574d6e48a82e62e832ddc5cf127ed95b4..c80a072680bf022e6700c719a25a74a871df0b08 100644 --- a/include/urma/ubcore_types.h +++ b/include/urma/ubcore_types.h @@ -217,9 +217,9 @@ union ubcore_reg_seg_flag { }; struct ubcore_udrv_priv { - uint64_t in_addr; + uintptr_t in_addr; uint32_t in_len; - uint64_t out_addr; + uintptr_t out_addr; uint32_t out_len; }; @@ -745,7 +745,7 @@ struct ubcore_tp_cfg { }; struct ubcore_tp_ext { - uint64_t addr; + uintptr_t addr; uint32_t len; }; @@ -923,7 +923,7 @@ struct ubcore_res_key { }; struct ubcore_res_val { - uint64_t addr; /* allocated and free by ubcore */ + uintptr_t addr; /* allocated and free by ubcore */ uint32_t len; /* in&out. As a input parameter, * it indicates the length allocated by the ubcore * As a output parameter, it indicates the actual data length. diff --git a/include/urma/ubcore_uapi.h b/include/urma/ubcore_uapi.h index 008915072ad2f06a67101bf7f31cbf06f9924cd2..8241775399a6840da1023e0deb2e45c0ebfb85fd 100644 --- a/include/urma/ubcore_uapi.h +++ b/include/urma/ubcore_uapi.h @@ -24,7 +24,6 @@ #define UBCORE_UAPI_H #include - /** * Application specifies the device to allocate an context. * @param[in] dev: ubcore_device found by add ops in the client. 
@@ -51,6 +50,47 @@ void ubcore_free_ucontext(const struct ubcore_device *dev, struct ubcore_ucontex * @return: 0 on success, other value on error */ int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid); +/** + * set upi + * @param[in] dev: the ubcore_device handle; + * @param[in] vf_id: vf_id; + * @param[in] idx: idx of upi in vf; + * @param[in] upi: upi of vf to set + * @return: 0 on success, other value on error + */ +int ubcore_set_upi(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx, uint32_t upi); +/** + * add a function entity id (eid) to ub device, the upi of vf to which the eid belongs + * can be specified + * @param[in] dev: the ubcore_device handle; + * @param[in] eid: function entity id (eid) to be added; + * @param[in] upi: upi of vf; + * @return: the index of eid/upi, less than 0 indicating error + */ +int ubcore_add_eid(struct ubcore_device *dev, union ubcore_eid *eid); +/** + * remove a function entity id (eid) specified by idx from ub device + * @param[in] dev: the ubcore_device handle; + * @param[in] idx: the idx of function entity id (eid) to be deleted; + * @return: 0 on success, other value on error + */ +int ubcore_delete_eid(struct ubcore_device *dev, uint16_t idx); +/** + * add a function entity id (eid) to ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] vf_id: vf_id; + * @param[in] cfg: eid and the upi of vf to which the eid belongs can be specified; + * @return: the index of eid/upi, less than 0 indicating error + */ +int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg); +/** + * remove a function entity id (eid) specified by idx from ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] vf_id: vf_id; + * @param[in] idx: the idx of function entity id (eid) to be deleted; + * @return: 0 on success, other value on error + */ +int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t vf_id, uint16_t idx); /** * query 
device attributes * @param[in] dev: the ubcore_device handle; @@ -58,6 +98,32 @@ int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid); * @return: 0 on success, other value on error */ int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_attr *attr); +/** + * query device status + * @param[in] dev: the ubcore_device handle; + * @param[out] status: status returned to client + * @return: 0 on success, other value on error + */ +int ubcore_query_device_status(const struct ubcore_device *dev, + struct ubcore_device_status *status); +/** + * query stats + * @param[in] dev: the ubcore_device handle; + * @param[in] key: stats type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ +int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val); +/** + * query resource + * @param[in] dev: the ubcore_device handle; + * @param[in] key: resource type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ +int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val); /** * config device * @param[in] dev: the ubcore_device handle; @@ -95,13 +161,368 @@ int ubcore_register_client(struct ubcore_client *new_client); */ void ubcore_unregister_client(struct ubcore_client *rm_client); /** - * query stats - * @param[in] dev: the ubcore_device handle; - * @param[in] key: stats type and key; - * @param[in/out] val: addr and len of value + * alloc key to ubcore device + * @param[in] dev: the ubcore device handle; + * @param[in] udata (optional): ucontext and user space driver data + * @return: key id pointer on success, NULL on error + */ +struct ubcore_key_id *ubcore_alloc_key_id(struct ubcore_device *dev, struct ubcore_udata *udata); +/** + * free key id from ubcore device + * @param[in] key: the key id alloced before; * @return: 
0 on success, other value on error */ -int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, - struct ubcore_stats_val *val); +int ubcore_free_key_id(struct ubcore_key_id *key); +/** + * register segment to ubcore device + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: segment configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target segment pointer on success, NULL on error + */ +struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, + const struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); +/** + * unregister segment from ubcore device + * @param[in] tseg: the segment registered before; + * @return: 0 on success, other value on error + */ +int ubcore_unregister_seg(struct ubcore_target_seg *tseg); +/** + * import a remote segment to ubcore device + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: import configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target segment handle on success, NULL on error + */ +struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, + const struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); +/** + * unimport seg from ubcore device + * @param[in] tseg: the segment imported before; + * @return: 0 on success, other value on error + */ +int ubcore_unimport_seg(struct ubcore_target_seg *tseg); +/** + * create jfc with ubcore device. 
+ * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jfc attributes and configurations + * @param[in] jfce_handler (optional): completion event handler + * @param[in] jfae_handler (optional): jfc async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jfc pointer on success, NULL on error + */ +struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, const struct ubcore_jfc_cfg *cfg, + ubcore_comp_callback_t jfce_handler, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jfc from ubcore device. + * @param[in] jfc: the jfc created before; + * @param[in] attr: ubcore jfc attributes; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jfc(struct ubcore_jfc *jfc, const struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata); +/** + * destroy jfc from ubcore device. + * @param[in] jfc: the jfc created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jfc(struct ubcore_jfc *jfc); +/** + * rearm jfc. + * @param[in] jfc: the jfc created before; + * @param[in] solicited_only: rearm notify by message marked with solicited flag + * @return: 0 on success, other value on error + */ +int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); +/** + * create jfs with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jfs configurations + * @param[in] jfae_handler (optional): jfs async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jfs pointer on success, NULL on error + */ +struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, const struct ubcore_jfs_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jfs from ubcore device. 
+ * @param[in] jfs: the jfs created before; + * @param[in] attr: ubcore jfs attributes; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jfs(struct ubcore_jfs *jfs, const struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata); +/** + * query jfs from ubcore device. + * @param[in] jfs: the jfs created before; + * @param[out] cfg: jfs configurations; + * @param[out] attr: ubcore jfs attributes; + * @return: 0 on success, other value on error + */ +int ubcore_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr); +/** + * destroy jfs from ubcore device. + * @param[in] jfs: the jfs created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jfs(struct ubcore_jfs *jfs); +/** + * return the wrs in JFS that is not consumed to the application through cr. + * @param[in] jfs: the jfs created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, 0 means no completion record returned, + * -1 on error + */ +int ubcore_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); +/** + * create jfr with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jfr configurations + * @param[in] jfae_handler (optional): jfr async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jfr pointer on success, NULL on error + */ +struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, const struct ubcore_jfr_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jfr from ubcore device. 
+ * @param[in] jfr: the jfr created before; + * @param[in] attr: ubcore jfr attr; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jfr(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata); +/** + * query jfr from ubcore device. + * @param[in] jfr: the jfr created before; + * @param[out] cfg: jfr configurations; + * @param[out] attr: ubcore jfr attributes; + * @return: 0 on success, other value on error + */ +int ubcore_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr); +/** + * destroy jfr from ubcore device. + * @param[in] jfr: the jfr created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jfr(struct ubcore_jfr *jfr); +/** + * create jetty with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jetty attributes and configurations + * @param[in] jfae_handler (optional): jetty async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jetty pointer on success, NULL on error + */ +struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, + const struct ubcore_jetty_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jetty attributes. + * @param[in] jetty: the jetty created before; + * @param[in] attr: ubcore jetty attributes; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jetty(struct ubcore_jetty *jetty, const struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata); +/** + * query jetty from ubcore device. 
+ * @param[in] jetty: the jetty created before; + * @param[out] cfg: jetty configurations; + * @param[out] attr: ubcore jetty attributes; + * @return: 0 on success, other value on error + */ +int ubcore_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr); +/** + * destroy jetty from ubcore device. + * @param[in] jetty: the jetty created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jetty(struct ubcore_jetty *jetty); +/** + * return the wrs in JETTY that is not consumed to the application through cr. + * @param[in] jetty: the jetty created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, 0 means no completion record returned, + * -1 on error + */ +int ubcore_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr); +/** + * import jfr to ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jfr attributes and import configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jfr pointer on success, NULL on error + */ +struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, + const struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); +/** + * unimport jfr from ubcore device. + * @param[in] tjfr: the target jfr imported before; + * @return: 0 on success, other value on error + */ +int ubcore_unimport_jfr(struct ubcore_tjetty *tjfr); +/** + * import jetty to ubcore device. 
+ * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ +struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, + const struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); +/** + * unimport jetty from ubcore device. + * @param[in] tjetty: the target jetty imported before; + * @return: 0 on success, other value on error + */ +int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty); +/** + * Advise jfr: construct the transport channel for jfs and remote jfr. + * @param[in] jfs: jfs to use to construct the transport channel; + * @param[in] tjfr: target jfr to reach; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_advise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr, + struct ubcore_udata *udata); +/** + * Unadvise jfr: Tear down the transport channel from jfs to remote jfr. + * @param[in] jfs: jfs to use to destruct the transport channel; + * @param[in] tjfr: target jfr advised before; + * @return: 0 on success, other value on error + */ +int ubcore_unadvise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr); +/** + * Advise jetty: construct the transport channel between local jetty and remote jetty. + * @param[in] jetty: local jetty to construct the transport channel; + * @param[in] tjetty: target jetty to reach imported before; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_advise_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); +/** + * Unadvise jetty: deconstruct the transport channel between local jetty and remote jetty. 
+ * @param[in] jetty: local jetty to destruct the transport channel; + * @param[in] tjetty: target jetty advised before; + * @return: 0 on success, other value on error + */ +int ubcore_unadvise_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty); +/** + * Bind jetty: Bind local jetty with remote jetty, and construct a transport channel between them. + * @param[in] jetty: local jetty to bind; + * @param[in] tjetty: target jetty imported before; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + * Note: A local jetty can be bound to only one remote jetty. + * Only supported by jetty with URMA_TM_RC. + */ +int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); +/** + * Unbind jetty: Unbind local jetty with remote jetty, + * and tear down the transport channel between them. + * @param[in] jetty: local jetty to unbind; + * @param[in] tjetty: target jetty bound before; + * @return: 0 on success, other value on error + */ +int ubcore_unbind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty); +/** + * operation of user ioctl cmd. 
+ * @param[in] k_user_ctl: kdrv user control command pointer; + * @return: 0 on success, other value on error + */ +int ubcore_user_control(struct ubcore_user_ctl *k_user_ctl); +/** + * Client register an async_event handler to ubcore + * @param[in] dev: the ubcore device handle; + * @param[in] handler: async_event handler to be registered + * Note: the handler will be called when driver reports an async_event with + * ubcore_dispatch_async_event + */ +void ubcore_register_event_handler(struct ubcore_device *dev, struct ubcore_event_handler *handler); +/** + * Client unregister async_event handler from ubcore + * @param[in] dev: the ubcore device handle; + * @param[in] handler: async_event handler to be unregistered + */ +void ubcore_unregister_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler); + +/* data path API */ +/** + * post jfs wr. + * @param[in] jfs: the jfs created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); +/** + * post jfr wr. + * @param[in] jfr: the jfr created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); +/** + * post jetty send wr. + * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); +/** + * post jetty receive wr. 
+ * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); +/** + * poll jfc. + * @param[in] jfc: the jfc created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be polled; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, 0 means no completion record returned, + * -1 on error + */ +int ubcore_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); + +/* The APIs below are deprecated, should not be called by driver or ubcore client */ +struct ubcore_jfc *ubcore_find_jfc(struct ubcore_device *dev, uint32_t jfc_id); + +struct ubcore_jfs *ubcore_find_jfs(struct ubcore_device *dev, uint32_t jfs_id); + +struct ubcore_jfr *ubcore_find_jfr(struct ubcore_device *dev, uint32_t jfr_id); + +struct ubcore_jetty *ubcore_find_jetty(struct ubcore_device *dev, uint32_t jetty_id); #endif