diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 86b0792b23538cbb305360634dafe6f90c72ae30..267b15cfb998f88a8b92d4b6ff2c3f85c9f8cf6c 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -6196,6 +6196,7 @@ CONFIG_TEE=m # CONFIG_MOST is not set CONFIG_ROH=m CONFIG_ROH_HNS=m +CONFIG_UB=m # end of Device Drivers # diff --git a/drivers/Kconfig b/drivers/Kconfig index b1b3d958f065ab4efc171d40d43b90c9338d64ad..9f339bed684788494ff084545a2bd0e27ce5f432 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -240,4 +240,6 @@ source "drivers/most/Kconfig" source "drivers/roh/Kconfig" +source "drivers/ub/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index bfbf6553316902893a39d0648360fac4c786dc70..c9b8abae034960ffdf129dc2ffb703b1ba3439a9 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -192,3 +192,4 @@ obj-$(CONFIG_INTERCONNECT) += interconnect/ obj-$(CONFIG_COUNTER) += counter/ obj-$(CONFIG_MOST) += most/ obj-$(CONFIG_ROH) += roh/ +obj-$(CONFIG_UB) += ub/ diff --git a/drivers/ub/Kconfig b/drivers/ub/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..404d2bc22413658c15ccbe7da7e8ccc91ac9796c --- /dev/null +++ b/drivers/ub/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +menuconfig UB + tristate "Unified Bus (UB) support" + default m + help + Core support for Unified Bus (UB). + To compile UB core as module, choose M here. diff --git a/drivers/ub/Makefile b/drivers/ub/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9cd43d23bffa2175c2c5e2b5b91003cbeefccbc0 --- /dev/null +++ b/drivers/ub/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. 
+# + +obj-$(CONFIG_UB) += urma/ diff --git a/drivers/ub/urma/Makefile b/drivers/ub/urma/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..fd6bd44e5b0de9536bcbff5935b725d6a5708e28 --- /dev/null +++ b/drivers/ub/urma/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. +# + +obj-$(CONFIG_UB) += ubcore/ +obj-$(CONFIG_UB) += uburma/ diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..eba8e210fda39bbf2a71de8c3a38b89c03c22fa4 --- /dev/null +++ b/drivers/ub/urma/ubcore/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. +# + +ubcore-objs := ubcore_main.o \ + ubcore_device.o \ + ubcore_umem.o \ + ubcore_tp.o \ + ubcore_netlink.o + +obj-$(CONFIG_UB) += ubcore.o diff --git a/drivers/ub/urma/ubcore/ubcore_cmd.h b/drivers/ub/urma/ubcore/ubcore_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..51dbbb8aae7687a835c09b64beb7948579493e71 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cmd.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore cmd header file + * Author: Qian Guoxin + * Create: 2023-2-28 + * Note: + * History: 2023-2-28: Create file + */ + +#ifndef UBCORE_CMD_H +#define UBCORE_CMD_H + +#include +#include +#include "ubcore_log.h" +#include + +struct ubcore_cmd_hdr { + uint32_t command; + uint32_t args_len; + uint64_t args_addr; +}; + +#define UBCORE_CMD_MAGIC 'C' +#define UBCORE_CMD _IOWR(UBCORE_CMD_MAGIC, 1, struct ubcore_cmd_hdr) +#define UBCORE_MAX_CMD_SIZE 4096 +#define UBCORE_CMD_EID_SIZE 16 + +/* only for ubcore device ioctl */ +enum ubcore_cmd { + UBCORE_CMD_SET_UASID = 1, + UBCORE_CMD_PUT_UASID, + UBCORE_CMD_SET_UTP, + UBCORE_CMD_SHOW_UTP, + UBCORE_CMD_QUERY_STATS, + UBCORE_CMD_QUERY_RES +}; + +struct ubcore_cmd_set_uasid { + struct { + uint64_t token; + uint32_t uasid; + } in; + struct { + uint32_t uasid; + } out; +}; + +struct ubcore_cmd_put_uasid { + struct { + uint32_t uasid; + } in; +}; + +struct ubcore_cmd_query_stats { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint8_t eid[UBCORE_CMD_EID_SIZE]; + uint32_t tp_type; + uint32_t type; + uint32_t key; + } in; + struct { + uint64_t tx_pkt; + uint64_t rx_pkt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint64_t tx_pkt_err; + uint64_t rx_pkt_err; + } out; +}; + +/* copy from user_space addr to kernel args */ +static inline int ubcore_copy_from_user(void *args, const void *args_addr, unsigned long args_size) +{ + int ret = (int)copy_from_user(args, args_addr, args_size); + + if (ret != 0) + ubcore_log_err("copy from user failed, ret:%d.\n", ret); + return ret; +} + +/* copy kernel args to user_space addr */ +static inline int ubcore_copy_to_user(void *args_addr, const void *args, unsigned long args_size) +{ + int ret = (int)copy_to_user(args_addr, args, args_size); + + if (ret != 0) + ubcore_log_err("copy to user failed ret:%d.\n", ret); + return ret; +} +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c new file mode 100644 index 
0000000000000000000000000000000000000000..56f8ac5364b7e02092a5761787cb74fd068ef5c3 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_device.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore device add and remove ops file + * Author: Qian Guoxin + * Create: 2021-08-03 + * Note: + * History: 2021-08-03: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ubcore_log.h" +#include +#include +#include "ubcore_priv.h" + +static LIST_HEAD(g_client_list); +static LIST_HEAD(g_device_list); + +/* + * g_device_mutex and g_lists_rwsem protect both g_device_list and g_client_list. + * g_device_mutex protects writer access by device and client + * g_lists_rwsem protects reader access to these lists. + * Iterators of these lists must lock it for read, while updates + * to the lists must be done with a write lock. 
+ */ +static DEFINE_MUTEX(g_device_mutex); +static DECLARE_RWSEM(g_lists_rwsem); + +void ubcore_set_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client, + void *data) +{ + struct ubcore_client_ctx *ctx; + unsigned long flags; + + spin_lock_irqsave(&dev->client_ctx_lock, flags); + list_for_each_entry(ctx, &dev->client_ctx_list, list_node) { + if (ctx->client == client) { + ctx->data = data; + goto out; + } + } + ubcore_log_err("no client ctx found, device_name: %s, client_name: %s.\n", dev->dev_name, + client->client_name); + +out: + spin_unlock_irqrestore(&dev->client_ctx_lock, flags); +} +EXPORT_SYMBOL(ubcore_set_client_ctx_data); + +void *ubcore_get_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client) +{ + struct ubcore_client_ctx *found_ctx = NULL; + struct ubcore_client_ctx *ctx, *tmp; + unsigned long flags; + + spin_lock_irqsave(&dev->client_ctx_lock, flags); + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + if (ctx->client == client) { + found_ctx = ctx; + break; + } + } + + if (found_ctx == NULL) { + spin_unlock_irqrestore(&dev->client_ctx_lock, flags); + ubcore_log_warn("no client ctx found, dev_name: %s, client_name: %s.\n", + dev->dev_name, client->client_name); + return NULL; + } + spin_unlock_irqrestore(&dev->client_ctx_lock, flags); + + return found_ctx->data; +} +EXPORT_SYMBOL(ubcore_get_client_ctx_data); + +static struct ubcore_client_ctx *create_client_ctx(struct ubcore_device *dev, + struct ubcore_client *client) +{ + struct ubcore_client_ctx *ctx; + unsigned long flags; + + ctx = kmalloc(sizeof(struct ubcore_client_ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->data = NULL; + ctx->client = client; + + down_write(&g_lists_rwsem); + spin_lock_irqsave(&dev->client_ctx_lock, flags); + list_add(&ctx->list_node, &dev->client_ctx_list); + spin_unlock_irqrestore(&dev->client_ctx_lock, flags); + up_write(&g_lists_rwsem); + + return ctx; +} + +static void 
destroy_client_ctx(struct ubcore_device *dev, struct ubcore_client_ctx *ctx) +{ + unsigned long flags; + + if (dev == NULL || ctx == NULL) + return; + + down_write(&g_lists_rwsem); + spin_lock_irqsave(&dev->client_ctx_lock, flags); + list_del(&ctx->list_node); + spin_unlock_irqrestore(&dev->client_ctx_lock, flags); + up_write(&g_lists_rwsem); + kfree(ctx); +} + +int ubcore_register_client(struct ubcore_client *new_client) +{ + struct ubcore_device *dev; + struct ubcore_client_ctx *ctx = NULL; + + mutex_lock(&g_device_mutex); + + list_for_each_entry(dev, &g_device_list, list_node) { + ctx = create_client_ctx(dev, new_client); + if (ctx == NULL) + continue; + + if (new_client->add && new_client->add(dev) != 0) { + destroy_client_ctx(dev, ctx); + ubcore_log_err("ubcore client: %s register dev:%s failed.\n", + new_client->client_name, dev->dev_name); + } + } + down_write(&g_lists_rwsem); + list_add_tail(&new_client->list_node, &g_client_list); + up_write(&g_lists_rwsem); + + mutex_unlock(&g_device_mutex); + + ubcore_log_info("ubcore client: %s register success.\n", new_client->client_name); + return 0; +} +EXPORT_SYMBOL(ubcore_register_client); + +void ubcore_unregister_client(struct ubcore_client *rm_client) +{ + struct ubcore_client_ctx *ctx, *tmp; + struct ubcore_device *dev; + unsigned long flags; + + mutex_lock(&g_device_mutex); + + down_write(&g_lists_rwsem); + list_del(&rm_client->list_node); + up_write(&g_lists_rwsem); + + list_for_each_entry(dev, &g_device_list, list_node) { + struct ubcore_client_ctx *found_ctx = NULL; + + down_write(&g_lists_rwsem); + spin_lock_irqsave(&dev->client_ctx_lock, flags); + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + if (ctx->client == rm_client) { + found_ctx = ctx; + break; + } + } + spin_unlock_irqrestore(&dev->client_ctx_lock, flags); + up_write(&g_lists_rwsem); + + if (found_ctx == NULL) { + ubcore_log_warn("no client ctx found, dev_name: %s, client_name: %s.\n", + dev->dev_name, 
rm_client->client_name); + continue; + } + if (rm_client->remove) + rm_client->remove(dev, found_ctx->data); + + destroy_client_ctx(dev, found_ctx); + ubcore_log_info("dev remove client, dev_name: %s, client_name: %s.\n", + dev->dev_name, rm_client->client_name); + } + + mutex_unlock(&g_device_mutex); + ubcore_log_info("ubcore client: %s unregister success.\n", rm_client->client_name); +} +EXPORT_SYMBOL(ubcore_unregister_client); + +struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, enum ubcore_transport_type type) +{ + struct ubcore_device *dev, *target = NULL; + + mutex_lock(&g_device_mutex); + list_for_each_entry(dev, &g_device_list, list_node) { + if (memcmp(&dev->attr.eid, eid, sizeof(union ubcore_eid)) == 0 && + dev->transport_type == type) { + target = dev; + ubcore_get_device(target); + break; + } + } + mutex_unlock(&g_device_mutex); + return target; +} + +/* Find only, without get_device */ +static struct ubcore_device *ubcore_find_device_with_name(const char *dev_name) +{ + struct ubcore_device *dev, *target = NULL; + + mutex_lock(&g_device_mutex); + list_for_each_entry(dev, &g_device_list, list_node) { + if (strcmp(dev->dev_name, dev_name) == 0) { + target = dev; + break; + } + } + mutex_unlock(&g_device_mutex); + return target; +} + +struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, uint32_t *cnt) +{ + struct ubcore_device **devices; + struct ubcore_device *dev; + uint32_t i = 0; + + mutex_lock(&g_device_mutex); + list_for_each_entry(dev, &g_device_list, list_node) { + /* Assume netdev is related to the first and only port */ + if (dev->netdev == netdev) + i++; + } + + if (i == 0) { + *cnt = 0; + mutex_unlock(&g_device_mutex); + return NULL; + } + + devices = kzalloc(i * sizeof(struct ubcore_device *), GFP_ATOMIC); + if (devices == NULL) { + *cnt = 0; + mutex_unlock(&g_device_mutex); + return NULL; + } + + *cnt = i; + i = 0; + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->netdev == netdev) { 
+ ubcore_get_device(dev); + devices[i] = dev; + i++; + } + } + mutex_unlock(&g_device_mutex); + return devices; +} + +void ubcore_put_devices(struct ubcore_device **devices, uint32_t cnt) +{ + uint32_t i; + + if (devices == NULL) + return; + + for (i = 0; i < cnt; i++) + ubcore_put_device(devices[i]); + + kfree(devices); +} + +void ubcore_get_device(struct ubcore_device *dev) +{ + if (IS_ERR_OR_NULL(dev)) { + ubcore_log_err("Invalid parameter"); + return; + } + + atomic_inc(&dev->use_cnt); +} + +void ubcore_put_device(struct ubcore_device *dev) +{ + if (IS_ERR_OR_NULL(dev)) { + ubcore_log_err("Invalid parameter"); + return; + } + + if (atomic_dec_and_test(&dev->use_cnt)) + complete(&dev->comp); +} + +static void ubcore_device_release(struct device *device) +{ +} + +static int init_ubcore_device(struct ubcore_device *dev) +{ + if (dev->ops->query_device_attr != NULL && + dev->ops->query_device_attr(dev, &dev->attr) != 0) { + ubcore_log_err("Failed to query device attributes"); + return -1; + } + + device_initialize(&dev->dev); + dev_set_drvdata(&dev->dev, dev); + dev_set_name(&dev->dev, "%s", dev->dev_name); + dev->dev.release = ubcore_device_release; + + INIT_LIST_HEAD(&dev->list_node); + spin_lock_init(&dev->client_ctx_lock); + INIT_LIST_HEAD(&dev->client_ctx_list); + INIT_LIST_HEAD(&dev->port_list); + spin_lock_init(&dev->event_handler_lock); + INIT_LIST_HEAD(&dev->event_handler_list); + + init_completion(&dev->comp); + atomic_set(&dev->use_cnt, 1); + + ubcore_set_default_eid(dev); + return 0; +} + +static void uninit_ubcore_device(struct ubcore_device *dev) +{ + put_device(&dev->dev); +} + +int ubcore_register_device(struct ubcore_device *dev) +{ + struct ubcore_client *client = NULL; + struct ubcore_client_ctx *ctx = NULL; + + if (dev == NULL || dev->ops == NULL || strlen(dev->dev_name) == 0) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ubcore_find_device_with_name(dev->dev_name) != NULL) { + ubcore_log_err("Duplicate device name 
%s.\n", dev->dev_name); + return -EEXIST; + } + + if (init_ubcore_device(dev) != 0) { + ubcore_log_err("failed to init ubcore device.\n"); + return -EINVAL; + } + + mutex_lock(&g_device_mutex); + + list_for_each_entry(client, &g_client_list, list_node) { + ctx = create_client_ctx(dev, client); + if (ctx == NULL) + continue; + if (client->add && client->add(dev) != 0) { + destroy_client_ctx(dev, ctx); + ubcore_log_err("ubcore device: %s register client:%s failed.\n", + dev->dev_name, client->client_name); + } + } + + down_write(&g_lists_rwsem); + list_add_tail(&dev->list_node, &g_device_list); + up_write(&g_lists_rwsem); + + mutex_unlock(&g_device_mutex); + + ubcore_log_info("ubcore device: %s register success.\n", dev->dev_name); + return 0; +} +EXPORT_SYMBOL(ubcore_register_device); + +void ubcore_unregister_device(struct ubcore_device *dev) +{ + struct ubcore_client_ctx *ctx, *tmp; + + mutex_lock(&g_device_mutex); + + /* Remove device from g_device_list */ + down_write(&g_lists_rwsem); + list_del(&dev->list_node); + + /* Destroy uburma device, may be scheduled. 
+ * This should not be done within a spin_lock_irqsave + */ + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + if (ctx->client != NULL && ctx->client->remove != NULL) + ctx->client->remove(dev, ctx->data); + } + up_write(&g_lists_rwsem); + + uninit_ubcore_device(dev); + + mutex_unlock(&g_device_mutex); + + /* Finally, free client ctx */ + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) + destroy_client_ctx(dev, ctx); + + /* Pair with set use_cnt = 1 when init device */ + ubcore_put_device(dev); + /* Wait for use cnt == 0 */ + wait_for_completion(&dev->comp); + + ubcore_log_info("ubcore device: %s unregister success.\n", dev->dev_name); +} +EXPORT_SYMBOL(ubcore_unregister_device); + +void ubcore_dispatch_async_event(struct ubcore_event *event) +{ +} +EXPORT_SYMBOL(ubcore_dispatch_async_event); + +struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t uasid, + struct ubcore_udrv_priv *udrv_data) +{ + struct ubcore_ucontext *ucontext; + + if (dev == NULL || dev->ops == NULL || dev->ops->alloc_ucontext == NULL) { + ubcore_log_err("alloc_ucontext not registered.\n"); + return NULL; + } + ucontext = dev->ops->alloc_ucontext(dev, uasid, udrv_data); + if (ucontext == NULL) { + ubcore_log_err("failed to alloc ucontext.\n"); + return NULL; + } + ucontext->uasid = uasid; + ucontext->ub_dev = dev; + ubcore_log_info("success to alloc ucontext with uasid = %u", uasid); + return ucontext; +} +EXPORT_SYMBOL(ubcore_alloc_ucontext); + +void ubcore_free_ucontext(const struct ubcore_device *dev, struct ubcore_ucontext *ucontext) +{ + int ret; + + if (dev == NULL || ucontext == NULL || dev->ops == NULL || + dev->ops->free_ucontext == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + ret = dev->ops->free_ucontext(ucontext); + if (ret != 0) + ubcore_log_err("failed to free_adu, ret: %d.\n", ret); +} +EXPORT_SYMBOL(ubcore_free_ucontext); + +int ubcore_set_eid(struct ubcore_device *dev, union 
ubcore_eid *eid) +{ + int ret; + + if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->set_eid == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->set_eid(dev, *eid); + if (ret != 0) { + ubcore_log_err("failed to set eid, ret: %d.\n", ret); + return -EPERM; + } + dev->attr.eid = *eid; + return 0; +} +EXPORT_SYMBOL(ubcore_set_eid); + +int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_attr *attr) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->query_device_attr == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_device_attr(dev, attr); + if (ret != 0) { + ubcore_log_err("failed to query device attr, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_device_attr); + +int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg) +{ + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || dev->ops->config_device == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->config_device(dev, cfg); + if (ret != 0) { + ubcore_log_err("failed to config device, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_config_device); + +int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val) +{ + int ret; + + if (dev == NULL || key == NULL || val == NULL || dev->ops == NULL || + dev->ops->query_stats == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_stats(dev, key, val); + if (ret != 0) { + ubcore_log_err("Failed to query stats, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_stats); diff --git a/drivers/ub/urma/ubcore/ubcore_log.h b/drivers/ub/urma/ubcore/ubcore_log.h new file mode 100644 index 
0000000000000000000000000000000000000000..c9a8021d60eb280e0d45a183e0ef8bfdc9e7da83 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_log.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore log head file + * Author: Qian Guoxin + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + */ + +#ifndef UBCORE_LOG_H +#define UBCORE_LOG_H + +/* add log head info, "LogTag_UBCORE|function|[line]| */ +#define UBCORE_LOG_TAG "LogTag_UBCORE" +#define ubcore_log(l, format, args...) \ + ((void)pr_##l("%s|%s:[%d]|" format, UBCORE_LOG_TAG, __func__, __LINE__, ##args)) + +#define ubcore_log_info(...) ubcore_log(info, __VA_ARGS__) + +#define ubcore_log_err(...) ubcore_log(err, __VA_ARGS__) + +#define ubcore_log_warn(...) ubcore_log(warn, __VA_ARGS__) + +#define ubcore_log_debug(...) ubcore_log(debug, __VA_ARGS__) +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_main.c b/drivers/ub/urma/ubcore/ubcore_main.c new file mode 100644 index 0000000000000000000000000000000000000000..db1f82d62275b4deccb05123d3a79696fae6604f --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_main.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore kernel module + * Author: Qian Guoxin + * Create: 2021-08-03 + * Note: + * History: 2021-08-03: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ubcore_cmd.h" +#include "ubcore_log.h" +#include "ubcore_netlink.h" +#include +#include +#include "ubcore_priv.h" + +/* ubcore create independent cdev and ioctl channels + * to handle public work. + */ +#define UBCORE_DEVICE_NAME "ubcore" +#define UBCORE_CLASS_NAME "ubus" +#define UBCORE_IPV4_MAP_IPV6_PREFIX 0x0000ffff + +struct ubcore_ctx { + dev_t ubcore_devno; + struct cdev ubcore_cdev; + struct class *ubcore_class; + struct device *ubcore_dev; +}; + +static struct ubcore_ctx g_ubcore_ctx; +#define UBCORE_MAX_UASID (1 << 24) +static DECLARE_BITMAP(g_uasid_bitmap, UBCORE_MAX_UASID); +static DEFINE_SPINLOCK(g_uasid_spinlock); + +struct ubcore_net_addr_node { + struct list_head node; + struct ubcore_net_addr addr; +}; + +int ubcore_open(struct inode *i_node, struct file *filp) +{ + return 0; +} + +static uint32_t ubcore_uasid_alloc(uint32_t uasid) +{ + spin_lock(&g_uasid_spinlock); + if (uasid > 0) { + uint32_t ret = 0; + + if (test_bit(uasid, g_uasid_bitmap) == 0) { + set_bit(uasid, g_uasid_bitmap); + spin_unlock(&g_uasid_spinlock); + ret = uasid; + } else { + spin_unlock(&g_uasid_spinlock); + ubcore_log_err("uasid allocation failed.\n"); + return 0; + } + if (ret != 0) + return ret; + } + uasid = (uint32_t)find_first_zero_bit(g_uasid_bitmap, UBCORE_MAX_UASID); + if (uasid 
>= UBCORE_MAX_UASID) { + ubcore_log_err("uasid allocation failed.\n"); + spin_unlock(&g_uasid_spinlock); + return 0; + } + set_bit(uasid, g_uasid_bitmap); + spin_unlock(&g_uasid_spinlock); + return uasid; +} + +static int ubcore_uasid_free(uint32_t uasid) +{ + spin_lock(&g_uasid_spinlock); + if (uasid == 0) { + spin_unlock(&g_uasid_spinlock); + ubcore_log_err("uasid is zero.\n"); + return -EINVAL; + } + if (test_bit(uasid, g_uasid_bitmap) == false) { + spin_unlock(&g_uasid_spinlock); + ubcore_log_err("uasid is used.\n"); + return -EINVAL; + } + clear_bit(uasid, g_uasid_bitmap); + spin_unlock(&g_uasid_spinlock); + return 0; +} + +static int ubcore_cmd_set_uasid(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_set_uasid arg; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_set_uasid)); + if (ret != 0) + return -EPERM; + + arg.out.uasid = ubcore_uasid_alloc(arg.in.uasid); + if (arg.out.uasid == 0) { + ubcore_log_err("set uasid allocation failed, in_uasid: %u.\n", arg.in.uasid); + return -ENOMEM; + } + ubcore_log_info("set uasid allocation success, uasid: %u.\n", arg.out.uasid); + + ret = ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_set_uasid)); + if (ret != 0) + return -EPERM; + + return 0; +} + +static int ubcore_cmd_put_uasid(struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_put_uasid arg; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_put_uasid)); + if (ret != 0) + return -EPERM; + + if (ubcore_uasid_free(arg.in.uasid) != 0) + return -EINVAL; + + ubcore_log_info("put uasid free success, uasid: %u.\n", arg.in.uasid); + return 0; +} + +static int ubcore_cmd_query_stats(struct ubcore_cmd_hdr *hdr) +{ + enum ubcore_transport_type trans_type; + struct ubcore_cmd_query_stats arg = { 0 }; + struct ubcore_stats_com_val com_val; + struct ubcore_stats_key key = { 0 }; + struct 
ubcore_stats_val val; + struct ubcore_device *dev; + union ubcore_eid eid; + int ret; + + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)hdr->args_addr, + sizeof(struct ubcore_cmd_query_stats)); + if (ret != 0) + return ret; + + (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); + trans_type = (enum ubcore_transport_type)arg.in.tp_type; + dev = ubcore_find_device(&eid, trans_type); + if (dev == NULL || ubcore_check_dev_name_invalid(dev, arg.in.dev_name)) { + ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", + dev == NULL ? "NULL" : dev->dev_name, arg.in.dev_name); + return -EINVAL; + } + + key.type = (uint8_t)arg.in.type; + key.key = arg.in.key; + val.addr = (uint64_t)&com_val; + val.len = sizeof(struct ubcore_stats_com_val); + + ret = ubcore_query_stats(dev, &key, &val); + if (ret != 0) { + ubcore_put_device(dev); + return ret; + } + + ubcore_put_device(dev); + (void)memcpy(&arg.out, &com_val, sizeof(struct ubcore_stats_com_val)); + return ubcore_copy_to_user((void __user *)(uintptr_t)hdr->args_addr, &arg, + sizeof(struct ubcore_cmd_query_stats)); +} + +static int ubcore_cmd_parse(struct ubcore_cmd_hdr *hdr) +{ + switch (hdr->command) { + case UBCORE_CMD_SET_UASID: + return ubcore_cmd_set_uasid(hdr); + case UBCORE_CMD_PUT_UASID: + return ubcore_cmd_put_uasid(hdr); + case UBCORE_CMD_QUERY_STATS: + return ubcore_cmd_query_stats(hdr); + default: + ubcore_log_err("bad ubcore command: %d.\n", (int)hdr->command); + return -EINVAL; + } +} + +static long ubcore_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ubcore_cmd_hdr hdr; + int ret; + + if (cmd == UBCORE_CMD) { + ret = ubcore_copy_from_user(&hdr, (void *)arg, sizeof(struct ubcore_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBCORE_MAX_CMD_SIZE)) { + ubcore_log_err("length of ioctl input parameter is out of range.\n"); + return -EINVAL; + } + } else { + ubcore_log_err("bad ioctl command.\n"); + return -ENOIOCTLCMD; + } + return ubcore_cmd_parse(&hdr); +} + +static int 
ubcore_close(struct inode *i_node, struct file *filp) +{ + return 0; +} + +static const struct file_operations g_ubcore_ops = { + .owner = THIS_MODULE, + .open = ubcore_open, + .release = ubcore_close, + .unlocked_ioctl = ubcore_ioctl, + .compat_ioctl = ubcore_ioctl, +}; + +static int ubcore_register_sysfs(void) +{ + int ret; + + ret = alloc_chrdev_region(&g_ubcore_ctx.ubcore_devno, 0, 1, UBCORE_DEVICE_NAME); + if (ret != 0) { + ubcore_log_err("alloc chrdev region failed, ret:%d.\n", ret); + return ret; + } + + cdev_init(&g_ubcore_ctx.ubcore_cdev, &g_ubcore_ops); + ret = cdev_add(&g_ubcore_ctx.ubcore_cdev, g_ubcore_ctx.ubcore_devno, 1); + if (ret != 0) { + ubcore_log_err("chrdev add failed, ret:%d.\n", ret); + goto unreg_cdev_region; + } + + /* /sys/class/ubus/ubcore */ + g_ubcore_ctx.ubcore_class = class_create(THIS_MODULE, UBCORE_CLASS_NAME); + if (IS_ERR(g_ubcore_ctx.ubcore_class)) { + ret = (int)PTR_ERR(g_ubcore_ctx.ubcore_class); + ubcore_log_err("couldn't create class %s, ret:%d.\n", UBCORE_CLASS_NAME, ret); + goto del_cdev; + } + + /* /dev/ubcore */ + g_ubcore_ctx.ubcore_dev = + device_create(g_ubcore_ctx.ubcore_class, NULL, g_ubcore_ctx.ubcore_devno, NULL, + UBCORE_DEVICE_NAME); + if (IS_ERR(g_ubcore_ctx.ubcore_dev)) { + ret = (int)PTR_ERR(g_ubcore_ctx.ubcore_dev); + ubcore_log_err("couldn't create device %s, ret:%d.\n", UBCORE_DEVICE_NAME, ret); + goto destroy_class; + } + ubcore_log_info("ubcore device created success.\n"); + return 0; + +destroy_class: + class_destroy(g_ubcore_ctx.ubcore_class); +del_cdev: + cdev_del(&g_ubcore_ctx.ubcore_cdev); +unreg_cdev_region: + unregister_chrdev_region(g_ubcore_ctx.ubcore_devno, 1); + return ret; +} + +static void ubcore_unregister_sysfs(void) +{ + device_destroy(g_ubcore_ctx.ubcore_class, g_ubcore_ctx.ubcore_cdev.dev); + class_destroy(g_ubcore_ctx.ubcore_class); + cdev_del(&g_ubcore_ctx.ubcore_cdev); + unregister_chrdev_region(g_ubcore_ctx.ubcore_devno, 1); + ubcore_log_info("ubcore device destroyed success.\n"); 
+} + +static void ubcore_ipv4_to_netaddr(struct ubcore_net_addr *netaddr, __be32 ipv4) +{ + netaddr->net_addr.in4.resv1 = 0; + netaddr->net_addr.in4.resv2 = htonl(UBCORE_IPV4_MAP_IPV6_PREFIX); + netaddr->net_addr.in4.addr = ipv4; +} + +static void ubcore_set_net_addr(struct ubcore_device *dev, const struct ubcore_net_addr *netaddr) +{ + if (dev->ops->set_net_addr != NULL && dev->ops->set_net_addr(dev, netaddr) != 0) + ubcore_log_err("Failed to set net addr"); +} + +static void ubcore_unset_net_addr(struct ubcore_device *dev, const struct ubcore_net_addr *netaddr) +{ + if (dev->ops->unset_net_addr != NULL && dev->ops->unset_net_addr(dev, netaddr) != 0) + ubcore_log_err("Failed to unset net addr"); +} + +static void ubcore_update_eid(struct ubcore_device *dev, struct ubcore_net_addr *netaddr) +{ + if (dev->transport_type <= UBCORE_TRANSPORT_INVALID || + dev->transport_type >= UBCORE_TRANSPORT_MAX) + return; + + if (ubcore_set_eid(dev, (union ubcore_eid *)(void *)&netaddr->net_addr) != 0) + ubcore_log_warn("Failed to update eid"); +} + +static int ubcore_handle_inetaddr_event(struct net_device *netdev, unsigned long event, + struct ubcore_net_addr *netaddr) +{ + struct ubcore_device **devices; + struct ubcore_device *dev; + uint32_t num_devices = 0; + uint32_t i; + + if (netdev == NULL || netdev->reg_state >= NETREG_UNREGISTERING) + return NOTIFY_DONE; + + devices = ubcore_get_devices_from_netdev(netdev, &num_devices); + if (devices == NULL) + return NOTIFY_DONE; + + for (i = 0; i < num_devices; i++) { + dev = devices[i]; + switch (event) { + case NETDEV_UP: + ubcore_set_net_addr(dev, netaddr); + ubcore_update_eid(dev, netaddr); + break; + case NETDEV_DOWN: + ubcore_unset_net_addr(dev, netaddr); + break; + default: + break; + } + } + + ubcore_put_devices(devices, num_devices); + return NOTIFY_OK; +} + +static int ubcore_ipv6_notifier_call(struct notifier_block *nb, unsigned long event, void *arg) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; + struct 
ubcore_net_addr netaddr; + struct net_device *netdev; + + if (ifa == NULL || ifa->idev == NULL || ifa->idev->dev == NULL) + return NOTIFY_DONE; + + netdev = ifa->idev->dev; + ubcore_log_info("Get a ipv6 event %s from netdev %s%s ip %pI6c", netdev_cmd_to_name(event), + netdev_name(netdev), netdev_reg_state(netdev), &ifa->addr); + + (void)memcpy(&netaddr.net_addr, &ifa->addr, sizeof(struct in6_addr)); + return ubcore_handle_inetaddr_event(netdev, event, &netaddr); +} + +static int ubcore_ipv4_notifier_call(struct notifier_block *nb, unsigned long event, void *arg) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)arg; + struct ubcore_net_addr netaddr; + struct net_device *netdev; + + if (ifa == NULL || ifa->ifa_dev == NULL || ifa->ifa_dev->dev == NULL) + return NOTIFY_DONE; + + netdev = ifa->ifa_dev->dev; + ubcore_log_info("Get a ipv4 event %s netdev %s%s ip %pI4", netdev_cmd_to_name(event), + netdev_name(netdev), netdev_reg_state(netdev), &ifa->ifa_address); + + memset(&netaddr, 0, sizeof(struct ubcore_net_addr)); + ubcore_ipv4_to_netaddr(&netaddr, ifa->ifa_address); + return ubcore_handle_inetaddr_event(netdev, event, &netaddr); +} + +static void ubcore_add_ipv4_entry(struct list_head *list, __be32 ipv4) +{ + struct ubcore_net_addr_node *na_entry; + + na_entry = kzalloc(sizeof(struct ubcore_net_addr_node), GFP_ATOMIC); + + ubcore_ipv4_to_netaddr(&na_entry->addr, ipv4); + list_add_tail(&na_entry->node, list); +} + +static void ubcore_add_ipv6_entry(struct list_head *list, const struct in6_addr *ipv6) +{ + struct ubcore_net_addr_node *na_entry; + + na_entry = kzalloc(sizeof(struct ubcore_net_addr_node), GFP_ATOMIC); + if (na_entry == NULL) + return; + + (void)memcpy(&na_entry->addr.net_addr, ipv6, sizeof(struct in6_addr)); + list_add_tail(&na_entry->node, list); +} + +static void ubcore_netdev_get_ipv4(struct net_device *netdev, struct list_head *list) +{ + const struct in_ifaddr *ifa; + struct in_device *in_dev; + + rcu_read_lock(); + in_dev = 
__in_dev_get_rcu(netdev); + if (in_dev == NULL) { + rcu_read_unlock(); + return; + } + + in_dev_for_each_ifa_rcu(ifa, in_dev) { + if (ifa->ifa_flags & IFA_F_SECONDARY) + continue; + ubcore_add_ipv4_entry(list, ifa->ifa_address); + } + rcu_read_unlock(); +} + +static void ubcore_netdev_get_ipv6(struct net_device *netdev, struct list_head *list) +{ + const struct inet6_ifaddr *ifa; + struct inet6_dev *in_dev; + + in_dev = in6_dev_get(netdev); + if (in_dev == NULL) + return; + + read_lock_bh(&in_dev->lock); + list_for_each_entry(ifa, &in_dev->addr_list, if_list) { + if (ifa->flags & IFA_F_SECONDARY) + continue; + ubcore_add_ipv6_entry(list, (const struct in6_addr *)&ifa->addr); + } + read_unlock_bh(&in_dev->lock); + in6_dev_put(in_dev); +} + +void ubcore_set_default_eid(struct ubcore_device *dev) +{ + struct net_device *netdev = dev->netdev; + struct ubcore_net_addr_node *na_entry; + struct ubcore_net_addr_node *next; + LIST_HEAD(na_list); + + /* Do not modify eid if the driver already set default eid other than 0 */ + if (netdev == NULL || + !(dev->attr.eid.in6.interface_id == 0 && dev->attr.eid.in6.subnet_prefix == 0)) + return; + + ubcore_netdev_get_ipv4(netdev, &na_list); + list_for_each_entry_safe(na_entry, next, &na_list, node) { + ubcore_update_eid(dev, &na_entry->addr); + list_del(&na_entry->node); + kfree(na_entry); + } +} + +static void ubcore_update_netaddr(struct ubcore_device *dev, struct net_device *netdev, bool add) +{ + struct ubcore_net_addr_node *na_entry; + struct ubcore_net_addr_node *next; + LIST_HEAD(na_list); + + /* ipv4 */ + ubcore_netdev_get_ipv4(netdev, &na_list); + ubcore_netdev_get_ipv6(netdev, &na_list); + + list_for_each_entry_safe(na_entry, next, &na_list, node) { + if (add) + ubcore_set_net_addr(dev, &na_entry->addr); + else + ubcore_unset_net_addr(dev, &na_entry->addr); + + list_del(&na_entry->node); + kfree(na_entry); + } +} + +static int ubcore_add_netaddr(struct ubcore_device *dev, struct net_device *netdev) +{ + if 
(netdev->reg_state >= NETREG_UNREGISTERING) + return NOTIFY_DONE; + + ubcore_update_netaddr(dev, netdev, true); + return NOTIFY_OK; +} + +static int ubcore_remove_netaddr(struct ubcore_device *dev, struct net_device *netdev) +{ + if (netdev->reg_state >= NETREG_UNREGISTERED) + return NOTIFY_DONE; + + ubcore_update_netaddr(dev, netdev, false); + return NOTIFY_OK; +} + +static int ubcore_net_notifier_call(struct notifier_block *nb, unsigned long event, void *arg) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(arg); + struct ubcore_device **devices; + struct ubcore_device *dev; + uint32_t num_devices = 0; + uint32_t i; + + if (netdev == NULL) + return NOTIFY_DONE; + + devices = ubcore_get_devices_from_netdev(netdev, &num_devices); + if (devices == NULL) + return NOTIFY_DONE; + + ubcore_log_info("Get a net event %s from ubcore_dev %s%s", netdev_cmd_to_name(event), + netdev_name(netdev), netdev_reg_state(netdev)); + + for (i = 0; i < num_devices; i++) { + dev = devices[i]; + switch (event) { + case NETDEV_REGISTER: + case NETDEV_UP: + ubcore_add_netaddr(dev, netdev); + break; + case NETDEV_UNREGISTER: + case NETDEV_DOWN: + ubcore_remove_netaddr(dev, netdev); + break; + default: + break; + } + } + ubcore_put_devices(devices, num_devices); + return NOTIFY_OK; +} + +static struct notifier_block ubcore_ipv6_notifier = { + .notifier_call = ubcore_ipv6_notifier_call, +}; + +static struct notifier_block ubcore_ipv4_notifier = { + .notifier_call = ubcore_ipv4_notifier_call, +}; + +static struct notifier_block ubcore_net_notifier = { .notifier_call = ubcore_net_notifier_call }; + +static int ubcore_register_notifiers(void) +{ + int ret; + + ret = register_netdevice_notifier(&ubcore_net_notifier); + if (ret != 0) { + pr_err("Failed to register netdev notifier, ret = %d\n", ret); + return ret; + } + ret = register_inetaddr_notifier(&ubcore_ipv4_notifier); + if (ret != 0) { + (void)unregister_netdevice_notifier(&ubcore_net_notifier); + pr_err("Failed to register 
inetaddr notifier, ret = %d\n", ret); + return -1; + } + ret = register_inet6addr_notifier(&ubcore_ipv6_notifier); + if (ret != 0) { + (void)unregister_inetaddr_notifier(&ubcore_ipv4_notifier); + (void)unregister_netdevice_notifier(&ubcore_net_notifier); + pr_err("Failed to register inet6addr notifier, ret = %d\n", ret); + return -1; + } + return 0; +} + +static void ubcore_unregister_notifiers(void) +{ + (void)unregister_inet6addr_notifier(&ubcore_ipv6_notifier); + (void)unregister_inetaddr_notifier(&ubcore_ipv4_notifier); + (void)unregister_netdevice_notifier(&ubcore_net_notifier); +} + +static int __init ubcore_init(void) +{ + int ret; + + ret = ubcore_register_sysfs(); + if (ret != 0) + return ret; + + /* uasid is assigned from 1, and 0 means random value. + * so 0 consumed here first. + */ + bitmap_zero(g_uasid_bitmap, UBCORE_MAX_UASID); + set_bit(0, g_uasid_bitmap); + + if (ubcore_netlink_init() != 0) { + ubcore_unregister_sysfs(); + return -1; + } + + ret = ubcore_register_notifiers(); + if (ret != 0) { + pr_err("Failed to register notifiers\n"); + ubcore_unregister_sysfs(); + ubcore_netlink_exit(); + return -1; + } + ubcore_log_info("ubcore module init success.\n"); + return 0; +} + +static void __exit ubcore_exit(void) +{ + ubcore_unregister_notifiers(); + ubcore_netlink_exit(); + ubcore_unregister_sysfs(); + ubcore_log_info("ubcore module exits.\n"); +} + +module_init(ubcore_init); +module_exit(ubcore_exit); + +MODULE_DESCRIPTION("Kernel module for ubus"); +MODULE_AUTHOR("huawei"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.c b/drivers/ub/urma/ubcore/ubcore_netlink.c new file mode 100644 index 0000000000000000000000000000000000000000..15cdff268966536e0f36b42f999adb77cb3cbdc8 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netlink.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netlink module + * Author: Chen Wen, Yan Fangfang + * Create: 2022-08-27 + * Note: + * History: 2022-08-27: create file + */ + +#include +#include +#include +#include +#include "ubcore_log.h" +#include "ubcore_netlink.h" + +#define UBCORE_NL_TYPE 24 /* same with agent netlink type */ +#define UBCORE_NL_TIMEOUT 10000 /* 10s */ +#define UBCORE_NL_INVALID_PORT 0 + +struct sock *nl_sock; +static uint32_t g_agent_port = UBCORE_NL_INVALID_PORT; /* get agent pid */ + +static void ubcore_nl_cb_func(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + + nlh = nlmsg_hdr(skb); + if (nlmsg_len(nlh) < sizeof(struct ubcore_nlmsg) || skb->len < nlh->nlmsg_len) { + ubcore_log_err("Invalid nl msg received"); + return; + } + + switch (nlh->nlmsg_type) { + case UBCORE_NL_SET_AGENT_PID: + g_agent_port = nlh->nlmsg_pid; + break; + default: + ubcore_log_err("Unexpected nl msg type: %d received\n", nlh->nlmsg_type); + break; + } +} + +int ubcore_netlink_init(void) +{ + /* create netlink socket */ + struct netlink_kernel_cfg cfg = { .input = ubcore_nl_cb_func }; + + nl_sock = (struct sock *)netlink_kernel_create(&init_net, UBCORE_NL_TYPE, &cfg); + if (nl_sock == NULL) { + ubcore_log_err("Netlink_kernel_create error.\n"); + return -1; + } + return 0; +} + +void ubcore_netlink_exit(void) +{ + if (nl_sock != NULL) { + netlink_kernel_release(nl_sock); + nl_sock = NULL; + } +} diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.h b/drivers/ub/urma/ubcore/ubcore_netlink.h new file mode 100644 index 
0000000000000000000000000000000000000000..b07ba64a67f022da3aacb692d33be0291816917b --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netlink.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netlink head file + * Author: Chen Wen + * Create: 2022-08-27 + * Note: + * History: 2022-08-27: Create file + */ + +#ifndef UBCORE_NETLINK_H +#define UBCORE_NETLINK_H + +#include +#include + +enum ubcore_nl_resp_status { UBCORE_NL_RESP_FAIL = -1, UBCORE_NL_RESP_SUCCESS = 0 }; + +enum ubcore_nlmsg_type { + UBCORE_NL_CREATE_TP_REQ = NLMSG_MIN_TYPE, /* 0x10 */ + UBCORE_NL_CREATE_TP_RESP, + UBCORE_NL_DESTROY_TP_REQ, + UBCORE_NL_DESTROY_TP_RESP, + UBCORE_NL_QUERY_TP_REQ, + UBCORE_NL_QUERY_TP_RESP, + UBCORE_NL_RESTORE_TP_REQ, + UBCORE_NL_RESTORE_TP_RESP, + UBCORE_NL_SET_AGENT_PID +}; + +struct ubcore_nlmsg { + uint32_t nlmsg_seq; + enum ubcore_nlmsg_type msg_type; + enum ubcore_transport_type transport_type; + union ubcore_eid src_eid; + union ubcore_eid dst_eid; + uint32_t payload_len; + uint8_t payload[0]; +} __packed; + +struct ubcore_nl_session { + struct ubcore_nlmsg *req; + struct ubcore_nlmsg *resp; + struct list_head node; + struct kref kref; + struct completion comp; /* Synchronization event of timeout sleep and thread wakeup */ +}; + +static inline uint32_t ubcore_nlmsg_len(struct ubcore_nlmsg *msg) +{ + return sizeof(struct ubcore_nlmsg) + msg->payload_len; +} + +int ubcore_netlink_init(void); +void 
ubcore_netlink_exit(void); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_priv.h b/drivers/ub/urma/ubcore/ubcore_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..28c78d000cb0b9e21d3853c5fa1dee0dfa9669d5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_priv.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore's private data structure and function declarations + * Author: Qian Guoxin + * Create: 2022-7-22 + * Note: + * History: 2022-7-22: Create file + */ + +#ifndef UBCORE_PRIV_H +#define UBCORE_PRIV_H + +#include +#include + +static inline bool ubcore_check_dev_name_invalid(struct ubcore_device *dev, char *dev_name) +{ + return (strcmp(dev->dev_name, dev_name) != 0); +} + +/* Caller must put device */ +struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, enum ubcore_transport_type type); +void ubcore_get_device(struct ubcore_device *dev); +void ubcore_put_device(struct ubcore_device *dev); + +/* Must call ubcore_put_devices to put and release the returned devices */ +struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, uint32_t *cnt); +void ubcore_put_devices(struct ubcore_device **devices, uint32_t cnt); +void ubcore_set_default_eid(struct ubcore_device *dev); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_tp.c b/drivers/ub/urma/ubcore/ubcore_tp.c new file mode 100644 index 
0000000000000000000000000000000000000000..ca665a16a07f3f7efe31a13acd4f6c18e60412b0 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore tp implementation + * Author: Yan Fangfang + * Create: 2022-08-25 + * Note: + * History: 2022-08-25: Create file + */ + +#include +#include +#include +#include +#include +#include + +#define UB_PROTOCOL_HEAD_BYTES 313 +#define UB_MTU_BITS_BASE_SHIFT 7 + +static inline int ubcore_mtu_enum_to_int(enum ubcore_mtu mtu) +{ + return 1 << ((int)mtu + UB_MTU_BITS_BASE_SHIFT); +} + +enum ubcore_mtu ubcore_get_mtu(int mtu) +{ + mtu = mtu - UB_PROTOCOL_HEAD_BYTES; + + if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_8192)) + return UBCORE_MTU_8192; + if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_4096)) + return UBCORE_MTU_4096; + else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_2048)) + return UBCORE_MTU_2048; + else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_1024)) + return UBCORE_MTU_1024; + else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_512)) + return UBCORE_MTU_512; + else if (mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_256)) + return UBCORE_MTU_256; + else + return 0; +} +EXPORT_SYMBOL(ubcore_get_mtu); + +struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, const union ubcore_eid *remote_eid, + enum ubcore_transport_mode trans_mode, + struct ubcore_udata *udata) +{ + return NULL; +} 
+EXPORT_SYMBOL(ubcore_create_vtp); + +int ubcore_destroy_vtp(struct ubcore_tp *vtp) +{ + return -1; +} +EXPORT_SYMBOL(ubcore_destroy_vtp); diff --git a/drivers/ub/urma/ubcore/ubcore_umem.c b/drivers/ub/urma/ubcore/ubcore_umem.c new file mode 100644 index 0000000000000000000000000000000000000000..433c3e89116e8575cc1a7631a072a7d01cade713 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_umem.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore device add and remove ops file + * Author: Fan Yizhen + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#include +#include +#include +#include +#include +#include + +#include "ubcore_log.h" +#include + +static void umem_unpin_pages(struct ubcore_umem *umem, uint64_t nents) +{ + struct scatterlist *sg; + uint32_t i; + + for_each_sg(umem->sg_head.sgl, sg, nents, i) { + struct page *page = sg_page(sg); + + unpin_user_page(page); + } + sg_free_table(&umem->sg_head); +} + +static void umem_free_sgt(struct ubcore_umem *umem) +{ + umem_unpin_pages(umem, umem->sg_head.nents); +} + +static inline uint64_t umem_cal_npages(uint64_t va, uint64_t len) +{ + return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE; +} + +static int umem_pin_pages(uint64_t cur_base, uint64_t npages, uint32_t gup_flags, + struct page **page_list) +{ + int pinned; + + pinned = pin_user_pages_fast(cur_base, + min_t(unsigned long, (unsigned long)npages, 
+					     PAGE_SIZE / sizeof(struct page *)),
+				     gup_flags | FOLL_LONGTERM, page_list);
+	return pinned;
+}
+
+/* Add @npages to mm->pinned_vm accounting and return the new total. */
+static uint64_t umem_atomic_add(uint64_t npages, struct mm_struct *mm)
+{
+	uint64_t ret;
+
+	ret = atomic64_add_return(npages, &mm->pinned_vm);
+	return ret;
+}
+
+/* Undo pinned-page accounting added by umem_atomic_add(). */
+static void umem_atomic_sub(uint64_t npages, struct mm_struct *mm)
+{
+	atomic64_sub(npages, &mm->pinned_vm);
+}
+
+/*
+ * Fill @pinned scatterlist entries starting at @sg_start, one PAGE_SIZE
+ * entry per page in @page_list.  Returns the scatterlist position after the
+ * last entry written so successive batches can be chained.
+ */
+static struct scatterlist *umem_sg_set_page(struct scatterlist *sg_start, int pinned,
+					    struct page **page_list)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sg_start, sg, pinned, i) {
+		sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
+	}
+	return sg;
+}
+
+/*
+ * Charge @npages against the owning process's RLIMIT_MEMLOCK pinned-page
+ * budget.  Returns -ENOMEM when the limit would be exceeded and the task
+ * lacks CAP_IPC_LOCK.  Note the pages stay charged even on failure; callers
+ * unwind via umem_atomic_sub() on their error paths (see the caller's
+ * sub_pinned_vm label).
+ */
+static int umem_add_new_pinned(struct ubcore_umem *umem, uint64_t npages)
+{
+	uint64_t lock_limit;
+	uint64_t new_pinned;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	new_pinned = umem_atomic_add(npages, umem->owning_mm);
+	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ubcore_log_err("Npages to be pinned is greater than RLIMIT_MEMLOCK[%llu].\n",
+			       lock_limit);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * Pin all @npages user pages of @umem in batches (@page_list holds one
+ * page's worth of struct page pointers per batch) and record them in the
+ * scatterlist.  Returns the number of pages actually pinned; a short count
+ * signals failure and the caller unpins what was pinned.
+ */
+static uint64_t umem_pin_all_pages(struct ubcore_umem *umem, uint64_t npages, uint32_t gup_flags,
+				   struct page **page_list)
+{
+	struct scatterlist *sg_list_start = umem->sg_head.sgl;
+	uint64_t cur_base = umem->va & PAGE_MASK;
+	uint64_t page_count = npages;
+	int pinned;
+
+	while (page_count != 0) {
+		cond_resched();
+		pinned = umem_pin_pages(cur_base, page_count, gup_flags, page_list);
+		if (pinned < 0) {
+			ubcore_log_err(
+				"Pin pages failed, cur_base: %llx, page_count: %llx, pinned: %d.\n",
+				cur_base, page_count, pinned);
+			return npages - page_count;
+		}
+		cur_base += (uint64_t)pinned * PAGE_SIZE;
+		page_count -= (uint64_t)pinned;
+		sg_list_start = umem_sg_set_page(sg_list_start, pinned, page_list);
+	}
+	return npages;
+}
+
+/* Validate ubcore_umem_get() arguments: non-NULL device, overflow-free
+ * [va, va+len) range, supported flags, and mlock permission.
+ */
+static int umem_verify_input(const struct ubcore_device *ub_dev, uint64_t va, uint64_t len,
+			     union ubcore_umem_flag flag)
+{
+	if (ub_dev == NULL || ((va + len) < va) || 
PAGE_ALIGN(va + len) < (va + len)) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + if (flag.bs.non_pin == 1) { + ubcore_log_err("Non-pin mode is not supported.\n"); + return -EINVAL; + } + if (can_do_mlock() == 0) + return -EPERM; + return 0; +} + +static int umem_dma_map(struct ubcore_umem *umem, uint64_t npages, unsigned long dma_attrs) +{ + int ret; + + ret = dma_map_sg_attrs(umem->ub_dev->dma_dev, umem->sg_head.sgl, npages, DMA_BIDIRECTIONAL, + dma_attrs); + if (ret == 0) { + ubcore_log_err("Dma map failed, ret: %d\n", ret); + return -ENOMEM; + } + umem->nmap += (uint32_t)ret; + return 0; +} + +static void ubcore_fill_umem(struct ubcore_umem *umem, struct ubcore_device *dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag) +{ + umem->ub_dev = dev; + umem->va = va; + umem->length = len; + umem->flag = flag; + umem->owning_mm = current->mm; + mmgrab(umem->owning_mm); +} + +static struct ubcore_umem *ubcore_get_target_umem(struct ubcore_device *dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag, + struct page **page_list) +{ + uint32_t gup_flags = (flag.bs.writable == 1) ? 
FOLL_WRITE : (FOLL_WRITE | FOLL_FORCE); + unsigned long dma_attrs = 0; + struct ubcore_umem *umem; + uint64_t npages; + uint64_t pinned; + int ret = 0; + + umem = kzalloc(sizeof(*umem), GFP_KERNEL); + if (umem == 0) { + ret = -ENOMEM; + goto out; + } + + ubcore_fill_umem(umem, dev, va, len, flag); + npages = umem_cal_npages(umem->va, umem->length); + if (npages == 0 || npages > UINT_MAX) { + ret = -EINVAL; + goto umem_kfree; + } + + ret = umem_add_new_pinned(umem, npages); + if (ret != 0) + goto sub_pinned_vm; + + ret = sg_alloc_table(&umem->sg_head, (unsigned int)npages, GFP_KERNEL); + if (ret != 0) + goto sub_pinned_vm; + + pinned = umem_pin_all_pages(umem, npages, gup_flags, page_list); + if (pinned != npages) { + ret = -ENOMEM; + goto umem_release; + } + + ret = umem_dma_map(umem, npages, dma_attrs); + if (ret != 0) + goto umem_release; + + goto out; + +umem_release: + umem_unpin_pages(umem, pinned); +sub_pinned_vm: + umem_atomic_sub(npages, umem->owning_mm); +umem_kfree: + mmdrop(umem->owning_mm); + kfree(umem); +out: + free_page((unsigned long)page_list); + return ret != 0 ? 
ERR_PTR(ret) : umem; +} + +struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, uint64_t len, + union ubcore_umem_flag flag) +{ + struct page **page_list; + int ret; + + ret = umem_verify_input(dev, va, len, flag); + if (ret < 0) + return ERR_PTR(ret); + + page_list = (struct page **)__get_free_page(GFP_KERNEL); + if (page_list == 0) + return ERR_PTR(-ENOMEM); + + return ubcore_get_target_umem(dev, va, len, flag, page_list); +} +EXPORT_SYMBOL(ubcore_umem_get); + +void ubcore_umem_release(struct ubcore_umem *umem) +{ + uint64_t npages; + + if (IS_ERR_OR_NULL(umem)) + return; + + npages = umem_cal_npages(umem->va, umem->length); + dma_unmap_sg(umem->ub_dev->dma_dev, umem->sg_head.sgl, umem->nmap, DMA_BIDIRECTIONAL); + umem_free_sgt(umem); + umem_atomic_sub(npages, umem->owning_mm); + mmdrop(umem->owning_mm); + kfree(umem); +} +EXPORT_SYMBOL(ubcore_umem_release); diff --git a/drivers/ub/urma/uburma/Makefile b/drivers/ub/urma/uburma/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..72038c480241f9fbce89b4a2343c03355fe7a86d --- /dev/null +++ b/drivers/ub/urma/uburma/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. +# + +uburma-objs := uburma_main.o \ + uburma_cdev_file.o + +obj-$(CONFIG_UB) += uburma.o diff --git a/drivers/ub/urma/uburma/uburma_cdev_file.c b/drivers/ub/urma/uburma/uburma_cdev_file.c new file mode 100644 index 0000000000000000000000000000000000000000..cfc317dab628c173b90491f2bb7ebbd966daa946 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cdev_file.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma cdev file + * Author: Qian Guoxin + * Create: 2022-08-16 + * Note: + * History: 2022-08-16: Create file + */ + +#include +#include +#include + +#include +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_cdev_file.h" + +#define UBURMA_MAX_DEV_NAME 64 +#define UBURMA_MAX_VALUE_LEN 24 + +/* callback information */ +typedef ssize_t (*uburma_show_attr_cb)(const struct ubcore_device *ubc_dev, char *buf); +typedef ssize_t (*uburma_store_attr_cb)(struct ubcore_device *ubc_dev, const char *buf, size_t len); + +static ssize_t uburma_show_dev_attr(struct device *dev, struct device_attribute *attr, char *buf, + uburma_show_attr_cb show_cb) +{ + struct uburma_device *ubu_dev = dev_get_drvdata(dev); + struct ubcore_device *ubc_dev; + ssize_t ret = -ENODEV; + int srcu_idx; + + if (!ubu_dev || !buf) { + uburma_log_err("Invalid argument.\n"); + return -EINVAL; + } + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev) + ret = show_cb(ubc_dev, buf); + + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return ret; +} + +static ssize_t uburma_store_dev_attr(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len, uburma_store_attr_cb store_cb) +{ + struct uburma_device *ubu_dev = dev_get_drvdata(dev); + struct ubcore_device *ubc_dev; + ssize_t ret = -ENODEV; + int srcu_idx; + + if (!ubu_dev || !buf) { + uburma_log_err("Invalid argument with ubcore device nullptr.\n"); + return -EINVAL; + } + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev) + ret = store_cb(ubc_dev, buf, len); + + 
srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return ret; +} + +/* interface for exporting device attributes */ +static ssize_t ubdev_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_DEV_NAME, "%s\n", ubc_dev->dev_name); +} + +static ssize_t ubdev_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, ubdev_show_cb); +} + +static DEVICE_ATTR_RO(ubdev); + +static ssize_t eid_show_cb(const struct ubcore_device *ubc_dev, char *buf) +{ + return snprintf(buf, (UBCORE_EID_STR_LEN + 1) + 1, EID_FMT "\n", + EID_ARGS(ubc_dev->attr.eid)); +} + +static ssize_t eid_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return uburma_show_dev_attr(dev, attr, buf, eid_show_cb); +} + +static int str_to_eid(const char *buf, size_t len, union ubcore_eid *eid) +{ + char *end; + int ret; + + if (buf == NULL || eid == NULL) { + uburma_log_err("Invalid argument\n"); + return -EINVAL; + } + + ret = in6_pton(buf, (int)len, (u8 *)eid, -1, (const char **)&end); + if (ret == 0) { + uburma_log_err("format error: %s.\n", buf); + return -EINVAL; + } + return 0; +} + +static ssize_t eid_store_cb(struct ubcore_device *ubc_dev, const char *buf, size_t len) +{ + union ubcore_eid eid; + ssize_t ret; + + if (str_to_eid(buf, len, &eid) != 0) { + uburma_log_err("failed to str_to_eid: %s, %lu.\n", buf, len); + return -EINVAL; + } + + ret = ubcore_set_eid(ubc_dev, &eid); + if (ret == 0) + ret = (int)len; // len is required for success return. 
+ return ret; +} + +static ssize_t eid_store(struct device *dev, struct device_attribute *attr, const char *buf, + size_t len) +{ + return uburma_store_dev_attr(dev, attr, buf, len, eid_store_cb); +} + +static DEVICE_ATTR_RW(eid); // 0644 + +static struct attribute *uburma_dev_attrs[] = { + &dev_attr_ubdev.attr, + &dev_attr_eid.attr, + NULL, +}; + +static const struct attribute_group uburma_dev_attr_group = { + .attrs = uburma_dev_attrs, +}; + +int uburma_create_dev_attr_files(struct uburma_device *ubu_dev) +{ + int ret; + + ret = sysfs_create_group(&ubu_dev->dev->kobj, &uburma_dev_attr_group); + if (ret != 0) { + uburma_log_err("sysfs create group failed, ret:%d.\n", ret); + return -1; + } + + return 0; +} + +void uburma_remove_dev_attr_files(struct uburma_device *ubu_dev) +{ + sysfs_remove_group(&ubu_dev->dev->kobj, &uburma_dev_attr_group); +} diff --git a/drivers/ub/urma/uburma/uburma_cdev_file.h b/drivers/ub/urma/uburma/uburma_cdev_file.h new file mode 100644 index 0000000000000000000000000000000000000000..4207358f1f9a525947f6e3ad0718aac987f84905 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cdev_file.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma device file ops file + * Author: Qian Guoxin + * Create: 2022-8-16 + * Note: + * History: 2022-8-16: Create file + */ + +#ifndef UBURMA_CDEV_FILE_H +#define UBURMA_CDEV_FILE_H + +#include "uburma_types.h" + +int uburma_create_dev_attr_files(struct uburma_device *ubu_dev); +void uburma_remove_dev_attr_files(struct uburma_device *ubu_dev); + +#endif /* UBURMA_CDEV_FILE_H */ diff --git a/drivers/ub/urma/uburma/uburma_log.h b/drivers/ub/urma/uburma/uburma_log.h new file mode 100644 index 0000000000000000000000000000000000000000..c59f5de35eb83212b9a3e95b6133737b474f7482 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_log.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma log head file + * Author: Qian Guoxin + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + */ + +#ifndef UBURMA_LOG_H +#define UBURMA_LOG_H + +/* add log head info, "LogTag_UBURMA|function|[line]| */ +#define UBURMA_LOG_TAG "LogTag_UBURMA" +#define uburma_log(l, format, args...) \ + ((void)pr_##l("%s|%s:[%d]|" format, UBURMA_LOG_TAG, __func__, __LINE__, ##args)) + +#define uburma_log_info(...) uburma_log(info, __VA_ARGS__) + +#define uburma_log_err(...) uburma_log(err, __VA_ARGS__) + +#define uburma_log_warn(...) uburma_log(warn, __VA_ARGS__) + +#define uburma_log_debug(...) 
uburma_log(debug, __VA_ARGS__) + +#endif /* UBURMA_LOG_H */ diff --git a/drivers/ub/urma/uburma/uburma_main.c b/drivers/ub/urma/uburma/uburma_main.c new file mode 100644 index 0000000000000000000000000000000000000000..06c0c0c5b0393ea8a3f39597d60278b9b4369426 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_main.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma kernel module + * Author: Qian Guoxin + * Create: 2021-08-03 + * Note: + * History: 2021-08-03: Create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_cdev_file.h" + +#define UBURMA_MAX_DEVICE 1024 +#define UBURMA_DYNAMIC_MINOR_NUM UBURMA_MAX_DEVICE +#define UBURMA_MODULE_NAME "uburma" +#define UBURMA_DEVNODE_MODE (0666) + +static DECLARE_BITMAP(g_dev_bitmap, UBURMA_MAX_DEVICE); + +static dev_t g_dynamic_uburma_dev; +static struct class *g_uburma_class; + +static const struct file_operations g_uburma_fops = { + .owner = THIS_MODULE, + // .write = uburma_write, + .llseek = no_llseek, +}; + +static int uburma_add_device(struct ubcore_device *ubc_dev); +static void uburma_remove_device(struct ubcore_device *ubc_dev, void *client_ctx); +static struct ubcore_client g_urma_client = { + .list_node = LIST_HEAD_INIT(g_urma_client.list_node), + .client_name = "urma", + .add = uburma_add_device, + 
.remove = uburma_remove_device, +}; + +static void uburma_release_dev(struct kobject *kobj) +{ + struct uburma_device *ubu_dev = container_of(kobj, struct uburma_device, kobj); + + cleanup_srcu_struct(&ubu_dev->ubc_dev_srcu); + kfree(ubu_dev); +} + +static struct kobj_type uburma_dev_ktype = { + .release = uburma_release_dev, +}; + +static int uburma_get_devt(dev_t *devt) +{ + unsigned int devnum = (unsigned int)find_first_zero_bit(g_dev_bitmap, UBURMA_MAX_DEVICE); + + if (devnum >= UBURMA_MAX_DEVICE) { + uburma_log_err("Invalid argument.\n"); + return -ENOMEM; + } + set_bit(devnum, g_dev_bitmap); + *devt = g_dynamic_uburma_dev + devnum; + return 0; +} + +static int uburma_device_create(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) +{ + /* create /dev/uburma/dev_name> */ + ubu_dev->dev = device_create(g_uburma_class, ubc_dev->dev.parent, ubu_dev->cdev.dev, + ubu_dev, "%s", ubc_dev->dev_name); + if (IS_ERR(ubu_dev->dev)) { + uburma_log_err("device create failed, device:%s.\n", ubc_dev->dev_name); + return -ENOMEM; + } + + if (uburma_create_dev_attr_files(ubu_dev) != 0) { + uburma_log_err("failed to fill attributes, device:%s.\n", ubc_dev->dev_name); + goto destroy_dev; + } + + return 0; + +destroy_dev: + device_destroy(g_uburma_class, ubu_dev->cdev.dev); + return -EPERM; +} + +static void uburma_device_destroy(struct uburma_device *ubu_dev, + const struct ubcore_device *ubc_dev) +{ + device_destroy(g_uburma_class, ubu_dev->cdev.dev); +} + +static int uburma_cdev_create(struct uburma_device *ubu_dev, struct ubcore_device *ubc_dev) +{ + dev_t base; + + if (uburma_get_devt(&base) != 0) { + uburma_log_err("Invalid argument.\n"); + return -ENOMEM; + } + ubu_dev->devnum = base - g_dynamic_uburma_dev; + + cdev_init(&ubu_dev->cdev, NULL); + ubu_dev->cdev.owner = THIS_MODULE; + ubu_dev->cdev.ops = &g_uburma_fops; + ubu_dev->cdev.kobj.parent = &ubu_dev->kobj; + (void)kobject_set_name(&ubu_dev->cdev.kobj, "%s", ubc_dev->dev_name); + + /* create 
/sys/class/uburma/dev_name> */ + if (cdev_add(&ubu_dev->cdev, base, 1)) + goto free_bit; + + if (uburma_device_create(ubu_dev, ubc_dev) != 0) { + uburma_log_err("device create failed, device:%s.\n", ubc_dev->dev_name); + goto del_cdev; + } + return 0; + +del_cdev: + cdev_del(&ubu_dev->cdev); +free_bit: + clear_bit(ubu_dev->devnum, g_dev_bitmap); + return -EPERM; +} + +static int uburma_add_device(struct ubcore_device *ubc_dev) +{ + struct uburma_device *ubu_dev; + int ret; + + ubu_dev = kzalloc(sizeof(struct uburma_device), GFP_KERNEL); + if (ubu_dev == NULL) + return -ENOMEM; + + ret = init_srcu_struct(&ubu_dev->ubc_dev_srcu); + if (ret != 0) { + kfree(ubu_dev); + return -EPERM; + } + + atomic_set(&ubu_dev->refcnt, 1); + init_completion(&ubu_dev->comp); + + /* cmd cnt and completion for ioctl and mmap cmds */ + atomic_set(&ubu_dev->cmdcnt, 1); + init_completion(&ubu_dev->cmddone); + + kobject_init(&ubu_dev->kobj, &uburma_dev_ktype); + mutex_init(&ubu_dev->lists_mutex); + INIT_LIST_HEAD(&ubu_dev->uburma_file_list); + + rcu_assign_pointer(ubu_dev->ubc_dev, ubc_dev); + ubu_dev->num_comp_vectors = ubc_dev->num_comp_vectors; + + if (uburma_cdev_create(ubu_dev, ubc_dev) != 0) { + uburma_log_err("can not create cdev.\n"); + goto err; + } + + ubcore_set_client_ctx_data(ubc_dev, &g_urma_client, ubu_dev); + return 0; + +err: + if (atomic_dec_and_test(&ubu_dev->refcnt)) + complete(&ubu_dev->comp); + + wait_for_completion(&ubu_dev->comp); + kfree(ubu_dev); + return -EPERM; +} + +static void uburma_remove_device(struct ubcore_device *ubc_dev, void *client_ctx) +{ + struct uburma_device *ubu_dev = client_ctx; + + if (ubu_dev == NULL) + return; + + uburma_device_destroy(ubu_dev, ubc_dev); + cdev_del(&ubu_dev->cdev); + clear_bit(ubu_dev->devnum, g_dev_bitmap); + + if (atomic_dec_and_test(&ubu_dev->refcnt)) + complete(&ubu_dev->comp); + + /* do not wait_for_completion(&ubu_dev->comp) */ + kobject_put(&ubu_dev->kobj); +} + +static void uburma_register_client(void) +{ + int ret; + + 
ret = ubcore_register_client(&g_urma_client); + if (ret != 0) + uburma_log_err("register client failed, ret: %d.\n", ret); + else + uburma_log_info("register client succeed.\n"); +} + +static void uburma_unregister_client(void) +{ + ubcore_unregister_client(&g_urma_client); + uburma_log_info("unregister client succeed.\n"); +} + +static char *uburma_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = UBURMA_DEVNODE_MODE; + + return kasprintf(GFP_KERNEL, "uburma/%s", dev_name(dev)); +} + +static const void *uburma_net_namespace(struct device *dev) +{ + struct uburma_device *ubu_dev = dev_get_drvdata(dev); + struct ubcore_device *ubc_dev; + + if (ubu_dev == NULL) + return &init_net; + + ubc_dev = ubu_dev->ubc_dev; + + if (ubc_dev->netdev) + return dev_net(ubc_dev->netdev); + else + return &init_net; +} + +static int uburma_class_create(void) +{ + int ret; + + ret = alloc_chrdev_region(&g_dynamic_uburma_dev, 0, UBURMA_DYNAMIC_MINOR_NUM, + UBURMA_MODULE_NAME); + if (ret != 0) { + uburma_log_err("couldn't register dynamic device number.\n"); + goto out; + } + + /* create /sys/class/uburma */ + g_uburma_class = class_create(THIS_MODULE, UBURMA_MODULE_NAME); + if (IS_ERR(g_uburma_class)) { + ret = (int)PTR_ERR(g_uburma_class); + uburma_log_err("couldn't create class %s.\n", UBURMA_MODULE_NAME); + goto out_chrdev; + } + g_uburma_class->devnode = uburma_devnode; + g_uburma_class->ns_type = &net_ns_type_operations; + g_uburma_class->namespace = uburma_net_namespace; + /* + * to do class_create_file + */ + + return 0; +out_chrdev: + unregister_chrdev_region(g_dynamic_uburma_dev, UBURMA_DYNAMIC_MINOR_NUM); +out: + return ret; +} + +static void uburma_class_destroy(void) +{ + class_destroy(g_uburma_class); + unregister_chrdev_region(g_dynamic_uburma_dev, UBURMA_DYNAMIC_MINOR_NUM); +} + +static int __init uburma_init(void) +{ + int ret; + + ret = uburma_class_create(); + if (ret != 0) { + uburma_log_err("uburma dev create failed.\n"); + return ret; + } + + 
uburma_register_client(); + uburma_log_info("uburma module init success.\n"); + return 0; +} + +static void __exit uburma_exit(void) +{ + uburma_unregister_client(); + uburma_class_destroy(); + uburma_log_info("uburma module exits.\n"); +} + +module_init(uburma_init); +module_exit(uburma_exit); + +MODULE_DESCRIPTION("Kernel module for urma client"); +MODULE_AUTHOR("huawei"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ub/urma/uburma/uburma_types.h b/drivers/ub/urma/uburma/uburma_types.h new file mode 100644 index 0000000000000000000000000000000000000000..fb691e2b14d8cd399e43282063f19e9709f3a176 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_types.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: Types definition provided by uburma + * Author: Qian Guoxin + * Create: 2021-8-4 + * Note: + * History: 2021-8-4: Create file + */ + +#ifndef UBURMA_TYPES_H +#define UBURMA_TYPES_H + +#include +#include +#include +#include +#include +#include + +#include + +struct uburma_device { + atomic_t refcnt; + struct completion comp; /* When refcnt becomes 0, it will wake up */ + atomic_t cmdcnt; /* number of unfinished ioctl and mmap cmds */ + struct completion cmddone; /* When cmdcnt becomes 0, cmddone will wake up */ + int num_comp_vectors; + unsigned int devnum; + struct cdev cdev; + struct device *dev; + struct ubcore_device *__rcu ubc_dev; + struct srcu_struct ubc_dev_srcu; /* protect ubc_dev */ + struct kobject kobj; /* when equal to 0 , free uburma_device. */ + struct mutex lists_mutex; /* protect lists */ + struct list_head uburma_file_list; +}; + +#endif /* UBURMA_TYPES_H */ diff --git a/include/urma/ubcore_api.h b/include/urma/ubcore_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3273fbb6cebb48b4831a42a0fc75619598bf1542 --- /dev/null +++ b/include/urma/ubcore_api.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: API definition provided by ubcore to ubep device driver + * Author: Qian Guoxin + * Create: 2022-1-25 + * Note: + * History: 2022-1-25: Create file + */ + +#ifndef UBCORE_API_H +#define UBCORE_API_H + +#include + +/** + * Register a device to ubcore + * @param[in] dev: the ubcore device; + * @return: 0 on success, other value on error + */ +int ubcore_register_device(struct ubcore_device *dev); +/** + * Unregister a device from ubcore + * @param[in] dev: the ubcore device; + */ +void ubcore_unregister_device(struct ubcore_device *dev); +/** + * Dispatch an asynchronous event to all registered handlers + * @param[in] event: asynchronous event; + */ +void ubcore_dispatch_async_event(struct ubcore_event *event); + +/** + * Allocate physical memory and do DMA mapping + * @param[in] dev: the ubcore device; + * @param[in] va: the VA address to be mapped. + * @param[in] len: Length of the address space to be allocated and mapped by DMA. + * @param[in] flag: Attribute flags + * Return: umem ptr on success, ERR_PTR on error + */ +struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, uint64_t len, + union ubcore_umem_flag flag); +/** + * Release umem allocated + * @param[in] umem: the ubcore umem created before + */ +void ubcore_umem_release(struct ubcore_umem *umem); + +/** + * Invoke create virtual tp on a PF device, called only by driver + * @param[in] dev: the ubcore device; + * @param[in] remote_eid: destination remote eid address of the tp to be created + * @param[in] trans_mode: transport mode of the tp to be created + * @param[in] udata: driver defined data + * @return: tp pointer on success, NULL on error + */ +struct ubcore_tp *ubcore_create_vtp(struct ubcore_device *dev, + const union ubcore_eid *remote_eid, + enum ubcore_transport_mode trans_mode, + struct ubcore_udata *udata); + +/** + * Invoke destroy virtual tp from a PF device, called only by driver + * @param[in] tp: the tp to be destroyed + * @return: 0 on success, 
other value on error + */ +int ubcore_destroy_vtp(struct ubcore_tp *vtp); + +/** + * Invoke get mtu value, called only by driver + * @param[in] mtu: specifies the MTU value of the NIC interface. + * @return: The MTU of the UB protocol, this value removes the length of the network layer, + * transport layer, transaction layer header and ICRC. + */ +enum ubcore_mtu ubcore_get_mtu(int mtu); + +#endif diff --git a/include/urma/ubcore_opcode.h b/include/urma/ubcore_opcode.h new file mode 100644 index 0000000000000000000000000000000000000000..515c7710755d33cd0ff66456032b4e35f833a816 --- /dev/null +++ b/include/urma/ubcore_opcode.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore opcode header file + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2023-2-28 + * Note: + * History: 2023-2-28: Create file + */ + +#ifndef UBCORE_OPCODE_H +#define UBCORE_OPCODE_H + +/* opcode definition */ +/* Must be consistent with urma_opcode_t */ +enum ubcore_opcode { + UBCORE_OPC_WRITE = 0x00, + UBCORE_OPC_WRITE_IMM = 0x01, + UBCORE_OPC_WRITE_NOTIFY = 0x02, // not support result + // will return for UBCORE_OPC_WRITE_NOTIFY + UBCORE_OPC_READ = 0x10, + UBCORE_OPC_CAS = 0x20, + UBCORE_OPC_FAA = 0x21, + UBCORE_OPC_CAS_WITH_MASK = 0x24, + UBCORE_OPC_FAA_WITH_MASK = 0x25, + UBCORE_OPC_SEND = 0x40, // remote JFR/jetty ID + UBCORE_OPC_SEND_IMM = 0x41, // remote JFR/jetty ID + UBCORE_OPC_SEND_INVALIDATE = 0x42, // remote JFR/jetty ID and seg token id + UBCORE_OPC_NOP = 0x51, + UBCORE_OPC_LAST +}; + +/* completion information */ +/* Must be consistent with urma_cr_status_t */ +enum ubcore_cr_status { // completion record status + UBCORE_CR_SUCCESS = 0, + UBCORE_CR_LOC_LEN_ERR, // Local data too long error + UBCORE_CR_LOC_OPERATION_ERR, // Local operation err + UBCORE_CR_LOC_PROTECTION_ERR, // Local memory protection error + UBCORE_CR_LOC_ACCESS_ERR, // Access to local memory error when WRITE_WITH_IMM + UBCORE_CR_REM_INVALID_REQ_ERR, + UBCORE_CR_REM_ACCESS_ERR, // Memory access protection error occurred in the remote node + UBCORE_CR_REM_OPERATION_ERR, + UBCORE_CR_RETRY_CNT_EXC_ERR, // Retransmission exceeds the maximum number of times + UBCORE_CR_RNR_RETRY_CNT_EXC_ERR, // RNR retries exceeded the maximum number: + // remote jfr has no buffer + UBCORE_CR_FATAL_ERR, + UBCORE_CR_WR_FLUSH_ERR, + UBCORE_CR_RESP_TIMEOUT_ERR, + UBCORE_CR_MORE_TO_POLL_ERR, + UBCORE_CR_GENERAL_ERR +}; + +/* Must be consistent with urma_cr_opcode_t */ +enum ubcore_cr_opcode { + UBCORE_CR_OPC_SEND = 0x00, + UBCORE_CR_OPC_SEND_WITH_IMM, + UBCORE_CR_OPC_SEND_WITH_INV, + UBCORE_CR_OPC_WRITE_WITH_IMM +}; + +#endif diff --git a/include/urma/ubcore_types.h 
b/include/urma/ubcore_types.h new file mode 100644 index 0000000000000000000000000000000000000000..73bae89574d6e48a82e62e832ddc5cf127ed95b4 --- /dev/null +++ b/include/urma/ubcore_types.h @@ -0,0 +1,1662 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: Types definition provided by ubcore to client and ubep device + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + * History: 2021-11-23: Add segment and jetty management + */ + +#ifndef UBCORE_TYPES_H +#define UBCORE_TYPES_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define UBCORE_MAX_PORT_CNT 8 +#define UBCORE_MAX_VF_CNT 1024 +#define UBCORE_SEG_MAPPED 1 +#define UBCORE_MAX_DEV_NAME 64 +#define UBCORE_MAX_DRIVER_NAME 64 +#define UBCORE_HASH_TABLE_SIZE 64 +#define UBCORE_NET_ADDR_BYTES (16) +#define UBCORE_MAC_BYTES 6 +#define UBCORE_MAX_ATTR_GROUP 3 +#define UBCORE_EID_SIZE (16) +#define UBCORE_EID_STR_LEN (39) +#define EID_FMT \ + "%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x" +#define EID_UNPACK(...) 
__VA_ARGS__ +#define EID_RAW_ARGS(eid) EID_UNPACK(eid[0], eid[1], eid[2], eid[3], eid[4], eid[5], eid[6], \ + eid[7], eid[8], eid[9], eid[10], eid[11], eid[12], eid[13], eid[14], eid[15]) +#define EID_ARGS(eid) EID_RAW_ARGS((eid).raw) + +#define UBCORE_MAX_UPI_CNT 1000 +#define UBCORE_OWN_VF_ID (0xffff) + +enum ubcore_transport_type { + UBCORE_TRANSPORT_INVALID = -1, + UBCORE_TRANSPORT_UB, + UBCORE_TRANSPORT_IB, + UBCORE_TRANSPORT_IP, + UBCORE_TRANSPORT_MAX +}; + +#define UBCORE_ACCESS_LOCAL_WRITE (0x1 << 0) +#define UBCORE_ACCESS_REMOTE_READ (0x1 << 1) +#define UBCORE_ACCESS_REMOTE_WRITE (0x1 << 2) +#define UBCORE_ACCESS_REMOTE_ATOMIC (0x1 << 3) +#define UBCORE_ACCESS_REMOTE_INVALIDATE (0x1 << 4) + +union ubcore_eid { + uint8_t raw[UBCORE_EID_SIZE]; + struct { + uint64_t resv; + uint32_t prefix; + uint32_t addr; + } in4; + struct { + uint64_t subnet_prefix; + uint64_t interface_id; + } in6; +}; + +struct ubcore_ueid_cfg { + union ubcore_eid eid; + uint32_t upi; +}; + +struct ubcore_jetty_id { + union ubcore_eid eid; + uint32_t uasid; + uint32_t id; +}; + +struct ubcore_ubva { + union ubcore_eid eid; + uint32_t uasid; + uint64_t va; +} __packed; + +struct ubcore_ht_param { + uint32_t size; + uint32_t node_offset; /* offset of hlist node in the hash table object */ + uint32_t key_offset; + uint32_t key_size; + int (*cmp_f)(void *obj, const void *key); + void (*free_f)(void *obj); +}; + +struct ubcore_hash_table { + struct ubcore_ht_param p; + struct hlist_head *head; + spinlock_t lock; + struct kref kref; +}; + +union ubcore_jfc_flag { + struct { + uint32_t lock_free : 1; + uint32_t jfc_inline : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +union ubcore_jfs_flag { + struct { + /* 0: IDC_MODE. + * 1: DC_MODE. 
+ * 2: LS_MODE + */ + uint32_t mode : 2; + uint32_t lock_free : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +union ubcore_jfr_flag { + struct { + uint32_t key_policy : 3; /* 0: UBCORE_KEY_NONE + * 1: UBCORE_KEY_PLAIN_TEXT + * 2: UBCORE_KEY_SIGNED + * 3: UBCORE_KEY_ALL_ENCRYPTED + * 4: UBCORE_KEY_RESERVED + */ + uint32_t tag_matching : 1; + uint32_t lock_free : 1; + uint32_t reserved : 27; + } bs; + uint32_t value; +}; + +enum ubcore_jfc_attr_mask { + UBCORE_JFC_MODERATE_COUNT = 0x1, + UBCORE_JFC_MODERATE_PERIOD = 0x1 << 1 +}; + +struct ubcore_jfc_attr { + uint32_t mask; /* mask value refer to enum ubcore_jfc_attr_mask */ + uint16_t moderate_count; + uint16_t moderate_period; /* in micro seconds */ +}; + +enum ubcore_jfc_state { + UBCORE_JFC_STATE_INVALID = 0, + UBCORE_JFC_STATE_VALID, + UBCORE_JFC_STATE_ERROR +}; + +enum ubcore_jetty_state { + UBCORE_JETTY_STATE_RESET = 0, + UBCORE_JETTY_STATE_READY, + UBCORE_JETTY_STATE_SUSPENDED, + UBCORE_JETTY_STATE_ERROR +}; + +struct ubcore_jfs_attr { + uint32_t mask; /* mask value refer to ubcore_jfs_attr_mask_t */ + enum ubcore_jetty_state state; +}; + +enum ubcore_jfr_attr_mask { UBCORE_JFR_RX_THRESHOLD = 0x1 }; + +struct ubcore_jfr_attr { + uint32_t mask; /* mask value refer to enum ubcore_jfr_attr_mask */ + uint32_t rx_threshold; +}; + +enum ubcore_jetty_attr_mask { UBCORE_JETTY_RX_THRESHOLD = 0x1 }; + +struct ubcore_jetty_attr { + uint32_t mask; /* mask value refer to enum ubcore_jetty_attr_mask */ + uint32_t rx_threshold; + enum ubcore_jetty_state state; +}; + +union ubcore_import_seg_flag { + struct { + uint32_t cacheable : 1; + uint32_t access : 6; + uint32_t mapping : 1; + uint32_t reserved : 24; + } bs; + uint32_t value; +}; + +union ubcore_reg_seg_flag { + struct { + uint32_t key_policy : 3; + uint32_t cacheable : 1; + uint32_t dsva : 1; + uint32_t access : 6; + uint32_t non_pin : 1; + uint32_t user_iova : 1; + uint32_t reserved : 19; + } bs; + uint32_t value; +}; + +struct ubcore_udrv_priv { + 
uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; +}; + +struct ubcore_ucontext { + struct ubcore_device *ub_dev; + uint32_t uasid; + void *jfae; /* jfae uobj */ + atomic_t use_cnt; +}; + +struct ubcore_udata { + struct ubcore_ucontext *uctx; + struct ubcore_udrv_priv *udrv_data; +}; + +struct ubcore_jfc; +typedef void (*ubcore_comp_callback_t)(struct ubcore_jfc *jfc); + +enum ubcore_event_type { + UBCORE_EVENT_JFC_ERR, + UBCORE_EVENT_JFS_FATAL, + UBCORE_EVENT_JFS_ACCESS_ERR, + UBCORE_EVENT_JFR_FATAL, + UBCORE_EVENT_JFR_ACCESS_ERR, + UBCORE_EVENT_JETTY_FATAL, + UBCORE_EVENT_JETTY_ACCESS_ERR, + UBCORE_EVENT_PORT_ACTIVE, + UBCORE_EVENT_PORT_ERR, + UBCORE_EVENT_DEV_FATAL, + UBCORE_EVENT_ID_CHANGE, + UBCORE_EVENT_TP_ERR +}; + +struct ubcore_event { + struct ubcore_device *ub_dev; + union { + struct ubcore_jfc *jfc; + struct ubcore_jfs *jfs; + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; + struct ubcore_tp *tp; + uint32_t port_id; + } element; + enum ubcore_event_type event_type; +}; + +typedef void (*ubcore_event_callback_t)(struct ubcore_event *event, struct ubcore_ucontext *ctx); + +struct ubcore_event_handler { + void (*event_callback)(struct ubcore_event *event, struct ubcore_event_handler *handler); + struct list_head node; +}; + +struct ubcore_jfc_cfg { + uint32_t depth; + union ubcore_jfc_flag flag; + void *jfc_context; + uint32_t eq_id; +}; + +struct ubcore_jfc { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfc_cfg jfc_cfg; + uint32_t id; /* allocated by driver */ + ubcore_comp_callback_t jfce_handler; + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfc; /* user space jfc pointer */ + struct hlist_node hnode; + atomic_t use_cnt; +}; + +/* transport mode */ +enum ubcore_transport_mode { + UBCORE_TP_RM = 0x1, /* Reliable message */ + UBCORE_TP_RC = 0x1 << 1, /* Reliable connection */ + UBCORE_TP_UM = 0x1 << 2 /* Unreliable message */ +}; + +struct ubcore_jfs_cfg { + uint32_t depth; + 
union ubcore_jfs_flag flag; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + void *jfs_context; + struct ubcore_jfc *jfc; + enum ubcore_transport_mode trans_mode; +}; + +struct ubcore_jfs { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfs_cfg jfs_cfg; + uint32_t id; /* allocted by driver */ + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfs; /* user space jfs pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ +}; + +struct ubcore_key { + uint32_t key; +}; + +struct ubcore_jfr_cfg { + uint32_t id; /* user may assign id */ + uint32_t depth; + union ubcore_jfr_flag flag; + uint8_t max_sge; + uint8_t min_rnr_timer; + enum ubcore_transport_mode trans_mode; + struct ubcore_jfc *jfc; + struct ubcore_key ukey; + void *jfr_context; +}; + +struct ubcore_jfr { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfr_cfg jfr_cfg; + uint32_t id; /* allocted by driver */ + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfr; /* user space jfr pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ +}; + +union ubcore_jetty_flag { + struct { + uint32_t share_jfr : 1; /* 0: URMA_NO_SHARE_JFR. + * 1: URMA_SHARE_JFR. 
+ */ + uint32_t reserved : 31; + } bs; + uint32_t value; +}; + +struct ubcore_jetty_cfg { + uint32_t id; /* user may assign id */ + uint32_t jfs_depth; + uint32_t jfr_depth; + union ubcore_jetty_flag flag; + struct ubcore_jfc *send_jfc; + struct ubcore_jfc *recv_jfc; + struct ubcore_jfr *jfr; /* shared jfr */ + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint8_t max_recv_sge; + uint32_t max_inline_data; + uint8_t priority; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + uint8_t min_rnr_timer; + enum ubcore_transport_mode trans_mode; + struct ubcore_key ukey; + void *jetty_context; +}; + +struct ubcore_tjetty_cfg { + struct ubcore_jetty_id id; /* jfr, jetty or jetty group id to be imported */ + enum ubcore_transport_mode trans_mode; + struct ubcore_key ukey; /* jfr, jetty or jetty group ukey value to be imported */ +}; + +enum ubcore_target_type { UBCORE_JFR = 0, UBCORE_JETTY, UBCORE_JFR_GROUP, UBCORE_JETTY_GROUP }; + +struct ubcore_tjetty { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + enum ubcore_target_type type; + struct ubcore_tjetty_cfg cfg; + struct ubcore_tp *tp; /* for UB transport device */ + atomic_t use_cnt; + struct mutex lock; +}; + +struct ubcore_jetty { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jetty_cfg jetty_cfg; + uint32_t id; /* allocted by driver */ + struct ubcore_tjetty *remote_jetty; // bind to remote jetty + ubcore_event_callback_t jfae_handler; + uint64_t urma_jetty; /* user space jetty pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct ubcore_hash_table *tptable; /* Only for devices not natively supporting RM mode */ +}; + +struct ubcore_key_id { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + uint32_t key_id; + atomic_t use_cnt; +}; + +struct ubcore_seg_cfg { + uint64_t va; + uint64_t len; + struct ubcore_key_id *keyid; + struct ubcore_key ukey; + union ubcore_reg_seg_flag flag; + uint64_t iova; +}; + +union ubcore_seg_attr 
{ + struct { + uint32_t key_policy : 3; + uint32_t cacheable : 1; + uint32_t dsva : 1; + uint32_t access : 6; + uint32_t non_pin : 1; + uint32_t user_iova : 1; + uint32_t reserved : 19; + } bs; + uint32_t value; +}; + +struct ubcore_seg { + struct ubcore_ubva ubva; + uint64_t len; + union ubcore_seg_attr attr; + uint32_t key_id; +}; + +struct ubcore_target_seg_cfg { + struct ubcore_seg seg; + union ubcore_import_seg_flag flag; + uint64_t mva; /* optional */ + struct ubcore_key ukey; +}; + +struct ubcore_target_seg { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_seg seg; + uint64_t mva; + struct ubcore_key_id *keyid; + atomic_t use_cnt; +}; + +enum ubcore_mtu { + UBCORE_MTU_256 = 1, + UBCORE_MTU_512, + UBCORE_MTU_1024, + UBCORE_MTU_2048, + UBCORE_MTU_4096, + UBCORE_MTU_8192 +}; + +enum ubcore_tp_cc_alg { + UBCORE_TP_CC_PFC = 0, + UBCORE_TP_CC_DCQCN, + UBCORE_TP_CC_DCQCN_AND_NETWORK_CC, + UBCORE_TP_CC_LDCP, + UBCORE_TP_CC_LDCP_AND_CAQM, + UBCORE_TP_CC_LDCP_AND_OPEN_CC, + UBCORE_TP_CC_HC3, + UBCORE_TP_CC_DIP +}; + +enum ubcore_congestion_ctrl_alg { + UBCORE_CC_PFC = 0x1 << UBCORE_TP_CC_PFC, + UBCORE_CC_DCQCN = 0x1 << UBCORE_TP_CC_DCQCN, + UBCORE_CC_DCQCN_AND_NETWORK_CC = 0x1 << UBCORE_TP_CC_DCQCN_AND_NETWORK_CC, + UBCORE_CC_LDCP = 0x1 << UBCORE_TP_CC_LDCP, + UBCORE_CC_LDCP_AND_CAQM = 0x1 << UBCORE_TP_CC_LDCP_AND_CAQM, + UBCORE_CC_LDCP_AND_OPEN_CC = 0x1 << UBCORE_TP_CC_LDCP_AND_OPEN_CC, + UBCORE_CC_HC3 = 0x1 << UBCORE_TP_CC_HC3, + UBCORE_CC_DIP = 0x1 << UBCORE_TP_CC_DIP +}; + +enum ubcore_speed { + UBCORE_SP_10M = 0, + UBCORE_SP_100M, + UBCORE_SP_1G, + UBCORE_SP_2_5G, + UBCORE_SP_5G, + UBCORE_SP_10G, + UBCORE_SP_14G, + UBCORE_SP_25G, + UBCORE_SP_40G, + UBCORE_SP_50G, + UBCORE_SP_100G, + UBCORE_SP_200G, + UBCORE_SP_400G, + UBCORE_SP_800G +}; + +enum ubcore_link_width { + UBCORE_LINK_X1 = 0x1, + UBCORE_LINK_X2 = 0x1 << 1, + UBCORE_LINK_X4 = 0x1 << 2, + UBCORE_LINK_X8 = 0x1 << 3, + UBCORE_LINK_X16 = 0x1 << 4, + UBCORE_LINK_X32 = 0x1 << 5 
+}; + +enum ubcore_port_state { + UBCORE_PORT_NOP = 0, + UBCORE_PORT_DOWN, + UBCORE_PORT_INIT, + UBCORE_PORT_ARMED, + UBCORE_PORT_ACTIVE, + UBCORE_PORT_ACTIVE_DEFER +}; + +union ubcore_device_feat { + struct { + uint32_t oor : 1; + uint32_t jfc_per_wr : 1; + uint32_t stride_op : 1; + uint32_t load_store_op : 1; + uint32_t non_pin : 1; + uint32_t pmem : 1; + uint32_t jfc_inline : 1; + uint32_t spray_en : 1; + uint32_t selective_retrans : 1; + uint32_t reserved : 23; + } bs; + uint32_t value; +}; + +struct ubcore_port_status { + enum ubcore_port_state state; /* PORT_DOWN, PORT_INIT, PORT_ACTIVE */ + enum ubcore_speed active_speed; /* bandwidth */ + enum ubcore_link_width active_width; /* link width: X1, X2, X4 */ + enum ubcore_mtu active_mtu; +}; + +struct ubcore_device_status { + struct ubcore_port_status port_status[UBCORE_MAX_PORT_CNT]; +}; + +struct ubcore_port_attr { + enum ubcore_mtu max_mtu; /* MTU_256, MTU_512, MTU_1024 */ +}; + +struct ubcore_device_cap { + union ubcore_device_feat feature; + uint32_t max_jfc; + uint32_t max_jfs; + uint32_t max_jfr; + uint32_t max_jetty; + uint32_t max_jfc_depth; + uint32_t max_jfs_depth; + uint32_t max_jfr_depth; + uint32_t max_jfs_inline_size; + uint32_t max_jfs_sge; + uint32_t max_jfs_rsge; + uint32_t max_jfr_sge; + uint64_t max_msg_size; + uint64_t max_rc_outstd_cnt; /* max read command outstanding count in the function entity */ + uint16_t trans_mode; /* one or more from enum ubcore_transport_mode */ + uint16_t congestion_ctrl_alg; /* one or more mode from enum ubcore_congestion_ctrl_alg */ + uint16_t comp_vector_cnt; /* completion vector count */ + uint32_t utp_cnt; +}; + +struct ubcore_device_attr { + union ubcore_eid eid; // RW + uint32_t max_eid_cnt; + uint64_t guid; + uint32_t max_upi_cnt; + uint32_t upi[UBCORE_MAX_UPI_CNT]; // VF or PF own UPIs + struct ubcore_device_cap dev_cap; + uint8_t port_cnt; + struct ubcore_port_attr port_attr[UBCORE_MAX_PORT_CNT]; + bool virtualization; /* In VM or not, must set by driver 
when register device */ + uint16_t vf_cnt; /* PF: greater than or equal to 0; VF: must be 0 */ +}; + +union ubcore_device_cfg_mask { + struct { + uint32_t port_ets : 1; + uint32_t port_fec : 1; + } bs; + uint32_t value; +}; + +struct ubcore_congestion_control { + uint32_t data; +}; + +struct ubcore_port_ets { + uint32_t data; +}; + +struct ubcore_port_fec { + uint32_t data; +}; + +struct ubcore_device_cfg { + union ubcore_device_cfg_mask mask; + struct ubcore_port_fec fec; + struct ubcore_port_ets ets; +}; + +/* struct [struct ubcore_user_ctl_in] should be consistent with [urma_user_ctl_in_t] */ +struct ubcore_user_ctl_in { + uint64_t addr; + uint32_t len; + uint32_t opcode; +}; + +/* struct [struct ubcore_user_ctl_out] should be consistent with [urma_user_ctl_out_t] */ +struct ubcore_user_ctl_out { + uint64_t addr; + uint32_t len; + uint32_t rsv; +}; + +struct ubcore_user_ctl { + struct ubcore_ucontext *uctx; + struct ubcore_user_ctl_in in; + struct ubcore_user_ctl_out out; + struct ubcore_udrv_priv udrv_data; +}; + +struct ubcore_net_addr { + union { + uint8_t raw[UBCORE_NET_ADDR_BYTES]; + struct { + uint64_t resv1; + uint32_t resv2; + uint32_t addr; + } in4; + struct { + uint64_t subnet_prefix; + uint64_t interface_id; + } in6; + } net_addr; + uint64_t vlan; /* available for UBOE */ + uint8_t mac[UBCORE_MAC_BYTES]; /* available for UBOE */ +}; + +union ubcore_tp_cfg_flag { + struct { + uint32_t target : 1; /* 0: initiator, 1: target */ + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ + uint32_t reserved : 27; + } bs; + uint32_t value; +}; + +union ubcore_tp_mod_flag { + struct { + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: 
enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* The value is enum ubcore_tp_cc_alg */ + uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ + uint32_t reserved : 24; + } bs; + uint32_t value; +}; + +/* The first bits must be consistent with union ubcore_tp_cfg_flag */ +union ubcore_tp_flag { + struct { + uint32_t target : 1; /* 0: initiator, 1: target */ + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* The value is enum ubcore_tp_cc_alg */ + uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ + uint32_t reserved : 23; + } bs; + uint32_t value; +}; + +enum ubcore_tp_state { + UBCORE_TP_STATE_RESET = 0, + UBCORE_TP_STATE_RTR, + UBCORE_TP_STATE_RTS, + UBCORE_TP_STATE_ERROR +}; + +enum ubcore_ta_type { + UBCORE_TA_NONE = 0, + UBCORE_TA_JFS_TJFR, + UBCORE_TA_JETTY_TJETTY, + UBCORE_TA_VIRT /* virtualization */ +}; + +struct ubcore_ta { + enum ubcore_ta_type type; + union { + struct ubcore_jfs *jfs; + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; + }; + struct ubcore_jetty_id tjetty_id; /* peer jetty id */ +}; + +struct ubcore_tp_cfg { + struct ubcore_ta *ta; /* NULL for UB device */ + union ubcore_tp_cfg_flag flag; /* indicate initiator or target, etc */ + struct ubcore_net_addr local_net_addr; + struct ubcore_net_addr peer_net_addr; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + enum ubcore_transport_mode trans_mode; + uint32_t rx_psn; + enum ubcore_mtu mtu; + uint16_t data_udp_start; /* src udp port start, for multipath data */ + uint16_t ack_udp_start; /* src udp port start, for multipath ack */ + uint8_t udp_range; /* src udp port range, for both multipath data and ack */ + uint8_t retry_num; + uint8_t ack_timeout; + uint8_t tc; /* 
traffic class */ +}; + +struct ubcore_tp_ext { + uint64_t addr; + uint32_t len; +}; + +union ubcore_tp_attr_mask { + struct { + uint32_t flag : 1; + uint32_t peer_tpn : 1; + uint32_t state : 1; + uint32_t tx_psn : 1; + uint32_t rx_psn : 1; /* modify both rx psn and tx psn when restore tp */ + uint32_t mtu : 1; + uint32_t cc_pattern_idx : 1; + uint32_t peer_ext : 1; + uint32_t reserved : 24; + } bs; + uint32_t value; +}; + +struct ubcore_tp_attr { + union ubcore_tp_mod_flag flag; + uint32_t peer_tpn; + enum ubcore_tp_state state; + uint32_t tx_psn; + uint32_t rx_psn; /* modify both rx psn and tx psn when restore tp */ + enum ubcore_mtu mtu; + uint8_t cc_pattern_idx; + struct ubcore_tp_ext peer_ext; +}; + +struct ubcore_tp { + uint32_t tpn; /* driver assgined in creating tp */ + uint32_t peer_tpn; + struct ubcore_device *ub_dev; + union ubcore_tp_flag flag; /* indicate initiator or target, etc */ + struct ubcore_net_addr local_net_addr; + struct ubcore_net_addr peer_net_addr; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + enum ubcore_transport_mode trans_mode; + enum ubcore_tp_state state; + uint32_t rx_psn; + uint32_t tx_psn; + enum ubcore_mtu mtu; + uint16_t data_udp_start; /* src udp port start, for multipath data */ + uint16_t ack_udp_start; /* src udp port start, for multipath ack */ + uint8_t udp_range; /* src udp port range, for both multipath data and ack */ + uint8_t retry_num; + uint8_t ack_timeout; + uint8_t tc; /* traffic class */ + uint8_t cc_pattern_idx; + struct ubcore_tp_ext tp_ext; /* driver fill in creating tp */ + struct ubcore_tp_ext peer_ext; /* ubcore fill before modifying tp */ + atomic_t use_cnt; + void *priv; /* ubcore private data for tp management */ +}; + +enum ubcore_res_key_type { + UBCORE_RES_KEY_UPI = 1, // key id: UPI ID + UBCORE_RES_KEY_TP, // key id: TPN + UBCORE_RES_KEY_TPG, // key id: TPGN, currently not supported + UBCORE_RES_KEY_UTP, // key id: UTP ID + UBCORE_RES_KEY_JFS, // key id: JFS ID + UBCORE_RES_KEY_JFR, // 
key id: JFR ID + UBCORE_RES_KEY_JETTY, // key id: JETTY ID + UBCORE_RES_KEY_JETTY_GROUP, // key id: JETTY GROUP ID, currently not supported + UBCORE_RES_KEY_JFC, // key id: JFC ID + UBCORE_RES_KEY_SEG, // key id: UKEY ID + UBCORE_RES_KEY_URMA_DEV // key id: EID +}; + +struct ubcore_res_upi_val { + uint32_t upi; +}; + +struct ubcore_res_tp_val { + uint32_t tpn; + uint32_t psn; + uint8_t pri; + uint8_t oor; + uint8_t state; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint8_t udp_range; + uint32_t spray_en; +}; + +struct ubcore_res_tpg_val { + uint32_t tp_cnt; + uint8_t pri; + uint32_t *tp_list; +}; + +struct ubcore_res_utp_val { + uint8_t utp_id; + uint8_t spray_en; + uint16_t data_udp_start; + uint8_t udp_range; +}; + +struct ubcore_res_jfs_val { + uint32_t jfs_id; + uint8_t state; + uint32_t depth; + uint8_t pri; + uint32_t jfc_id; +}; + +struct ubcore_res_jfr_val { + uint32_t jfr_id; + uint8_t state; + uint32_t depth; + uint8_t pri; + uint32_t jfc_id; +}; + +struct ubcore_res_jetty_val { + uint32_t jetty_id; + uint32_t send_jfc_id; + uint32_t recv_jfc_id; + uint32_t jfr_id; + uint32_t jfs_depth; + uint32_t jfr_depth; + uint8_t state; + uint8_t pri; +}; + +struct ubcore_res_jetty_group_val { + uint16_t jetty_cnt; + uint32_t *jetty_list; +}; + +struct ubcore_res_jfc_val { + uint32_t jfc_id; + uint8_t state; + uint32_t depth; +}; + +struct ubcore_res_seg_val { + struct ubcore_ubva ubva; + uint64_t len; + uint32_t key_id; + struct ubcore_key ukey; +}; + +struct ubcore_seg_info { + struct ubcore_ubva ubva; + uint64_t len; + uint32_t key_id; +}; + +struct ubcore_res_dev_val { + uint32_t seg_cnt; + struct ubcore_seg_info *seg_list; // SEG key_id list + uint32_t jfs_cnt; + uint32_t *jfs_list; // JFS ID list + uint32_t jfr_cnt; + uint32_t *jfr_list; // JFR ID list + uint32_t jfc_cnt; + uint32_t *jfc_list; // JFC ID list + uint32_t jetty_cnt; + uint32_t *jetty_list; // Jetty ID list + uint32_t jetty_group_cnt; + uint32_t *jetty_group_list; // Jetty group ID list 
+ uint32_t tp_cnt; + uint32_t *tp_list; // RC + uint32_t tpg_cnt; + uint32_t *tpg_list; // RM + uint32_t utp_cnt; + uint32_t *utp_list; // UM +}; + +struct ubcore_res_key { + uint8_t type; /* refer to enum struct ubcore_res_key_type */ + uint32_t key; /* as UPI, key is vf_id */ +}; + +struct ubcore_res_val { + uint64_t addr; /* allocated and free by ubcore */ + uint32_t len; /* in&out. As a input parameter, + * it indicates the length allocated by the ubcore + * As a output parameter, it indicates the actual data length. + */ +}; + +union ubcore_jfs_wr_flag { + struct { + uint32_t place_order : 2; /* 0: There is no order with other WR. + * 1: relax order. + * 2: strong order. + * 3: reserve. + */ + uint32_t comp_order : 1; /* 0: There is no completion order with other WR + * 1: Completion order with previous WR. + */ + + uint32_t fence : 1; /* 0: There is no fence. + * 1: Fence with previous read and atomic WR + */ + uint32_t solicited_enable : 1; /* 0: not solicited. + * 1: solicited. It will trigger an event + * on remote side + */ + uint32_t complete_enable : 1; /* 0: Do not notify local process + * after the task is complete. + * 1: Notify local process + * after the task is completed. + */ + uint32_t inline_flag : 1; /* 0: No inline. + * 1: Inline data. 
+ */ + uint32_t reserved : 25; + } bs; + uint32_t value; +}; + +struct ubcore_sge { + uint64_t addr; + uint32_t len; + struct ubcore_target_seg *tseg; +}; + +struct ubcore_sg { + struct ubcore_sge *sge; + uint32_t num_sge; +}; + +struct ubcore_rw_wr { + struct ubcore_sg src; + struct ubcore_sg dst; + struct ubcore_tjetty *tjetty; /* For write imm */ + uint64_t notify_data; /* notify data or immediate data in host byte order */ +}; + +struct ubcore_send_wr { + struct ubcore_sg src; + struct ubcore_tjetty *tjetty; + uint8_t target_hint; /* hint of jetty in a target jetty group */ + uint64_t imm_data; /* immediate data in host byte order */ + struct ubcore_target_seg *tseg; /* Used only when send with invalidate */ +}; + +struct ubcore_cas_wr { + struct ubcore_sge *dst; /* len must be less or equal to 8 Bytes */ + struct ubcore_sge *src; /* Local address for destination original value written back */ + uint64_t cmp_data; /* Value compared with destination value */ + uint64_t swap_data; /* If destination value is the same as cmp_data, + * destination value will be changed to swap_data + */ +}; + +struct ubcore_cas_mask_wr { + struct ubcore_cas_wr cas; + uint64_t cmp_msk; + uint64_t swap_msk; +}; + +struct ubcore_faa_wr { + struct ubcore_sge *dst; /* len in the sge is the length of faa at remote side */ + struct ubcore_sge *src; /* Local address for destination original value written back */ + uint64_t operand; /* Addend */ +}; + +struct ubcore_faa_mask_wr { + struct ubcore_faa_wr faa; + uint64_t msk; +}; + +struct ubcore_jfs_wr { + enum ubcore_opcode opcode; + union ubcore_jfs_wr_flag flag; + uintptr_t user_ctx; + union { + struct ubcore_rw_wr rw; + struct ubcore_send_wr send; + struct ubcore_cas_wr cas; + struct ubcore_cas_mask_wr cas_mask; + struct ubcore_faa_wr faa; + struct ubcore_faa_mask_wr faa_mask; + }; + struct ubcore_jfs_wr *next; +}; + +struct ubcore_jfr_wr { + struct ubcore_sg src; + uintptr_t user_ctx; + struct ubcore_jfr_wr *next; +}; + +union 
ubcore_cr_flag { + struct { + uint8_t inline_flag : 1; /* Indicate CR contains inline data or not */ + uint8_t s_r : 1; /* Indicate CR stands for sending or receiving */ + uint8_t jetty : 1; /* Indicate local_id or remote_id + * in the CR stands for jetty or JFS/JFR + */ + } bs; + uint8_t value; +}; + +struct ubcore_cr { + enum ubcore_cr_status status; + uintptr_t user_ctx; + enum ubcore_cr_opcode opcode; + union ubcore_cr_flag flag; + uint32_t completion_len; /* The number of bytes transferred */ + uint32_t local_id; /* Local jetty ID, or JFS ID, or JFR ID, depending on flag */ + struct ubcore_jetty_id remote_id; /* Valid only for receiving CR. + * The remote jetty where received msg comes from, + * may be jetty ID or JFS ID, depending on flag + */ + uint64_t imm_data; /* Valid only for received CR */ + uint32_t tpn; + uintptr_t user_data; /* Use as pointer to local jetty struct */ +}; + +enum ubcore_stats_key_type { + UBCORE_STATS_KEY_TP = 1, + UBCORE_STATS_KEY_TPG = 2, + UBCORE_STATS_KEY_JFS = 3, + UBCORE_STATS_KEY_JFR = 4, + UBCORE_STATS_KEY_JETTY = 5, + UBCORE_STATS_KEY_JETTY_GROUP = 6 +}; + +struct ubcore_stats_key { + uint8_t type; /* stats type, refer to enum ubcore_stats_key_type */ + uint32_t key; /* key can be tpn/tpgn/jetty_id/token_id/ctx_id/etc */ +}; + +struct ubcore_stats_com_val { + uint64_t tx_pkt; + uint64_t rx_pkt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint64_t tx_pkt_err; + uint64_t rx_pkt_err; +}; + +struct ubcore_stats_val { + uint64_t addr; /* this addr is alloc and free by ubcore, + * refer to struct ubcore_stats_com_val + */ + + uint32_t len; /* [in/out] real length filled when success + * to query and buffer length enough; + * expected length filled and return failure when buffer length not enough + */ +}; + +union ubcore_utp_mod_flag { + struct { + uint32_t spray_en : 1; // Whether to enable end-side port number hashing, + // 0 : disabled, 1 : enabled + uint32_t reserved : 31; + } bs; + uint32_t value; +}; + +struct ubcore_utp_attr 
{ + union ubcore_utp_mod_flag flag; + uint16_t data_udp_start; + uint8_t udp_range; +}; + +union ubcore_utp_attr_mask { + struct { + uint32_t flag : 1; + uint32_t udp_port : 1; + uint32_t udp_range : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +struct ubcore_ops { + struct module *owner; /* kernel driver module */ + char driver_name[UBCORE_MAX_DRIVER_NAME]; /* user space driver name */ + uint32_t abi_version; /* abi version of kernel driver */ + /** + * set function entity id for ub device. must be called before alloc context + * @param[in] dev: the ub device handle; + * @param[in] eid: function entity id (eid) to set; + * @return: 0 on success, other value on error + */ + int (*set_eid)(struct ubcore_device *dev, union ubcore_eid eid); + /** + * set upi + * @param[in] dev: the ub device handle; + * @param[in] vf_id: vf_id; + * @param[in] idx: idx of upi in vf; + * @param[in] upi: upi of vf to set + * @return: 0 on success, other value on error + */ + int (*set_upi)(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx, uint32_t upi); + /** + * add a function entity id (eid) to ub device + * @param[in] dev: the ubcore_device handle; + * @param[in] eid: function entity id (eid) to be added; + * @return: the index of eid, less than 0 indicating error + */ + int (*add_eid)(struct ubcore_device *dev, const union ubcore_eid *eid); + /** + * remove a function entity id (eid) specified by idx from ub device + * @param[in] dev: the ubcore_device handle; + * @param[in] idx: the idx of function entity id (eid) to be deleted; + * @return: 0 on success, other value on error + */ + int (*delete_eid_by_idx)(struct ubcore_device *dev, uint16_t idx); + /** + * add a function entity id (eid) to ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] vf_id: vf_id; + * @param[in] cfg: eid and the upi of vf to which the eid belongs can be specified; + * @return: the index of eid/upi, less than 0 indicating error + */ + int 
(*add_ueid)(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg); + /** + * remove a function entity id (eid) specified by idx from ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] vf_id: vf_id; + * @param[in] idx: the idx of function entity id (eid) to be deleted; + * @return: 0 on success, other value on error + */ + int (*delete_ueid_by_idx)(struct ubcore_device *dev, uint16_t vf_id, uint16_t idx); + /** + * query device attributes + * @param[in] dev: the ub device handle; + * @param[out] attr: attributes for the driver to fill in + * @return: 0 on success, other value on error + */ + int (*query_device_attr)(struct ubcore_device *dev, struct ubcore_device_attr *attr); + /** + * query device status + * @param[in] dev: the ub device handle; + * @param[out] status: status for the driver to fill in + * @return: 0 on success, other value on error + */ + int (*query_device_status)(const struct ubcore_device *dev, + struct ubcore_device_status *status); + /** + * query resource + * @param[in] dev: the ub device handle; + * @param[in] key: resource type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ + int (*query_res)(const struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val); + /** + * config device + * @param[in] dev: the ub device handle; + * @param[in] cfg: device configuration + * @return: 0 on success, other value on error + */ + int (*config_device)(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg); + /** + * set ub network address + * @param[in] dev: the ub device handle; + * @param[in] net_addr: net_addr to set + * @return: 0 on success, other value on error + */ + int (*set_net_addr)(struct ubcore_device *dev, const struct ubcore_net_addr *net_addr); + /** + * unset ub network address + * @param[in] dev: the ub device handle; + * @param[in] net_addr: net_addr to unset + * @return: 0 on success, other 
value on error + */ + int (*unset_net_addr)(struct ubcore_device *dev, const struct ubcore_net_addr *net_addr); + /** + * allocate a context from ubep for a user process + * @param[in] dev: the ub device handle; + * @param[in] uasid: uasid for the context to be allocated + * @param[in] udrv_data: user space driver data + * @return: pointer to user context on success, null or error, + */ + struct ubcore_ucontext *(*alloc_ucontext)(struct ubcore_device *dev, uint32_t uasid, + struct ubcore_udrv_priv *udrv_data); + /** + * free a context to ubep + * @param[in] uctx: the user context created before; + * @return: 0 on success, other value on error + */ + int (*free_ucontext)(struct ubcore_ucontext *uctx); + /** + * mmap doorbell or jetty buffer, etc + * @param[in] uctx: the user context created before; + * @param[in] vma: linux vma including vm_start, vm_pgoff, etc; + * @return: 0 on success, other value on error + */ + int (*mmap)(struct ubcore_ucontext *ctx, struct vm_area_struct *vma); + + /* segment part */ + /** alloc key id to ubep + * @param[in] dev: the ub device handle; + * @param[in] udata: ucontext and user space driver data + * @return: key id pointer on success, NULL on error + */ + struct ubcore_key_id *(*alloc_key_id)(struct ubcore_device *dev, + struct ubcore_udata *udata); + + /** free key id from ubep + * @param[in] key_id: the key id alloced before; + * @return: 0 on success, other value on error + */ + int (*free_key_id)(struct ubcore_key_id *key_id); + + /** register segment to ubep + * @param[in] dev: the ub device handle; + * @param[in] cfg: segment attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: target segment pointer on success, NULL on error + */ + struct ubcore_target_seg *(*register_seg)(struct ubcore_device *dev, + const struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); + + /** unregister segment from ubep + * @param[in] tseg: the segment registered before; + * @return: 0 on 
success, other value on error + */ + int (*unregister_seg)(struct ubcore_target_seg *tseg); + + /** import a remote segment to ubep + * @param[in] dev: the ub device handle; + * @param[in] cfg: segment attributes and import configurations + * @param[in] udata: ucontext and user space driver data + * @return: target segment handle on success, NULL on error + */ + struct ubcore_target_seg *(*import_seg)(struct ubcore_device *dev, + const struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); + + /** unimport seg from ubep + * @param[in] tseg: the segment imported before; + * @return: 0 on success, other value on error + */ + int (*unimport_seg)(struct ubcore_target_seg *tseg); + + /* jetty part */ + /** + * create jfc with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jfc attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jfc pointer on success, NULL on error + */ + struct ubcore_jfc *(*create_jfc)(struct ubcore_device *dev, + const struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata); + + /** + * modify jfc from ubep. + * @param[in] jfc: the jfc created before; + * @param[in] attr: ubcore jfc attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jfc)(struct ubcore_jfc *jfc, const struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata); + + /** + * destroy jfc from ubep. + * @param[in] jfc: the jfc created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfc)(struct ubcore_jfc *jfc); + + /** + * rearm jfc. + * @param[in] jfc: the jfc created before; + * @param[in] solicited_only: rearm notify by message marked with solicited flag + * @return: 0 on success, other value on error + */ + int (*rearm_jfc)(struct ubcore_jfc *jfc, bool solicited_only); + + /** + * create jfs with ubep. 
+ * @param[in] dev: the ub device handle; + * @param[in] cfg: jfs attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jfs pointer on success, NULL on error + */ + struct ubcore_jfs *(*create_jfs)(struct ubcore_device *dev, + const struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify jfs from ubep. + * @param[in] jfs: the jfs created before; + * @param[in] attr: ubcore jfs attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jfs)(struct ubcore_jfs *jfs, const struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata); + /** + * query jfs from ubep. + * @param[in] jfs: the jfs created before; + * @param[out] cfg: jfs configurations; + * @param[out] attr: ubcore jfs attributes; + * @return: 0 on success, other value on error + */ + int (*query_jfs)(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr); + /** + * destroy jfs from ubep. + * @param[in] jfs: the jfs created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfs)(struct ubcore_jfs *jfs); + /** + * flush jfs from ubep. + * @param[in] jfs: the jfs created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, + * 0 means no completion record returned, -1 on error + */ + int (*flush_jfs)(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); + + /** + * create jfr with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jfr attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jfr pointer on success, NULL on error + */ + struct ubcore_jfr *(*create_jfr)(struct ubcore_device *dev, + const struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify jfr from ubep. 
+ * @param[in] jfr: the jfr created before; + * @param[in] attr: ubcore jfr attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jfr)(struct ubcore_jfr *jfr, const struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata); + /** + * query jfr from ubep. + * @param[in] jfr: the jfr created before; + * @param[out] cfg: jfr configurations; + * @param[out] attr: ubcore jfr attributes; + * @return: 0 on success, other value on error + */ + int (*query_jfr)(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr); + /** + * destroy jfr from ubep. + * @param[in] jfr: the jfr created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfr)(struct ubcore_jfr *jfr); + + /** + * import jfr to ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: remote jfr attributes and import configurations + * @param[in] udata: ucontext and user space driver data + * @return: target jfr pointer on success, NULL on error + */ + struct ubcore_tjetty *(*import_jfr)(struct ubcore_device *dev, + const struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + /** + * unimport jfr from ubep. + * @param[in] tjfr: the target jfr imported before; + * @return: 0 on success, other value on error + */ + int (*unimport_jfr)(struct ubcore_tjetty *tjfr); + + /** + * create jetty with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jetty attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jetty pointer on success, NULL on error + */ + struct ubcore_jetty *(*create_jetty)(struct ubcore_device *dev, + const struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify jetty from ubep. 
+ * @param[in] jetty: the jetty created before; + * @param[in] attr: ubcore jetty attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jetty)(struct ubcore_jetty *jetty, const struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata); + /** + * query jetty from ubep. + * @param[in] jetty: the jetty created before; + * @param[out] cfg: jetty configurations; + * @param[out] attr: ubcore jetty attributes; + * @return: 0 on success, other value on error + */ + int (*query_jetty)(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr); + /** + * destroy jetty from ubep. + * @param[in] jetty: the jetty created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jetty)(struct ubcore_jetty *jetty); + + /** + * flush jetty from ubep. + * @param[in] jetty: the jetty created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, + * 0 means no completion record returned, -1 on error + */ + int (*flush_jetty)(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr); + + /** + * import jetty to ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] udata: ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ + struct ubcore_tjetty *(*import_jetty)(struct ubcore_device *dev, + const struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + /** + * unimport jetty from ubep. + * @param[in] tjetty: the target jetty imported before; + * @return: 0 on success, other value on error + */ + int (*unimport_jetty)(struct ubcore_tjetty *tjetty); + /** + * create tp. 
+ * @param[in] dev: the ub device handle; + * @param[in] cfg: tp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: tp pointer on success, NULL on error + */ + struct ubcore_tp *(*create_tp)(struct ubcore_device *dev, const struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify tp. + * @param[in] tp: tp pointer created before + * @param[in] attr: tp attributes + * @param[in] mask: attr mask indicating the attributes to be modified + * @return: 0 on success, other value on error + */ + int (*modify_tp)(struct ubcore_tp *tp, const struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask); + /** + * destroy tp. + * @param[in] tp: tp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_tp)(struct ubcore_tp *tp); + /** + * operation of user ioctl cmd. + * @param[in] user_ctl: kdrv user control command pointer; + * Return: 0 on success, other value on error + */ + int (*user_ctl)(struct ubcore_user_ctl *user_ctl); + + /** data path ops */ + /** + * post jfs wr. + * @param[in] jfs: the jfs created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jfs_wr)(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); + /** + * post jfr wr. + * @param[in] jfr: the jfr created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jfr_wr)(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); + /** + * post jetty send wr. 
+ * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jetty_send_wr)(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); + /** + * post jetty receive wr. + * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jetty_recv_wr)(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); + /** + * poll jfc. + * @param[in] jfc: the jfc created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be polled; + * @return: 0 on success, other value on error + */ + int (*poll_jfc)(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); + int (*config_utp)(struct ubcore_device *dev, uint32_t utp_id, + const struct ubcore_utp_attr *attr, union ubcore_utp_attr_mask mask); + /** + * query_stats. 
success to query and buffer length is enough + * @param[in] dev: the ub device handle; + * @param[in] key: type and key value of the ub device to query; + * @param[in/out] val: address and buffer length of query results + * @return: 0 on success, other value on error + */ + int (*query_stats)(const struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val); +}; + +struct ubcore_bitmap { + unsigned long *bits; + uint32_t size; + spinlock_t lock; +}; + +enum ubcore_hash_table_type { + UBCORE_HT_JFS = 0, /* jfs hash table */ + UBCORE_HT_JFR, /* jfr hash table */ + UBCORE_HT_JFC, /* jfc hash table */ + UBCORE_HT_JETTY, /* jetty hash table */ + UBCORE_HT_TP, /* tp table */ + UBCORE_HT_NUM +}; + +struct ubcore_device { + struct list_head list_node; /* add to device list */ + + /* driver fills start */ + char dev_name[UBCORE_MAX_DEV_NAME]; + + struct device *dma_dev; + struct device dev; + struct net_device *netdev; + struct ubcore_ops *ops; + enum ubcore_transport_type transport_type; + int num_comp_vectors; /* Number of completion interrupt vectors for the device */ + struct ubcore_device_attr attr; + struct attribute_group *group[UBCORE_MAX_ATTR_GROUP]; /* driver may fill group [1] */ + /* driver fills end */ + + struct ubcore_device_cfg cfg; + + /* port management */ + struct kobject *ports_parent; /* kobject parent of the ports in the port list */ + struct list_head port_list; + + /* For ubcore client */ + spinlock_t client_ctx_lock; + struct list_head client_ctx_list; + struct list_head event_handler_list; + spinlock_t event_handler_lock; + struct ubcore_hash_table ht[UBCORE_HT_NUM]; /* to be replaced with uobj */ + + /* protect from unregister device */ + atomic_t use_cnt; + struct completion comp; +}; + +struct ubcore_port { + struct kobject kobj; /* add to port list */ + struct ubcore_device *ub_dev; + uint32_t port_no; + struct ubcore_net_addr net_addr; +}; + +struct ubcore_client { + struct list_head list_node; + char *client_name; 
+ int (*add)(struct ubcore_device *dev); + void (*remove)(struct ubcore_device *dev, void *client_ctx); +}; + +struct ubcore_client_ctx { + struct list_head list_node; + void *data; // Each ubep device creates some data on the client, such as uburma_device. + struct ubcore_client *client; +}; + +union ubcore_umem_flag { + struct { + uint32_t non_pin : 1; /* 0: pinned to physical memory. + * 1: non pin. + */ + uint32_t writable : 1; /* 0: read-only. + * 1: writable. + */ + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubcore_umem { + struct ubcore_device *ub_dev; + struct mm_struct *owning_mm; + uint64_t length; + uint64_t va; + union ubcore_umem_flag flag; + struct sg_table sg_head; + uint32_t nmap; +}; + +#endif diff --git a/include/urma/ubcore_uapi.h b/include/urma/ubcore_uapi.h new file mode 100644 index 0000000000000000000000000000000000000000..008915072ad2f06a67101bf7f31cbf06f9924cd2 --- /dev/null +++ b/include/urma/ubcore_uapi.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore api for other client kmod, such as uburma. + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + * History: 2021-11-25: add segment and jetty management function + * History: 2022-7-25: modify file name + */ + +#ifndef UBCORE_UAPI_H +#define UBCORE_UAPI_H + +#include <urma/ubcore_types.h> + +/** + * Application specifies the device to allocate a context. 
+ * @param[in] dev: ubcore_device found by add ops in the client. + * @param[in] uasid: (deprecated) + * @param[in] udrv_data (optional): ucontext and user space driver data + * @return: ubcore_ucontext pointer on success, NULL on fail. + * Note: this API is called only by uburma representing user-space application, + * not by other kernel modules + */ +struct ubcore_ucontext *ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t uasid, + struct ubcore_udrv_priv *udrv_data); +/** + * Free the allocated context. + * @param[in] dev: device to free context. + * @param[in] ucontext: handle of the allocated context. + * Note: this API is called only by uburma representing user-space application, + * not by other kernel modules + */ +void ubcore_free_ucontext(const struct ubcore_device *dev, struct ubcore_ucontext *ucontext); +/** + * set function entity id for ub device. must be called before alloc context + * @param[in] dev: the ubcore_device handle; + * @param[in] eid: function entity id (eid) to set; + * @return: 0 on success, other value on error + */ +int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid); +/** + * query device attributes + * @param[in] dev: the ubcore_device handle; + * @param[out] attr: attributes returned to client + * @return: 0 on success, other value on error + */ +int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_attr *attr); +/** + * config device + * @param[in] dev: the ubcore_device handle; + * @param[in] cfg: device configuration + * @return: 0 on success, other value on error + */ +int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg); + +/** + * set ctx data of a client + * @param[in] dev: the ubcore_device handle; + * @param[in] client: ubcore client pointer + * @param[in] data: client private data to be set + * @return: 0 on success, other value on error + */ +void ubcore_set_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client, + 
void *data); +/** + * get ctx data of a client + * @param[in] dev: the ubcore_device handle; + * @param[in] client: ubcore client pointer + * @return: client private data set before + */ +void *ubcore_get_client_ctx_data(struct ubcore_device *dev, const struct ubcore_client *client); +/** + * Register a new client to ubcore + * @param[in] dev: the ubcore_device handle; + * @param[in] new_client: ubcore client to be registered + * @return: 0 on success, other value on error + */ +int ubcore_register_client(struct ubcore_client *new_client); +/** + * Unregister a client from ubcore + * @param[in] rm_client: ubcore client to be unregistered + */ +void ubcore_unregister_client(struct ubcore_client *rm_client); +/** + * query stats + * @param[in] dev: the ubcore_device handle; + * @param[in] key: stats type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ +int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val); + +#endif