diff --git a/drivers/ub/hw/hns3/hns3_udma_abi.h b/drivers/ub/hw/hns3/hns3_udma_abi.h index 92c2f1f05504248761b16f4c4a23c395f97ed16c..d6458e6b97d2d2336d32661b360f1aa28abcee7d 100644 --- a/drivers/ub/hw/hns3/hns3_udma_abi.h +++ b/drivers/ub/hw/hns3/hns3_udma_abi.h @@ -13,48 +13,48 @@ * */ -#ifndef _UDMA_ABI_H -#define _UDMA_ABI_H +#ifndef _HNS3_UDMA_ABI_H +#define _HNS3_UDMA_ABI_H #include -#define MAP_COMMAND_MASK 0xff -#define MAP_INDEX_MASK 0xffffff -#define MAP_INDEX_SHIFT 8 -#define UDMA_DWQE_PAGE_SIZE 65536 -#define UDMA_JETTY_X_PREFIX_BIT_NUM 2 -#define UDMA_JFS_QPN_PREFIX 0x2 -#define UDMA_JFR_QPN_PREFIX 0x1 -#define UDMA_JETTY_QPN_PREFIX 0x3 -#define UDMA_ADDR_4K_MASK 0xfffUL -#define URMA_SEG_ACCESS_GUARD (1UL << 5) -#define UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER BIT(0) -#define UDMA_DCA_INVALID_DCA_NUM ~0U +#define HNS3_UDMA_MAP_COMMAND_MASK 0xff +#define HNS3_UDMA_MAP_INDEX_MASK 0xffffff +#define HNS3_UDMA_MAP_INDEX_SHIFT 8 +#define HNS3_UDMA_DWQE_PAGE_SIZE 65536 +#define HNS3_UDMA_JETTY_X_PREFIX_BIT_NUM 2 +#define HNS3_UDMA_JFS_QPN_PREFIX 0x2 +#define HNS3_UDMA_JFR_QPN_PREFIX 0x1 +#define HNS3_UDMA_JETTY_QPN_PREFIX 0x3 +#define HNS3_UDMA_ADDR_4K_MASK 0xfffUL +#define HNS3_URMA_SEG_ACCESS_GUARD (1UL << 5) +#define HNS3_UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER BIT(0) +#define HNS3_UDMA_DCA_INVALID_DCA_NUM ~0U enum { - UDMA_MMAP_UAR_PAGE, - UDMA_MMAP_DWQE_PAGE, - UDMA_MMAP_RESET_PAGE, - UDMA_MMAP_TYPE_DCA + HNS3_UDMA_MMAP_UAR_PAGE, + HNS3_UDMA_MMAP_DWQE_PAGE, + HNS3_UDMA_MMAP_RESET_PAGE, + HNS3_UDMA_MMAP_TYPE_DCA, }; -enum udma_jfc_init_attr_mask { - UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS = 1 << 0, +enum hns3_udma_jfc_init_attr_mask { + HNS3_UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS = 1 << 0, }; -enum udma_jfc_create_flags { - UDMA_JFC_CREATE_ENABLE_POE_MODE = 1 << 0, - UDMA_JFC_CREATE_ENABLE_NOTIFY = 1 << 1, +enum hns3_udma_jfc_create_flags { + HNS3_UDMA_JFC_CREATE_ENABLE_POE_MODE = 1 << 0, /* conflict with notify */ + HNS3_UDMA_JFC_CREATE_ENABLE_NOTIFY = 1 << 1, }; -enum 
udma_jfc_notify_mode { - UDMA_JFC_NOTIFY_MODE_64B_ALIGN, - UDMA_JFC_NOTIFY_MODE_4B_ALIGN, - UDMA_JFC_NOTIFY_MODE_DDR_64B_ALIGN, - UDMA_JFC_NOTIFY_MODE_DDR_4B_ALIGN, +enum hns3_udma_jfc_notify_mode { + HNS3_UDMA_JFC_NOTIFY_MODE_64B_ALIGN, + HNS3_UDMA_JFC_NOTIFY_MODE_4B_ALIGN, + HNS3_UDMA_JFC_NOTIFY_MODE_DDR_64B_ALIGN, + HNS3_UDMA_JFC_NOTIFY_MODE_DDR_4B_ALIGN, }; -struct udma_create_jfr_ucmd { +struct hns3_udma_create_jfr_ucmd { uint64_t buf_addr; uint64_t idx_addr; uint64_t db_addr; @@ -66,38 +66,38 @@ struct udma_create_jfr_ucmd { bool share_jfr; }; -enum udma_jfr_cap_flags { - UDMA_JFR_CAP_RECORD_DB = 1 << 0, +enum hns3_udma_jfr_cap_flags { + HNS3_UDMA_JFR_CAP_RECORD_DB = 1 << 0, }; -struct udma_create_jfr_resp { +struct hns3_udma_create_jfr_resp { uint32_t jfr_caps; uint32_t srqn; }; -struct udma_jfc_attr_ex { - uint64_t jfc_ex_mask; /* Use enum udma_jfc_init_attr_mask */ - uint64_t create_flags; /* Use enum udma_jfc_create_flags */ +struct hns3_udma_jfc_attr_ex { + uint64_t jfc_ex_mask; /* Use enum hns3_udma_jfc_init_attr_mask */ + uint64_t create_flags; /* Use enum hns3_udma_jfc_create_flags */ uint64_t notify_addr; uint8_t poe_channel; /* poe channel to use */ - uint8_t notify_mode; /* Use enum udma_jfc_notify_mode */ + uint8_t notify_mode; /* Use enum hns3_udma_jfc_notify_mode */ }; -struct udma_create_jfc_ucmd { +struct hns3_udma_create_jfc_ucmd { uint64_t buf_addr; uint64_t db_addr; - struct udma_jfc_attr_ex jfc_attr_ex; + struct hns3_udma_jfc_attr_ex jfc_attr_ex; }; -enum udma_jfc_cap_flags { - UDMA_JFC_CAP_RECORD_DB = 1 << 0, +enum hns3_udma_jfc_cap_flags { + HNS3_UDMA_JFC_CAP_RECORD_DB = 1 << 0, }; -struct udma_create_jfc_resp { +struct hns3_udma_create_jfc_resp { uint32_t jfc_caps; }; -struct udma_create_tp_ucmd { +struct hns3_udma_create_tp_ucmd { bool is_jetty; union { uint32_t jfs_id; @@ -113,20 +113,20 @@ struct udma_create_tp_ucmd { uint64_t sdb_addr; }; -struct udma_create_jetty_ucmd { - struct udma_create_tp_ucmd create_tp_ucmd; +struct 
hns3_udma_create_jetty_ucmd { + struct hns3_udma_create_tp_ucmd create_tp_ucmd; uint32_t jfr_id; uint32_t srqn; uint64_t buf_addr; uint64_t sdb_addr; }; -enum udma_qp_cap_flags { - UDMA_QP_CAP_RQ_RECORD_DB = 1 << 0, - UDMA_QP_CAP_SQ_RECORD_DB = 1 << 1, - UDMA_QP_CAP_OWNER_DB = 1 << 2, - UDMA_QP_CAP_DYNAMIC_CTX_ATTACH = 1 << 4, - UDMA_QP_CAP_DIRECT_WQE = 1 << 5, +enum hns3_udma_qp_cap_flags { + HNS3_UDMA_QP_CAP_RQ_RECORD_DB = 1 << 0, + HNS3_UDMA_QP_CAP_SQ_RECORD_DB = 1 << 1, + HNS3_UDMA_QP_CAP_OWNER_DB = 1 << 2, + HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH = 1 << 4, + HNS3_UDMA_QP_CAP_DIRECT_WQE = 1 << 5, }; struct udp_srcport { @@ -135,7 +135,7 @@ struct udp_srcport { uint8_t um_udp_range; }; -struct udma_create_tp_resp { +struct hns3_udma_create_tp_resp { uint64_t cap_flags; uint32_t qpn; uint32_t path_mtu; @@ -143,32 +143,32 @@ struct udma_create_tp_resp { uint8_t priority; }; -struct udma_create_jetty_resp { - struct udma_create_tp_resp create_tp_resp; +struct hns3_udma_create_jetty_resp { + struct hns3_udma_create_tp_resp create_tp_resp; }; -struct udma_create_jfs_ucmd { - struct udma_create_tp_ucmd create_tp_ucmd; +struct hns3_udma_create_jfs_ucmd { + struct hns3_udma_create_tp_ucmd create_tp_ucmd; }; -struct udma_create_jfs_resp { - struct udma_create_tp_resp create_tp_resp; +struct hns3_udma_create_jfs_resp { + struct hns3_udma_create_tp_resp create_tp_resp; }; -struct udma_create_ctx_ucmd { +struct hns3_udma_create_ctx_ucmd { uint32_t comp; uint32_t dca_max_qps; uint32_t dca_unit_size; }; -enum udma_context_comp_mask { +enum hns3_udma_context_comp_mask { UDMA_CONTEXT_MASK_DCA_PRIME_QPS = 1 << 0, UDMA_CONTEXT_MASK_DCA_UNIT_SIZE = 1 << 1, UDMA_CONTEXT_MASK_DCA_MAX_SIZE = 1 << 2, UDMA_CONTEXT_MASK_DCA_MIN_SIZE = 1 << 3, }; -struct udma_create_ctx_resp { +struct hns3_udma_create_ctx_resp { uint32_t num_comp_vectors; uint32_t num_qps_shift; uint32_t num_jfs_shift; @@ -250,7 +250,7 @@ struct udma_dca_query_resp { uint32_t page_count; }; -enum udma_user_ctl_handlers { 
+enum hns3_udma_user_ctl_handlers { UDMA_USER_CTL_FLUSH_CQE, UDMA_CONFIG_POE_CHANNEL, UDMA_QUERY_POE_CHANNEL, @@ -263,4 +263,4 @@ enum udma_user_ctl_handlers { UDMA_OPCODE_NUM, }; -#endif /* _UDMA_ABI_H */ +#endif /* _HNS3_UDMA_ABI_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_cmd.c b/drivers/ub/hw/hns3/hns3_udma_cmd.c index 733d60c65923abdea111227da0610a4dab403788..b294d761a9308705c0793c391c61c6d8cf1bf30c 100644 --- a/drivers/ub/hw/hns3/hns3_udma_cmd.c +++ b/drivers/ub/hw/hns3/hns3_udma_cmd.c @@ -284,13 +284,13 @@ static void dump_desc(struct udma_dev *dev, ((desc->data[SUB_OPCODE_IDX] & 0xFF) == UDMA_CMD_WRITE_CQC_TIMER_BT0)) dev_err_ratelimited(dev->dev, - "Send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n", + "send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n", desc->opcode, desc->data[0], desc->data[1], desc->data[2], desc->data[3], desc->data[4], desc->data[5], num_mailbox); else dev_info_ratelimited(dev->dev, - "Send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n", + "send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n", desc->opcode, desc->data[0], desc->data[1], desc->data[2], desc->data[3], desc->data[4], desc->data[5], num_mailbox); diff --git a/drivers/ub/hw/hns3/hns3_udma_db.c b/drivers/ub/hw/hns3/hns3_udma_db.c index 0880d4d4bcfdc791f14c43623abaf847f5f30ab8..c0f21c9bf85bd21f23c8f5c3c89a920993c4e8bc 100644 --- a/drivers/ub/hw/hns3/hns3_udma_db.c +++ b/drivers/ub/hw/hns3/hns3_udma_db.c @@ -19,18 +19,19 @@ #include "hns3_udma_device.h" #include "hns3_udma_db.h" -int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt, +int udma_db_map_user(struct udma_ucontext *udma_ctx, uint64_t virt, struct udma_db *db) { + struct udma_dev *udma_dev = to_udma_dev(udma_ctx->uctx.ub_dev); uint64_t page_addr = virt & PAGE_MASK; union ubcore_umem_flag access = {}; struct udma_user_db_page *db_page; uint32_t offset; int ret = 0; - mutex_lock(&udma_dev->pgdir_mutex); + 
mutex_lock(&udma_ctx->pgdir_mutex); - list_for_each_entry(db_page, &udma_dev->pgdir_list, list) { + list_for_each_entry(db_page, &udma_ctx->pgdir_list, list) { if (db_page->user_virt == page_addr) goto found; } @@ -53,7 +54,7 @@ int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt, goto out; } - list_add(&db_page->list, &udma_dev->pgdir_list); + list_add(&db_page->list, &udma_ctx->pgdir_list); found: offset = virt - page_addr; @@ -63,14 +64,14 @@ int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt, refcount_inc(&db_page->refcount); out: - mutex_unlock(&udma_dev->pgdir_mutex); + mutex_unlock(&udma_ctx->pgdir_mutex); return ret; } -void udma_db_unmap_user(struct udma_dev *udma_dev, struct udma_db *db) +void udma_db_unmap_user(struct udma_ucontext *udma_ctx, struct udma_db *db) { - mutex_lock(&udma_dev->pgdir_mutex); + mutex_lock(&udma_ctx->pgdir_mutex); refcount_dec(&db->user_page->refcount); if (refcount_dec_if_one(&db->user_page->refcount)) { @@ -79,5 +80,5 @@ void udma_db_unmap_user(struct udma_dev *udma_dev, struct udma_db *db) kfree(db->user_page); } - mutex_unlock(&udma_dev->pgdir_mutex); + mutex_unlock(&udma_ctx->pgdir_mutex); } diff --git a/drivers/ub/hw/hns3/hns3_udma_db.h b/drivers/ub/hw/hns3/hns3_udma_db.h index e5a230cf49c0048a5410fc36012581a35aee217e..692be21c531bd6616c4fa5e9330ec173fc8c31b1 100644 --- a/drivers/ub/hw/hns3/hns3_udma_db.h +++ b/drivers/ub/hw/hns3/hns3_udma_db.h @@ -18,9 +18,9 @@ #include "hns3_udma_device.h" -int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt, +int udma_db_map_user(struct udma_ucontext *udma_ctx, uint64_t virt, struct udma_db *db); -void udma_db_unmap_user(struct udma_dev *udma_dev, struct udma_db *db); +void udma_db_unmap_user(struct udma_ucontext *udma_ctx, struct udma_db *db); #endif /* _UDMA_DB_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_dca.c b/drivers/ub/hw/hns3/hns3_udma_dca.c index d9bef86735b365bb8e652f268807e1c678e7188c..d57fd3148f00384b8c3d03a4a7a6f102417364a3 100644 --- 
a/drivers/ub/hw/hns3/hns3_udma_dca.c +++ b/drivers/ub/hw/hns3/hns3_udma_dca.c @@ -60,7 +60,7 @@ void udma_enable_dca(struct udma_dev *dev, struct udma_qp *qp) INIT_LIST_HEAD(&cfg->aging_node); cfg->buf_id = UDMA_DCA_INVALID_BUF_ID; cfg->npages = qp->buff_size >> UDMA_HW_PAGE_SHIFT; - cfg->dcan = UDMA_DCA_INVALID_DCA_NUM; + cfg->dcan = HNS3_UDMA_DCA_INVALID_DCA_NUM; } static void stop_aging_dca_mem(struct udma_dca_ctx *ctx, @@ -181,11 +181,11 @@ static void kick_dca_buf(struct udma_dev *dev, struct udma_dca_cfg *cfg, static void free_dca_num(struct udma_dca_cfg *cfg, struct udma_dca_ctx *ctx) { - if (cfg->dcan == UDMA_DCA_INVALID_DCA_NUM) + if (cfg->dcan == HNS3_UDMA_DCA_INVALID_DCA_NUM) return; ida_free(&ctx->ida, cfg->dcan); - cfg->dcan = UDMA_DCA_INVALID_DCA_NUM; + cfg->dcan = HNS3_UDMA_DCA_INVALID_DCA_NUM; } void udma_disable_dca(struct udma_dev *dev, struct udma_qp *qp) @@ -317,7 +317,7 @@ static uint32_t alloc_dca_num(struct udma_dca_ctx *ctx) ret = ida_alloc_range(&ctx->ida, 0, ctx->max_qps - 1, GFP_KERNEL); if (ret < 0) - return UDMA_DCA_INVALID_DCA_NUM; + return HNS3_UDMA_DCA_INVALID_DCA_NUM; stop_free_dca_buf(ctx, ret); update_dca_buf_status(ctx, ret, false); @@ -418,7 +418,7 @@ static void unregister_dca_mem(struct udma_dev *dev, struct udma_dca_ctx *ctx, } static uint32_t get_udca_max_qps(struct udma_dev *udma_dev, - struct udma_create_ctx_ucmd *ucmd) + struct hns3_udma_create_ctx_ucmd *ucmd) { uint32_t qp_num = 0; @@ -450,7 +450,7 @@ static int udma_query_qpc(struct udma_dev *udma_dev, uint32_t qpn, int ret; mailbox = udma_alloc_cmd_mailbox(udma_dev); - if (IS_ERR_OR_NULL(mailbox)) { + if (IS_ERR(mailbox)) { dev_err(udma_dev->dev, "alloc mailbox failed\n"); ret = PTR_ERR(mailbox); goto alloc_mailbox_fail; @@ -599,12 +599,12 @@ int udma_register_udca(struct udma_dev *udma_dev, struct udma_ucontext *context, struct ubcore_udrv_priv *udrv_data) { struct udma_dca_ctx *dca_ctx = &context->dca_ctx; - struct udma_create_ctx_ucmd ucmd = {}; + struct 
hns3_udma_create_ctx_ucmd ucmd = {}; int max_qps; int ret; ret = copy_from_user(&ucmd, (void *)udrv_data->in_addr, - min(udrv_data->in_len, (uint32_t)sizeof(ucmd))); + min_t(uint32_t, udrv_data->in_len, (uint32_t)sizeof(ucmd))); if (ret) { dev_err(udma_dev->dev, "Failed to copy udata, ret = %d.\n", ret); @@ -1157,7 +1157,7 @@ int udma_dca_attach(struct udma_dev *dev, struct udma_dca_attach_attr *attr, cfg->attach_count++; spin_unlock(&cfg->lock); - resp->alloc_flags |= UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER; + resp->alloc_flags |= HNS3_UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER; resp->alloc_pages = cfg->npages; resp->dcan = cfg->dcan; update_dca_buf_status(ctx, cfg->dcan, true); diff --git a/drivers/ub/hw/hns3/hns3_udma_debugfs.c b/drivers/ub/hw/hns3/hns3_udma_debugfs.c index 31d514805de8ecd5064db6b0843620a360cf453d..d2f26cc36708923c34e53be019c296f83354eb0c 100644 --- a/drivers/ub/hw/hns3/hns3_udma_debugfs.c +++ b/drivers/ub/hw/hns3/hns3_udma_debugfs.c @@ -147,7 +147,7 @@ static uint64_t calc_loading_percent(size_t total, size_t free, static void dca_setup_qp_stats(struct udma_qp *qp, struct dca_qp_stats *stats) { - if (!(qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) + if (!(qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) return; stats->qpn = (uint32_t)qp->qpn; diff --git a/drivers/ub/hw/hns3/hns3_udma_device.h b/drivers/ub/hw/hns3/hns3_udma_device.h index 7cfc6ff34f95c063772281a4785a69777af752b5..8f55e25341ac0df59cf006008d15cacb87536176 100644 --- a/drivers/ub/hw/hns3/hns3_udma_device.h +++ b/drivers/ub/hw/hns3/hns3_udma_device.h @@ -441,6 +441,8 @@ struct udma_ucontext { struct udma_dca_ctx dca_ctx; void *dca_dbgfs; uint32_t eid_index; + struct list_head pgdir_list; + struct mutex pgdir_mutex; }; struct udma_cmd_context { @@ -822,8 +824,6 @@ struct udma_dev { uint64_t reset_cnt; struct udma_netdev uboe; - struct list_head pgdir_list; - struct mutex pgdir_mutex; uint8_t __iomem *reg_base; struct udma_caps caps; @@ -844,6 +844,7 @@ struct udma_dev { uint16_t func_id; 
uint32_t func_num; uint32_t cong_algo_tmpl_id; + struct udma_ida uar_ida; struct udma_jfs_table jfs_table; struct udma_jfr_table jfr_table; diff --git a/drivers/ub/hw/hns3/hns3_udma_dfx.c b/drivers/ub/hw/hns3/hns3_udma_dfx.c index 40e79f60fdd142e08d367ff83970b4dc2531d3b0..f89226edf544c8947aaa1c89fead522a75c607a9 100644 --- a/drivers/ub/hw/hns3/hns3_udma_dfx.c +++ b/drivers/ub/hw/hns3/hns3_udma_dfx.c @@ -63,7 +63,7 @@ static int udma_dfx_query_context(struct udma_dev *udma_dev, uint32_t id, mailbox = udma_alloc_cmd_mailbox(udma_dev); if (IS_ERR(mailbox)) { - dev_err(udma_dev->dev, "alloc mailbox failed\n"); + dev_err(udma_dev->dev, "alloc mailbox failed.\n"); ret = PTR_ERR(mailbox); goto alloc_mailbox_fail; } @@ -114,12 +114,12 @@ static int udma_dfx_seg_store(const char *p_buf, struct udma_dfx_info *udma_dfx) ret = udma_dfx_read_buf(str, p_buf); if (ret) { - dev_info(udma_dev->dev, "the inputing is invalid\n"); + dev_info(udma_dev->dev, "the inputing is invalid.\n"); return ret; } if (kstrtouint(str, 0, &seg_key)) { - dev_err(udma_dev->dev, "convert str failed\n"); + dev_err(udma_dev->dev, "convert str failed.\n"); return -EINVAL; } @@ -127,7 +127,7 @@ static int udma_dfx_seg_store(const char *p_buf, struct udma_dfx_info *udma_dfx) ret = udma_dfx_query_context(udma_dev, mpt_index, &mpt_entry, sizeof(mpt_entry), UDMA_CMD_QUERY_MPT); if (ret) { - dev_err(udma_dev->dev, "query seg context failed, ret = %d\n", ret); + dev_err(udma_dev->dev, "query seg context failed, ret = %d.\n", ret); return ret; } @@ -166,7 +166,7 @@ static void udma_dfx_query_sccc(struct udma_dev *udma_dev, uint32_t sccc_id) udma_dev->caps.scc_ctx_sz, UDMA_CMD_QUERY_SCCC); if (ret) { - dev_err(udma_dev->dev, "query sccc failed, ret = %d\n", ret); + dev_err(udma_dev->dev, "query sccc failed, ret = %d.\n", ret); kfree(sccc); return; } @@ -192,19 +192,19 @@ static int udma_dfx_tp_store(const char *p_buf, struct udma_dfx_info *udma_dfx) ret = udma_dfx_read_buf(str, p_buf); if (ret) { - 
dev_info(udma_dev->dev, "the inputing is invalid\n"); + dev_info(udma_dev->dev, "the inputing is invalid.\n"); return ret; } if (kstrtouint(str, 0, &tpn)) { - dev_err(udma_dev->dev, "convert str failed\n"); + dev_err(udma_dev->dev, "convert str failed.\n"); return -EINVAL; } ret = udma_dfx_query_context(udma_dev, tpn, &qp_context, sizeof(qp_context), UDMA_CMD_QUERY_QPC); if (ret) { - dev_err(udma_dev->dev, "query qp context failed, ret = %d\n", ret); + dev_err(udma_dev->dev, "query qp context failed, ret = %d.\n", ret); return ret; } @@ -319,19 +319,19 @@ static int udma_dfx_jfc_store(const char *p_buf, struct udma_dfx_info *udma_dfx) ret = udma_dfx_read_buf(str, p_buf); if (ret) { - dev_info(udma_dev->dev, "the inputing is invalid\n"); + dev_info(udma_dev->dev, "the inputing is invalid.\n"); return ret; } if (kstrtouint(str, 0, &jfcn)) { - dev_err(udma_dev->dev, "convert str failed\n"); + dev_err(udma_dev->dev, "convert str failed.\n"); return -EINVAL; } ret = udma_dfx_query_context(udma_dev, jfcn, &jfc_context, sizeof(jfc_context), UDMA_CMD_QUERY_CQC); if (ret) { - dev_info(udma_dev->dev, "query jfc context fail, ret = %d, jfcn = %u\n", + dev_info(udma_dev->dev, "query jfc context fail, ret = %d, jfcn = %u.\n", ret, jfcn); return ret; } @@ -1024,7 +1024,7 @@ static int udma_dfx_add_sysfs(struct udma_dfx_info *udma_dfx) &dev->kobj, "%s", udma_dfx->dev.dev_name); if (ret) - dev_err(drv_device, "kobject_init_and_add failed!\r\n"); + dev_err(drv_device, "kobject_init_and_add failed.\r\n"); return ret; } @@ -1102,7 +1102,7 @@ static int udma_dfx_list_init(int num) kfree(dfx->jfs_list); tpn_list_alloc_failed: kfree(dfx->tpn_list); - dev_err(drv_device, "dfx alloc list failed\n"); + dev_err(drv_device, "dfx alloc list failed.\n"); return ret; } @@ -1146,7 +1146,7 @@ static int udma_dfx_add_udma_device(struct udma_dev *udma_dev) if (udma_dev_count == MAX_UDMA_DEV) { dev_err(drv_device, - "udma dfx add device failed, g_udma_dfx_list is full\n."); + "udma dfx add device 
failed, g_udma_dfx_list is full.\n"); ret = -EINVAL; goto g_udma_dfx_list_full; } @@ -1168,17 +1168,17 @@ static int udma_dfx_add_udma_device(struct udma_dev *udma_dev) UBCORE_MAX_DEV_NAME); ret = udma_dfx_list_init(i); if (ret) { - dev_err(drv_device, "dfx add dev list failed\n"); + dev_err(drv_device, "dfx add dev list failed.\n"); goto dfx_list_init_failed; } ret = g_udma_dfx_list[i].dfx->ops->add_sysfs(g_udma_dfx_list[i].dfx); if (ret) { - dev_err(drv_device, "dfx add sysfs failed\n"); + dev_err(drv_device, "dfx add sysfs failed.\n"); goto add_sysfs_failed; } - dev_info(drv_device, "add udma device (%s) in udma dfx\n", + dev_info(drv_device, "add udma device (%s) in udma dfx.\n", g_udma_dfx_list[i].dfx->dev.dev_name); udma_dev_count++; @@ -1202,14 +1202,14 @@ static int udma_dfx_chrdev_create(struct udma_dev *udma_dev) major = register_chrdev(0, DFX_DEVICE_NAME, &chr_ops); if (major < 0) { - dev_err(udma_dev->dev, "udma dfx register the character device failed\n"); + dev_err(udma_dev->dev, "udma dfx register the character device failed.\n"); ret = major; goto device_register_failed; } drv_class = class_create(THIS_MODULE, DFX_DEVICE_NAME); if (IS_ERR(drv_class)) { - dev_err(udma_dev->dev, "udma dfx class create failed\n"); + dev_err(udma_dev->dev, "udma dfx class create failed.\n"); ret = (int)PTR_ERR(drv_class); goto class_create_failed; } @@ -1217,7 +1217,7 @@ static int udma_dfx_chrdev_create(struct udma_dev *udma_dev) drv_device = device_create(drv_class, NULL, MKDEV(major, 0), NULL, DFX_DEVICE_NAME); if (IS_ERR(drv_device)) { - dev_err(udma_dev->dev, "udma dfx create device failed\n"); + dev_err(udma_dev->dev, "udma dfx create device failed.\n"); ret = (int)PTR_ERR(drv_device); goto device_create_failed; } @@ -1268,7 +1268,7 @@ int udma_dfx_init(struct udma_dev *udma_dev) add_device_failed: if (!udma_dev_count) { - dev_info(drv_device, "udma dfx remove chr device\n"); + dev_info(drv_device, "udma dfx remove chr device.\n"); udma_dfx_chrdev_destroy(); }
chrdev_create_failed: @@ -1282,7 +1282,7 @@ static void udma_dfx_remove_udma_device(struct udma_dev *udma_dev) for (i = 0; i < MAX_UDMA_DEV; i++) { write_lock(&g_udma_dfx_list[i].rwlock); if (g_udma_dfx_list[i].dev && g_udma_dfx_list[i].dev == udma_dev) { - dev_info(drv_device, "remove udma device (%s) from udma dfx\n", + dev_info(drv_device, "remove udma device (%s) from udma dfx.\n", g_udma_dfx_list[i].dfx->dev.dev_name); g_udma_dfx_list[i].dfx->ops->del_sysfs(g_udma_dfx_list[i].dfx); udma_dfx_list_free(i); @@ -1301,13 +1301,13 @@ static void udma_dfx_remove_udma_device(struct udma_dev *udma_dev) void udma_dfx_uninit(struct udma_dev *udma_dev) { if (!udma_dev_count) { - dev_err(udma_dev->dev, "no udma dfx device\n"); + dev_err(udma_dev->dev, "no udma dfx device.\n"); return; } udma_dfx_remove_udma_device(udma_dev); if (!udma_dev_count) { - dev_info(drv_device, "udma dfx remove chr device\n"); + dev_info(drv_device, "udma dfx remove chr device.\n"); udma_dfx_chrdev_destroy(); } } diff --git a/drivers/ub/hw/hns3/hns3_udma_hem.c b/drivers/ub/hw/hns3/hns3_udma_hem.c index 4bd58a8f5c2bb3b1138561ba8a539f455734d3a4..8b55d900a6763f03789687db549280618e009a99 100644 --- a/drivers/ub/hw/hns3/hns3_udma_hem.c +++ b/drivers/ub/hw/hns3/hns3_udma_hem.c @@ -106,17 +106,17 @@ static int get_hem_table_config(struct udma_dev *udma_dev, switch (type) { case HEM_TYPE_QPC: - mhop->buf_chunk_size = 1 << (udma_dev->caps.qpc_buf_pg_sz + mhop->buf_chunk_size = 1U << (udma_dev->caps.qpc_buf_pg_sz + PAGE_SHIFT); - mhop->bt_chunk_size = 1 << (udma_dev->caps.qpc_ba_pg_sz + mhop->bt_chunk_size = 1U << (udma_dev->caps.qpc_ba_pg_sz + PAGE_SHIFT); mhop->ba_l0_num = udma_dev->caps.qpc_bt_num; mhop->hop_num = udma_dev->caps.qpc_hop_num; break; case HEM_TYPE_MTPT: - mhop->buf_chunk_size = 1 << (udma_dev->caps.mpt_buf_pg_sz + mhop->buf_chunk_size = 1U << (udma_dev->caps.mpt_buf_pg_sz + PAGE_SHIFT); - mhop->bt_chunk_size = 1 << (udma_dev->caps.mpt_ba_pg_sz + mhop->bt_chunk_size = 1U << 
(udma_dev->caps.mpt_ba_pg_sz + PAGE_SHIFT); mhop->ba_l0_num = udma_dev->caps.mpt_bt_num; mhop->hop_num = udma_dev->caps.mpt_hop_num; @@ -2001,7 +2001,7 @@ int udma_mtr_find(struct udma_dev *udma_device, struct udma_mtr *mtr, if (!mtts || !mtt_count) goto out; - npage = min(mtt_count, left); + npage = min_t(int, mtt_count, left); left -= npage; for (mtt_count = 0; (uint32_t)mtt_count < npage; mtt_count++) mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]); diff --git a/drivers/ub/hw/hns3/hns3_udma_hw.c b/drivers/ub/hw/hns3/hns3_udma_hw.c index d0ecf04238fa786d56e96b7fa938c82da5dd6a3c..3a0cafc0329a543f8376dc9aa07ae0a8c5fe543e 100644 --- a/drivers/ub/hw/hns3/hns3_udma_hw.c +++ b/drivers/ub/hw/hns3/hns3_udma_hw.c @@ -686,7 +686,7 @@ static void apply_func_caps(struct udma_dev *udma_dev) /* The following caps are not in ncl config */ caps->gmv_entry_sz = UDMA_GMV_ENTRY_SZ; caps->gmv_hop_num = UDMA_HOP_NUM_0; - caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE / + caps->gmv_entry_num = caps->gmv_bt_num * (UDMA_PAGE_SIZE / caps->gmv_entry_sz); caps->max_eid_cnt = (caps->gmv_entry_num > UDMA_MAX_EID_NUM) ? 
UDMA_MAX_EID_NUM : caps->gmv_entry_num; @@ -1256,7 +1256,7 @@ alloc_link_table_buf(struct udma_dev *udma_dev) min_size = UDMA_EXT_LLM_MIN_PAGES(udma_dev->caps.sl_num) << pg_shift; /* Alloc data table */ - size = max(size, min_size); + size = max_t(uint32_t, size, min_size); link_tbl->buf = udma_buf_alloc(udma_dev, size, pg_shift, 0); if (IS_ERR(link_tbl->buf)) return ERR_PTR(-ENOMEM); @@ -1748,7 +1748,7 @@ static void udma_init_bank(struct udma_dev *dev) dev->bank[0].next = dev->bank[0].min; qpn_shift = dev->caps.num_qps_shift - UDMA_DEFAULT_MAX_JETTY_X_SHIFT - - UDMA_JETTY_X_PREFIX_BIT_NUM; + HNS3_UDMA_JETTY_X_PREFIX_BIT_NUM; for (i = 0; i < UDMA_QP_BANK_NUM; i++) { ida_init(&dev->bank[i].ida); dev->bank[i].max = (1U << qpn_shift) / UDMA_QP_BANK_NUM - 1; diff --git a/drivers/ub/hw/hns3/hns3_udma_jetty.c b/drivers/ub/hw/hns3/hns3_udma_jetty.c index 4465997200ab18fc9725525e7dc1bbd754043830..d08c0be6922fe5dee8fc0179e4501924be9e3b86 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jetty.c +++ b/drivers/ub/hw/hns3/hns3_udma_jetty.c @@ -58,7 +58,8 @@ static void udma_fill_jetty_um_qp_attr(struct udma_dev *dev, if (jetty->ubcore_jetty.jetty_cfg.priority >= dev->caps.sl_num) { qp_attr->priority = dev->caps.sl_num > 0 ? 
dev->caps.sl_num - 1 : 0; - dev_err(dev->dev, "The setted priority (%d) should smaller than the max priority (%d), priority (%d) is used\n", + dev_err(dev->dev, + "set priority (%u) should be smaller than the MAX (%u), (%u) is used\n", jetty->ubcore_jetty.jetty_cfg.priority, dev->caps.sl_num, qp_attr->priority); } else { @@ -159,9 +160,9 @@ static void set_jetty_ext_sge_param(struct udma_jetty *jetty) if (wqe_sge_cnt) { total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt); - jetty->rc_node.sge_cnt = max(total_sge_cnt, - (uint32_t)UDMA_PAGE_SIZE / - UDMA_SGE_SIZE); + jetty->rc_node.sge_cnt = max_t(uint32_t, total_sge_cnt, + (uint32_t)UDMA_PAGE_SIZE / + UDMA_SGE_SIZE); } } @@ -217,9 +218,10 @@ static int set_jetty_buf_attr(struct udma_dev *udma_dev, static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, struct ubcore_jetty_cfg *cfg, - struct udma_create_jetty_ucmd *ucmd, + struct hns3_udma_create_jetty_ucmd *ucmd, struct ubcore_udata *udata) { + struct udma_ucontext *udma_uctx = to_udma_ucontext(udata->uctx); struct udma_buf_attr buf_attr = {}; int ret; @@ -232,7 +234,8 @@ static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, if (ret) return ret; } else { - ret = udma_db_map_user(dev, ucmd->sdb_addr, &jetty->rc_node.sdb); + ret = udma_db_map_user(udma_uctx, ucmd->sdb_addr, + &jetty->rc_node.sdb); if (ret) { dev_err(dev->dev, "failed to map user sdb_addr, ret = %d.\n", @@ -267,7 +270,7 @@ static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, dev_err(dev->dev, "failed to create WQE mtr for RC Jetty, ret = %d.\n", ret); - udma_db_unmap_user(dev, &jetty->rc_node.sdb); + udma_db_unmap_user(udma_uctx, &jetty->rc_node.sdb); return ret; } } @@ -276,7 +279,7 @@ static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, } static int alloc_common_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty, - struct udma_create_jetty_ucmd *ucmd) + struct hns3_udma_create_jetty_ucmd *ucmd) {
struct udma_jetty_table *jetty_table = &udma_dev->jetty_table; int ret; @@ -400,7 +403,7 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev, struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); - struct udma_create_jetty_ucmd ucmd = {}; + struct hns3_udma_create_jetty_ucmd ucmd = {}; struct udma_jetty *jetty; int ret; @@ -410,8 +413,8 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev, } ret = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(ucmd))); + min_t(uint32_t, udata->udrv_data->in_len, + (uint32_t)sizeof(ucmd))); if (ret) { dev_err(udma_dev->dev, "failed to copy jetty udata, ret = %d.\n", @@ -451,6 +454,7 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev, static int free_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty) { + struct udma_ucontext *udma_uctx = to_udma_ucontext(jetty->ubcore_jetty.uctx); int ret = 0; if (jetty->tp_mode == UBCORE_TP_UM) { @@ -462,7 +466,7 @@ static int free_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty) udma_destroy_qp_common(dev, &jetty->qp, NULL); } else if (jetty->tp_mode == UBCORE_TP_RC && !jetty->dca_en) { - udma_db_unmap_user(dev, &jetty->rc_node.sdb); + udma_db_unmap_user(udma_uctx, &jetty->rc_node.sdb); if (jetty->shared_jfr) udma_mtr_destroy(dev, &jetty->rc_node.mtr); } diff --git a/drivers/ub/hw/hns3/hns3_udma_jfc.c b/drivers/ub/hw/hns3/hns3_udma_jfc.c index d6362c7b56f964f45d140ca76740aaa26279af4f..580271ccc2065dfa01c3799a7256f79ae67a2fa7 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfc.c +++ b/drivers/ub/hw/hns3/hns3_udma_jfc.c @@ -58,7 +58,7 @@ static int check_jfc_cfg(struct udma_dev *udma_dev, struct ubcore_jfc_cfg *cfg) } static int check_poe_attr(struct udma_dev *udma_dev, - struct udma_jfc_attr_ex *jfc_attr_ex) + struct hns3_udma_jfc_attr_ex *jfc_attr_ex) { if (!(udma_dev->caps.flags & UDMA_CAP_FLAG_POE)) { dev_err(udma_dev->dev, "Unsupport POE JFC.\n"); @@ -69,7 
+69,7 @@ static int check_poe_attr(struct udma_dev *udma_dev, } static int check_notify_attr(struct udma_dev *udma_dev, - struct udma_jfc_attr_ex *jfc_attr_ex) + struct hns3_udma_jfc_attr_ex *jfc_attr_ex) { if (!(udma_dev->caps.flags & UDMA_CAP_FLAG_WRITE_NOTIFY)) { dev_err(udma_dev->dev, "Unsupport NOTIFY JFC.\n"); @@ -77,21 +77,21 @@ static int check_notify_attr(struct udma_dev *udma_dev, } switch (jfc_attr_ex->notify_mode) { - case UDMA_JFC_NOTIFY_MODE_4B_ALIGN: - case UDMA_JFC_NOTIFY_MODE_DDR_4B_ALIGN: + case HNS3_UDMA_JFC_NOTIFY_MODE_4B_ALIGN: + case HNS3_UDMA_JFC_NOTIFY_MODE_DDR_4B_ALIGN: break; - case UDMA_JFC_NOTIFY_MODE_64B_ALIGN: - case UDMA_JFC_NOTIFY_MODE_DDR_64B_ALIGN: - dev_err(udma_dev->dev, "Doesn't support notify mode %u\n", + case HNS3_UDMA_JFC_NOTIFY_MODE_64B_ALIGN: + case HNS3_UDMA_JFC_NOTIFY_MODE_DDR_64B_ALIGN: + dev_err(udma_dev->dev, "Doesn't support notify mode %u.\n", jfc_attr_ex->notify_mode); return -EINVAL; default: - dev_err(udma_dev->dev, "Invalid notify mode %u\n", + dev_err(udma_dev->dev, "Invalid notify mode %u.\n", jfc_attr_ex->notify_mode); return -EINVAL; } - if (jfc_attr_ex->notify_addr & UDMA_ADDR_4K_MASK) { + if (jfc_attr_ex->notify_addr & HNS3_UDMA_ADDR_4K_MASK) { dev_err(udma_dev->dev, "Notify addr should be aligned to 4k.\n"); return -EINVAL; @@ -101,19 +101,19 @@ static int check_notify_attr(struct udma_dev *udma_dev, } static int check_jfc_attr_ex(struct udma_dev *udma_dev, - struct udma_jfc_attr_ex *jfc_attr_ex) + struct hns3_udma_jfc_attr_ex *jfc_attr_ex) { int ret; switch (jfc_attr_ex->create_flags) { - case UDMA_JFC_CREATE_ENABLE_POE_MODE: + case HNS3_UDMA_JFC_CREATE_ENABLE_POE_MODE: ret = check_poe_attr(udma_dev, jfc_attr_ex); break; - case UDMA_JFC_CREATE_ENABLE_NOTIFY: + case HNS3_UDMA_JFC_CREATE_ENABLE_NOTIFY: ret = check_notify_attr(udma_dev, jfc_attr_ex); break; default: - dev_err(udma_dev->dev, "Invalid create flags %llu\n", + dev_err(udma_dev->dev, "Invalid create flags %llu.\n", jfc_attr_ex->create_flags); 
return -EINVAL; } @@ -123,7 +123,7 @@ static int check_jfc_attr_ex(struct udma_dev *udma_dev, static int check_create_jfc(struct udma_dev *udma_dev, struct ubcore_jfc_cfg *cfg, - struct udma_create_jfc_ucmd *ucmd, + struct hns3_udma_create_jfc_ucmd *ucmd, struct ubcore_udata *udata) { int ret; @@ -131,8 +131,8 @@ static int check_create_jfc(struct udma_dev *udma_dev, if (udata) { ret = copy_from_user((void *)ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(struct udma_create_jfc_ucmd))); + min_t(uint32_t, udata->udrv_data->in_len, + (uint32_t)sizeof(struct hns3_udma_create_jfc_ucmd))); if (ret) { dev_err(udma_dev->dev, "failed to copy JFC udata, ret = %d.\n", ret); @@ -147,7 +147,7 @@ static int check_create_jfc(struct udma_dev *udma_dev, } if (ucmd->jfc_attr_ex.jfc_ex_mask & - UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS) { + HNS3_UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS) { if (udma_dev->notify_addr) ucmd->jfc_attr_ex.notify_addr = udma_dev->notify_addr; @@ -168,12 +168,12 @@ static void set_jfc_param(struct udma_jfc *udma_jfc, struct ubcore_jfc_cfg *cfg) memcpy(&udma_jfc->ubcore_jfc.jfc_cfg, cfg, sizeof(struct ubcore_jfc_cfg)); } -static void init_jfc(struct udma_jfc *udma_jfc, struct udma_create_jfc_ucmd *ucmd) +static void init_jfc(struct udma_jfc *udma_jfc, struct hns3_udma_create_jfc_ucmd *ucmd) { spin_lock_init(&udma_jfc->lock); INIT_LIST_HEAD(&udma_jfc->sq_list); INIT_LIST_HEAD(&udma_jfc->rq_list); - if (ucmd->jfc_attr_ex.jfc_ex_mask & UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS) + if (ucmd->jfc_attr_ex.jfc_ex_mask & HNS3_UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS) udma_jfc->jfc_attr_ex = ucmd->jfc_attr_ex; } @@ -204,9 +204,10 @@ static void free_jfc_cqe_buf(struct udma_dev *dev, struct udma_jfc *jfc) static int alloc_jfc_buf(struct udma_dev *udma_dev, struct udma_jfc *udma_jfc, struct ubcore_udata *udata, - struct udma_create_jfc_ucmd *ucmd) + struct hns3_udma_create_jfc_ucmd *ucmd) { - struct udma_create_jfc_resp resp = {}; + struct 
udma_ucontext *udma_uctx = to_udma_ucontext(udata->uctx); + struct hns3_udma_create_jfc_resp resp = {}; int ret; ret = alloc_jfc_cqe_buf(udma_dev, udma_jfc, udata, ucmd->buf_addr); @@ -214,23 +215,23 @@ static int alloc_jfc_buf(struct udma_dev *udma_dev, struct udma_jfc *udma_jfc, return ret; if (udma_dev->caps.flags & UDMA_CAP_FLAG_CQ_RECORD_DB) { - ret = udma_db_map_user(udma_dev, ucmd->db_addr, &udma_jfc->db); + ret = udma_db_map_user(udma_uctx, ucmd->db_addr, &udma_jfc->db); if (ret) { dev_err(udma_dev->dev, "failed to map JFC db, ret = %d.\n", ret); goto db_err; } - udma_jfc->jfc_caps |= UDMA_JFC_CAP_RECORD_DB; + udma_jfc->jfc_caps |= HNS3_UDMA_JFC_CAP_RECORD_DB; } if (udata) { resp.jfc_caps = udma_jfc->jfc_caps; ret = copy_to_user((void *)udata->udrv_data->out_addr, &resp, - min(udata->udrv_data->out_len, - (uint32_t)sizeof(resp))); + min_t(uint32_t, udata->udrv_data->out_len, + (uint32_t)sizeof(resp))); if (ret) { dev_err(udma_dev->dev, - "failed to copy jfc resp, ret = %d\n", ret); + "failed to copy jfc resp, ret = %d.\n", ret); goto err_copy; } } @@ -241,8 +242,8 @@ static int alloc_jfc_buf(struct udma_dev *udma_dev, struct udma_jfc *udma_jfc, err_copy: if (udma_dev->caps.flags & UDMA_CAP_FLAG_CQ_RECORD_DB) { - udma_db_unmap_user(udma_dev, &udma_jfc->db); - udma_jfc->jfc_caps &= ~UDMA_JFC_CAP_RECORD_DB; + udma_db_unmap_user(udma_uctx, &udma_jfc->db); + udma_jfc->jfc_caps &= ~HNS3_UDMA_JFC_CAP_RECORD_DB; } db_err: free_jfc_cqe_buf(udma_dev, udma_jfc); @@ -255,7 +256,7 @@ static void set_write_notify_param(struct udma_jfc *udma_jfc, { uint8_t device_mode; - if (udma_jfc->jfc_attr_ex.notify_mode == UDMA_JFC_NOTIFY_MODE_4B_ALIGN) + if (udma_jfc->jfc_attr_ex.notify_mode == HNS3_UDMA_JFC_NOTIFY_MODE_4B_ALIGN) device_mode = UDMA_NOTIFY_DEV; else device_mode = UDMA_NOTIFY_DDR; @@ -324,7 +325,7 @@ static void udma_write_jfc_cqc(struct udma_dev *udma_dev, struct udma_jfc *udma_ dma_handle >> CQC_CQE_BA_H_OFFSET); udma_reg_write(jfc_context, CQC_CQ_MAX_CNT, 
UDMA_CQ_DEFAULT_BURST_NUM); udma_reg_write(jfc_context, CQC_CQ_PERIOD, UDMA_CQ_DEFAULT_INTERVAL); - if (udma_jfc->jfc_caps & UDMA_JFC_CAP_RECORD_DB) { + if (udma_jfc->jfc_caps & HNS3_UDMA_JFC_CAP_RECORD_DB) { udma_reg_enable(jfc_context, CQC_DB_RECORD_EN); udma_reg_write(jfc_context, CQC_CQE_DB_RECORD_ADDR_L, lower_32_bits(udma_jfc->db.dma) >> @@ -334,13 +335,13 @@ static void udma_write_jfc_cqc(struct udma_dev *udma_dev, struct udma_jfc *udma_ } if (udma_jfc->jfc_attr_ex.create_flags == - UDMA_JFC_CREATE_ENABLE_POE_MODE) { + HNS3_UDMA_JFC_CREATE_ENABLE_POE_MODE) { udma_reg_enable(jfc_context, CQC_POE_EN); udma_reg_write(jfc_context, CQC_POE_NUM, udma_jfc->jfc_attr_ex.poe_channel); } - if (udma_jfc->jfc_attr_ex.create_flags == UDMA_JFC_CREATE_ENABLE_NOTIFY) + if (udma_jfc->jfc_attr_ex.create_flags == HNS3_UDMA_JFC_CREATE_ENABLE_NOTIFY) set_write_notify_param(udma_jfc, jfc_context); } @@ -500,13 +501,15 @@ static void free_jfc_cqc(struct udma_dev *udma_dev, struct udma_jfc *udma_jfc) static void free_jfc_buf(struct udma_dev *udma_dev, struct udma_jfc *udma_jfc) { + struct udma_ucontext *udma_uctx = to_udma_ucontext(udma_jfc->ubcore_jfc.uctx); + /* wait for all interrupt processed */ if (refcount_dec_and_test(&udma_jfc->refcount)) complete(&udma_jfc->free); wait_for_completion(&udma_jfc->free); if (udma_dev->caps.flags & UDMA_CAP_FLAG_CQ_RECORD_DB) - udma_db_unmap_user(udma_dev, &udma_jfc->db); + udma_db_unmap_user(udma_uctx, &udma_jfc->db); udma_mtr_destroy(udma_dev, &udma_jfc->mtr); } @@ -571,7 +574,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *dev, struct ubcore_jfc_ struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); - struct udma_create_jfc_ucmd ucmd = {}; + struct hns3_udma_create_jfc_ucmd ucmd = {}; struct udma_jfc *udma_jfc; int ret; @@ -700,7 +703,7 @@ void udma_jfc_completion(struct udma_dev *udma_dev, uint32_t cqn) udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, cqn); if (!udma_jfc) { 
dev_warn(udma_dev->dev, - "Completion event for bogus CQ 0x%06x\n", cqn); + "Completion event for bogus CQ 0x%06x.\n", cqn); return; } @@ -718,13 +721,13 @@ void udma_jfc_event(struct udma_dev *udma_dev, uint32_t cqn, int event_type) udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, cqn); if (!udma_jfc) { - dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn); + dev_warn(dev, "Async event for bogus CQ 0x%06x.\n", cqn); return; } if (event_type != UDMA_EVENT_TYPE_JFC_ACCESS_ERROR && event_type != UDMA_EVENT_TYPE_JFC_OVERFLOW) { - dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n", + dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x.\n", event_type, cqn); return; } diff --git a/drivers/ub/hw/hns3/hns3_udma_jfc.h b/drivers/ub/hw/hns3/hns3_udma_jfc.h index 11fc6764c131ef5f9e76c7dc517f7c3eced876a7..fd41c06b3a4954a3fb1164e0105f3f48266c7689 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfc.h +++ b/drivers/ub/hw/hns3/hns3_udma_jfc.h @@ -32,7 +32,7 @@ struct udma_jfc { struct completion free; struct list_head sq_list; struct list_head rq_list; - struct udma_jfc_attr_ex jfc_attr_ex; + struct hns3_udma_jfc_attr_ex jfc_attr_ex; }; #define UDMA_JFC_CONTEXT_SIZE 16 diff --git a/drivers/ub/hw/hns3/hns3_udma_jfr.c b/drivers/ub/hw/hns3/hns3_udma_jfr.c index 09e3a7f0140da45003188a962e11447c40b97235..fa8423905c884bed306400ffdf35d01ec935a9f3 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfr.c +++ b/drivers/ub/hw/hns3/hns3_udma_jfr.c @@ -114,7 +114,7 @@ static int alloc_jfr_wqe_buf(struct udma_dev *dev, static int alloc_jfr_wqe_buf_rq(struct udma_dev *dev, struct udma_jfr *jfr, struct ubcore_udata *udata, - struct udma_create_jfr_ucmd *ucmd) + struct hns3_udma_create_jfr_ucmd *ucmd) { struct udma_buf_attr buf_attr = {}; uint32_t total_buff_size = 0; @@ -181,13 +181,14 @@ static void free_jfr_wqe_buf(struct udma_dev *dev, struct udma_jfr *jfr) static int alloc_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, struct ubcore_udata *udata) { - struct 
udma_create_jfr_ucmd ucmd = {}; + struct udma_ucontext *udma_uctx = to_udma_ucontext(udata->uctx); + struct hns3_udma_create_jfr_ucmd ucmd = {}; int ret; if (udata) { ret = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(ucmd))); + min_t(uint32_t, udata->udrv_data->in_len, + (uint32_t)sizeof(ucmd))); if (ret) { dev_err(dev->dev, "failed to copy JFR udata, ret = %d.\n", @@ -202,7 +203,7 @@ static int alloc_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, return ret; if (ucmd.wqe_buf_addr) { - jfr->jfr_caps |= UDMA_JFR_CAP_RECORD_DB; + jfr->jfr_caps |= HNS3_UDMA_JFR_CAP_RECORD_DB; ret = alloc_jfr_wqe_buf_rq(dev, jfr, udata, &ucmd); if (ret) goto err_idx; @@ -213,14 +214,14 @@ static int alloc_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, } if (dev->caps.flags & UDMA_CAP_FLAG_SRQ_RECORD_DB || - jfr->jfr_caps & UDMA_JFR_CAP_RECORD_DB) { - ret = udma_db_map_user(dev, ucmd.db_addr, &jfr->db); + jfr->jfr_caps & HNS3_UDMA_JFR_CAP_RECORD_DB) { + ret = udma_db_map_user(udma_uctx, ucmd.db_addr, &jfr->db); if (ret) { dev_err(dev->dev, "map jfr db failed, ret = %d.\n", ret); goto err_db; } - jfr->jfr_caps |= UDMA_JFR_CAP_RECORD_DB; + jfr->jfr_caps |= HNS3_UDMA_JFR_CAP_RECORD_DB; } refcount_set(&jfr->refcount, 1); @@ -319,7 +320,7 @@ static int write_jfrc(struct udma_dev *dev, struct udma_jfr *jfr, void *mb_buf) to_udma_hw_page_shift(jfr->buf_mtr.hem_cfg.ba_pg_shift)); udma_reg_write(ctx, SRQC_WQE_BUF_PG_SZ, to_udma_hw_page_shift(jfr->buf_mtr.hem_cfg.buf_pg_shift)); - if (jfr->jfr_caps & UDMA_JFR_CAP_RECORD_DB) { + if (jfr->jfr_caps & HNS3_UDMA_JFR_CAP_RECORD_DB) { udma_reg_enable(ctx, SRQC_RECORD_DB_EN); udma_reg_write(ctx, SRQC_RECORD_DB_ADDR_L, lower_32_bits(jfr->db.dma) >> @@ -526,14 +527,16 @@ static void free_jfrc(struct udma_dev *dev, struct udma_jfr *jfr) static void free_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) { + struct udma_ucontext *udma_uctx = to_udma_ucontext(jfr->ubcore_jfr.uctx); + 
if (refcount_dec_and_test(&jfr->refcount)) complete(&jfr->free); wait_for_completion(&jfr->free); if (dev->caps.flags & UDMA_CAP_FLAG_SRQ_RECORD_DB || - jfr->jfr_caps & UDMA_JFR_CAP_RECORD_DB) - udma_db_unmap_user(dev, &jfr->db); + jfr->jfr_caps & HNS3_UDMA_JFR_CAP_RECORD_DB) + udma_db_unmap_user(udma_uctx, &jfr->db); free_jfr_wqe_buf(dev, jfr); free_jfr_idx(dev, jfr); @@ -624,7 +627,7 @@ struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_ struct ubcore_udata *udata) { struct udma_dev *udma_dev = to_udma_dev(dev); - struct udma_create_jfr_resp resp = {}; + struct hns3_udma_create_jfr_resp resp = {}; struct udma_jfr *jfr; int ret; @@ -659,8 +662,8 @@ struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_ resp.jfr_caps = jfr->jfr_caps; resp.srqn = jfr->srqn; ret = copy_to_user((void *)udata->udrv_data->out_addr, &resp, - min(udata->udrv_data->out_len, - (uint32_t)sizeof(resp))); + min_t(uint32_t, udata->udrv_data->out_len, + (uint32_t)sizeof(resp))); if (ret) { dev_err(udma_dev->dev, "failed to copy jfr resp, ret = %d.\n", ret); diff --git a/drivers/ub/hw/hns3/hns3_udma_jfs.c b/drivers/ub/hw/hns3/hns3_udma_jfs.c index a5f8a411becc477dc86b59090cf12725246e618c..66874e99824841683c1379f93cd590fe7b337219 100644 --- a/drivers/ub/hw/hns3/hns3_udma_jfs.c +++ b/drivers/ub/hw/hns3/hns3_udma_jfs.c @@ -88,7 +88,7 @@ static void udma_fill_jfs_um_qp_attr(struct udma_dev *dev, struct udma_jfs *jfs, qp_attr->priority = dev->caps.sl_num > 0 ? 
dev->caps.sl_num - 1 : 0; dev_err(dev->dev, - "set priority (%u) should smaller than the MAC (%u), (%u) is used\n", + "set priority (%u) should be smaller than the MAX (%u), (%u) is used\n", jfs->ubcore_jfs.jfs_cfg.priority, dev->caps.sl_num, qp_attr->priority); } else { @@ -135,13 +135,13 @@ static int alloc_jfs_buf(struct udma_dev *udma_dev, struct udma_jfs *jfs, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata) { - struct udma_create_jfs_ucmd ucmd = {}; + struct hns3_udma_create_jfs_ucmd ucmd = {}; int ret = 0; if (udata) { ret = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(ucmd))); + min_t(uint32_t, udata->udrv_data->in_len, + (uint32_t)sizeof(ucmd))); if (ret) { dev_err(udma_dev->dev, "failed to copy jfs udata, ret = %d.\n", ret); diff --git a/drivers/ub/hw/hns3/hns3_udma_main.c b/drivers/ub/hw/hns3/hns3_udma_main.c index 2b8d429b57ed8f8c374f5e56bd9ecd572d75e896..5cf864eee70d6f12aa7c9654512a30e6df39f665 100644 --- a/drivers/ub/hw/hns3/hns3_udma_main.c +++ b/drivers/ub/hw/hns3/hns3_udma_main.c @@ -68,7 +68,7 @@ static int udma_uar_alloc(struct udma_dev *udma_dev, struct udma_uar *uar) static int udma_init_ctx_resp(struct udma_dev *dev, struct ubcore_udrv_priv *udrv_data, struct udma_dca_ctx *dca_ctx) { - struct udma_create_ctx_resp resp = {}; + struct hns3_udma_create_ctx_resp resp = {}; int ret; resp.num_comp_vectors = dev->caps.num_comp_vectors; @@ -96,7 +96,8 @@ static int udma_init_ctx_resp(struct udma_dev *dev, struct ubcore_udrv_priv *udr } ret = copy_to_user((void *)udrv_data->out_addr, &resp, - min(udrv_data->out_len, (uint32_t)sizeof(resp))); + min_t(uint32_t, udrv_data->out_len, + (uint32_t)sizeof(resp))); if (ret) dev_err(dev->dev, "copy ctx resp to user failed, ret = %d.\n", ret); @@ -110,6 +111,16 @@ static void udma_uar_free(struct udma_dev *udma_dev, ida_free(&udma_dev->uar_ida.ida, (int)context->uar.logic_idx); } +static void init_ucontext_list(struct udma_dev *udma_dev, + struct
udma_ucontext *uctx) +{ + if (udma_dev->caps.flags & UDMA_CAP_FLAG_CQ_RECORD_DB || + udma_dev->caps.flags & UDMA_CAP_FLAG_QP_RECORD_DB) { + INIT_LIST_HEAD(&uctx->pgdir_list); + mutex_init(&uctx->pgdir_mutex); + } +} + static struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, struct ubcore_udrv_priv *udrv_data) @@ -159,6 +170,8 @@ static struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *dev, UDMA_CAP_FLAG_DCA_MODE) udma_register_uctx_debugfs(udma_dev, context); + init_ucontext_list(udma_dev, context); + return &context->uctx; err_alloc_uar: @@ -183,12 +196,12 @@ static int udma_free_ucontext(struct ubcore_ucontext *uctx) static int get_mmap_cmd(struct vm_area_struct *vma) { - return (vma->vm_pgoff & MAP_COMMAND_MASK); + return (vma->vm_pgoff & HNS3_UDMA_MAP_COMMAND_MASK); } static uint64_t get_mmap_idx(struct vm_area_struct *vma) { - return ((vma->vm_pgoff >> MAP_INDEX_SHIFT) & MAP_INDEX_MASK); + return ((vma->vm_pgoff >> HNS3_UDMA_MAP_INDEX_SHIFT) & HNS3_UDMA_MAP_INDEX_MASK); } static int mmap_dca(struct ubcore_ucontext *context, struct vm_area_struct *vma) @@ -238,23 +251,23 @@ static int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) cmd = get_mmap_cmd(vma); switch (cmd) { - case UDMA_MMAP_UAR_PAGE: + case HNS3_UDMA_MMAP_UAR_PAGE: vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, to_udma_ucontext(uctx)->uar.pfn, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; break; - case UDMA_MMAP_DWQE_PAGE: + case HNS3_UDMA_MMAP_DWQE_PAGE: vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); qpn = get_mmap_idx(vma); - address = udma_dev->dwqe_page + qpn * UDMA_DWQE_PAGE_SIZE; + address = udma_dev->dwqe_page + qpn * HNS3_UDMA_DWQE_PAGE_SIZE; if (io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - UDMA_DWQE_PAGE_SIZE, vma->vm_page_prot)) + HNS3_UDMA_DWQE_PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; break; - case UDMA_MMAP_RESET_PAGE: + 
case HNS3_UDMA_MMAP_RESET_PAGE: if (vma->vm_flags & (VM_WRITE | VM_EXEC)) return -EINVAL; @@ -263,7 +276,7 @@ static int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; break; - case UDMA_MMAP_TYPE_DCA: + case HNS3_UDMA_MMAP_TYPE_DCA: if (mmap_dca(uctx, vma)) return -EAGAIN; break; @@ -735,12 +748,6 @@ int udma_setup_hca(struct udma_dev *udma_dev) INIT_LIST_HEAD(&udma_dev->dip_list); spin_lock_init(&udma_dev->dip_list_lock); - if (udma_dev->caps.flags & UDMA_CAP_FLAG_CQ_RECORD_DB || - udma_dev->caps.flags & UDMA_CAP_FLAG_QP_RECORD_DB) { - INIT_LIST_HEAD(&udma_dev->pgdir_list); - mutex_init(&udma_dev->pgdir_mutex); - } - udma_init_uar_table(udma_dev); ret = udma_init_qp_table(udma_dev); @@ -987,10 +994,10 @@ static void udma_set_devname(struct udma_dev *udma_dev, struct ubcore_device *ub_dev) { if (strncasecmp(ub_dev->netdev->name, UB_DEV_BASE_NAME, UB_DEV_NAME_SHIFT)) - scnprintf(udma_dev->dev_name, UBCORE_MAX_DEV_NAME, "udma_c%ud%uf%u", + scnprintf(udma_dev->dev_name, UBCORE_MAX_DEV_NAME, "hns3_udma_c%ud%uf%u", udma_dev->chip_id, udma_dev->die_id, udma_dev->func_id); else - scnprintf(udma_dev->dev_name, UBCORE_MAX_DEV_NAME, "udma%s", + scnprintf(udma_dev->dev_name, UBCORE_MAX_DEV_NAME, "hns3_udma%s", ub_dev->netdev->name + UB_DEV_NAME_SHIFT); dev_info(udma_dev->dev, "Set dev_name %s\n", udma_dev->dev_name); diff --git a/drivers/ub/hw/hns3/hns3_udma_qp.c b/drivers/ub/hw/hns3/hns3_udma_qp.c index 204132e54fec87cf384d10edd575740e33a1fd67..c93cc78573464ff23f5429fd5cc2dda5f7f3fef3 100644 --- a/drivers/ub/hw/hns3/hns3_udma_qp.c +++ b/drivers/ub/hw/hns3/hns3_udma_qp.c @@ -349,7 +349,7 @@ static void edit_qpc_for_inline(struct udma_qp_context *context, static void edit_qpc_for_db(struct udma_qp_context *context, struct udma_qp_context *context_mask, struct udma_qp *qp) { - if (qp->en_flags & UDMA_QP_CAP_RQ_RECORD_DB) { + if (qp->en_flags & HNS3_UDMA_QP_CAP_RQ_RECORD_DB) { udma_reg_enable(context, 
QPC_RQ_RECORD_EN); udma_reg_clear(context_mask, QPC_RQ_RECORD_EN); if (is_rc_jetty(&qp->qp_attr) && !qp->qp_attr.jetty->shared_jfr && @@ -365,7 +365,7 @@ static void edit_qpc_for_db(struct udma_qp_context *context, struct udma_qp_cont } } - if (qp->en_flags & UDMA_QP_CAP_OWNER_DB) { + if (qp->en_flags & HNS3_UDMA_QP_CAP_OWNER_DB) { udma_reg_enable(context, QPC_OWNER_MODE); udma_reg_clear(context_mask, QPC_OWNER_MODE); } @@ -544,7 +544,7 @@ static int modify_qp_rtr_to_rts(struct udma_qp *qp, udma_reg_clear(context_mask, QPC_TX_CQN); } - if (qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) { + if (qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) { udma_reg_enable(context, QPC_DCA_MODE); udma_reg_clear(context_mask, QPC_DCA_MODE); } @@ -903,7 +903,7 @@ static int udma_set_abs_fields(struct udma_qp *qp, ret = modify_qp_reset_to_rtr(qp, attr, context, context_mask); if (ret) { dev_err(udma_device->dev, - "Something went wrong during reset to rtr, new_state = %d.\n", + "something went wrong during reset to rtr, new_state = %d.\n", new_state); goto out; } @@ -911,14 +911,14 @@ static int udma_set_abs_fields(struct udma_qp *qp, ret = modify_qp_reset_to_rtr(qp, attr, context, context_mask); if (ret) { dev_err(udma_device->dev, - "Something went wrong during reset to rtr, new_state = %d.\n", + "something went wrong during reset to rtr, new_state = %d.\n", new_state); goto out; } ret = modify_qp_rtr_to_rts(qp, context, context_mask); if (ret) { dev_err(udma_device->dev, - "Something went wrong during rtr to rts, new_state = %d.\n", + "something went wrong during rtr to rts, new_state = %d.\n", new_state); goto out; } @@ -926,7 +926,7 @@ static int udma_set_abs_fields(struct udma_qp *qp, ret = modify_qp_rtr_to_rts(qp, context, context_mask); if (ret) { dev_err(udma_device->dev, - "Something went wrong during rtr to rts, curr_state = %d.\n", + "something went wrong during rtr to rts, curr_state = %d.\n", curr_state); goto out; } @@ -1000,7 +1000,7 @@ int 
udma_modify_qp_common(struct udma_qp *qp, qp->state = new_state; if (qp->qp_type == QPT_RC && - qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) + qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) udma_modify_dca(udma_device, qp); out: @@ -1008,7 +1008,7 @@ int udma_modify_qp_common(struct udma_qp *qp, } int fill_jfs_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { struct udma_jfs *udma_jfs; struct ubcore_jfs *jfs; @@ -1049,7 +1049,7 @@ int fill_jfs_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, } int fill_jfr_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { struct udma_jfr *udma_jfr; struct ubcore_jfr *jfr; @@ -1085,7 +1085,7 @@ int fill_jfr_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, } int fill_jetty_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { struct udma_jetty *udma_jetty; struct ubcore_jetty *jetty; @@ -1096,8 +1096,7 @@ int fill_jetty_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, qp_attr->tgt_id = qp_attr->is_tgt ? 
ucmd->ini_id.jetty_id : ucmd->tgt_id.jetty_id; - udma_jetty = (struct udma_jetty *)xa_load(&udma_dev->jetty_table.xa, - jetty_id); + udma_jetty = (struct udma_jetty *)xa_load(&udma_dev->jetty_table.xa, jetty_id); if (IS_ERR_OR_NULL(udma_jetty)) { dev_err(udma_dev->dev, "failed to find jetty, id = %u.\n", jetty_id); return -EINVAL; @@ -1153,7 +1152,7 @@ int udma_fill_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata) { bool is_target = cfg->flag.bs.target; - struct udma_create_tp_ucmd ucmd; + struct hns3_udma_create_tp_ucmd ucmd; struct udma_ucontext *udma_ctx; int status, eid_index; @@ -1161,8 +1160,8 @@ int udma_fill_qp_attr(struct udma_dev *udma_dev, struct udma_qp_attr *qp_attr, return 0; status = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(ucmd))); + min_t(uint32_t, udata->udrv_data->in_len, + (uint32_t)sizeof(ucmd))); if (status) { dev_err(udma_dev->dev, "failed to copy create tp ucmd, status = %d.\n", status); @@ -1226,7 +1225,7 @@ static void set_ext_sge_param(struct udma_dev *udma_dev, uint32_t sq_wqe_cnt, ext_sge_cnt = max_inline_data / UDMA_SGE_SIZE; /* Select the max data set by the user */ - qp->sq.max_gs = max(ext_sge_cnt, cap->max_send_sge); + qp->sq.max_gs = max_t(uint32_t, ext_sge_cnt, cap->max_send_sge); if (is_rc_jetty(&qp->qp_attr)) qp->sge.offset = qp->qp_attr.jetty->rc_node.sge_offset; @@ -1237,12 +1236,12 @@ static void set_ext_sge_param(struct udma_dev *udma_dev, uint32_t sq_wqe_cnt, */ if (wqe_sge_cnt) { total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt); - qp->sge.sge_cnt = max(total_sge_cnt, - (uint32_t)UDMA_PAGE_SIZE / UDMA_SGE_SIZE); + qp->sge.sge_cnt = max_t(uint32_t, total_sge_cnt, + (uint32_t)(UDMA_PAGE_SIZE / UDMA_SGE_SIZE)); } /* Ensure that the max_gs size does not exceed */ - qp->sq.max_gs = min(qp->sq.max_gs, udma_dev->caps.max_sq_sg); + qp->sq.max_gs = min_t(uint32_t, qp->sq.max_gs, 
udma_dev->caps.max_sq_sg); } static void set_rq_size(struct udma_dev *udma_dev, struct udma_qp *qp, struct udma_qp_cap *cap) @@ -1283,7 +1282,7 @@ static int set_user_sq_size(struct udma_dev *udma_dev, struct udma_qp *qp, static int set_qp_param(struct udma_dev *udma_dev, struct udma_qp *qp, struct ubcore_udata *udata, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { struct udma_qp_attr *qp_attr = &qp->qp_attr; struct device *dev = udma_dev->dev; @@ -1315,8 +1314,8 @@ static int set_qp_param(struct udma_dev *udma_dev, struct udma_qp *qp, if (!qp_attr->is_tgt) { ret = copy_from_user(ucmd, (void *)udata->udrv_data->in_addr, - min(udata->udrv_data->in_len, - (uint32_t)sizeof(struct udma_create_tp_ucmd))); + min_t(uint32_t, udata->udrv_data->in_len, + (uint32_t)sizeof(struct hns3_udma_create_tp_ucmd))); if (ret) { dev_err(dev, "failed to copy create tp ucmd\n"); return ret; @@ -1456,7 +1455,7 @@ void init_jetty_x_qpn_bitmap(struct udma_dev *dev, int qpn_shift; qpn_shift = dev->caps.num_qps_shift - jetty_x_shift - - UDMA_JETTY_X_PREFIX_BIT_NUM; + HNS3_UDMA_JETTY_X_PREFIX_BIT_NUM; if (qpn_shift <= QPN_SHIFT_MIN) { qpn_map->qpn_shift = 0; return; @@ -1464,7 +1463,7 @@ void init_jetty_x_qpn_bitmap(struct udma_dev *dev, qpn_map->qpn_prefix = prefix << (dev->caps.num_qps_shift - - UDMA_JETTY_X_PREFIX_BIT_NUM); + HNS3_UDMA_JETTY_X_PREFIX_BIT_NUM); qpn_map->jid = jid; init_qpn_bitmap(qpn_map, qpn_shift); } @@ -1546,10 +1545,10 @@ static int alloc_wqe_buf(struct udma_dev *dev, struct udma_qp *qp, if (dca_en) { /* DCA must be enabled after the buffer attr is configured. 
*/ udma_enable_dca(dev, qp); - qp->en_flags |= UDMA_QP_CAP_DYNAMIC_CTX_ATTACH; + qp->en_flags |= HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH; } else if ((PAGE_SIZE <= UDMA_DWQE_SIZE) && (dev->caps.flags & UDMA_CAP_FLAG_DIRECT_WQE)) { - qp->en_flags |= UDMA_QP_CAP_DIRECT_WQE; + qp->en_flags |= HNS3_UDMA_QP_CAP_DIRECT_WQE; } ret = udma_mtr_create(dev, &qp->mtr, buf_attr, @@ -1599,36 +1598,38 @@ static int alloc_qp_wqe(struct udma_dev *udma_dev, struct udma_qp *qp, } static int alloc_user_qp_db(struct udma_dev *udma_dev, struct udma_qp *qp, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { int ret; if (!ucmd->sdb_addr) return 0; - ret = udma_db_map_user(udma_dev, ucmd->sdb_addr, &qp->sdb); + ret = udma_db_map_user(qp->udma_uctx, ucmd->sdb_addr, &qp->sdb); if (ret) { dev_err(udma_dev->dev, "failed to map user sdb_addr, ret = %d.\n", ret); return ret; } - qp->en_flags |= UDMA_QP_CAP_SQ_RECORD_DB; + qp->en_flags |= HNS3_UDMA_QP_CAP_SQ_RECORD_DB; return 0; } static int alloc_qp_db(struct udma_dev *udma_dev, struct udma_qp *qp, struct ubcore_udata *udata, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { int ret = 0; if (udma_dev->caps.flags & UDMA_CAP_FLAG_SDI_MODE) - qp->en_flags |= UDMA_QP_CAP_OWNER_DB; + qp->en_flags |= HNS3_UDMA_QP_CAP_OWNER_DB; - if (udata) + if (udata) { + qp->udma_uctx = to_udma_ucontext(udata->uctx); ret = alloc_user_qp_db(udma_dev, qp, ucmd); + } return ret; } @@ -1849,23 +1850,23 @@ static void free_qpc(struct udma_dev *udma_dev, struct udma_qp *qp) static void free_qp_db(struct udma_dev *udma_dev, struct udma_qp *qp) { if ((is_rc_jetty(&qp->qp_attr) && - !(qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) || + !(qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) || qp->no_free_wqe_buf) return; - if (qp->en_flags & UDMA_QP_CAP_SQ_RECORD_DB) - udma_db_unmap_user(udma_dev, &qp->sdb); + if (qp->en_flags & HNS3_UDMA_QP_CAP_SQ_RECORD_DB) + udma_db_unmap_user(qp->udma_uctx, &qp->sdb); } static void 
free_wqe_buf(struct udma_dev *dev, struct udma_qp *qp) { if ((is_rc_jetty(&qp->qp_attr) && - !(qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) || + !(qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) || qp->no_free_wqe_buf) return; udma_mtr_destroy(dev, &qp->mtr); - if (qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) + if (qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) udma_disable_dca(dev, qp); } @@ -1908,22 +1909,22 @@ static uint32_t udma_get_jetty_qpn(struct udma_qp *qp) static int udma_alloc_qp_sq(struct udma_dev *udma_dev, struct udma_qp *qp, struct ubcore_udata *udata, - struct udma_create_tp_ucmd *ucmd) + struct hns3_udma_create_tp_ucmd *ucmd) { struct udma_qp_attr *qp_attr = &qp->qp_attr; int ret = 0; if (is_rc_jetty(qp_attr)) { qp->sdb = qp_attr->jetty->rc_node.sdb; - qp->en_flags |= UDMA_QP_CAP_SQ_RECORD_DB; + qp->en_flags |= HNS3_UDMA_QP_CAP_SQ_RECORD_DB; if (!qp_attr->jetty->shared_jfr && !qp_attr->jetty->dca_en) - qp->en_flags |= UDMA_QP_CAP_RQ_RECORD_DB; + qp->en_flags |= HNS3_UDMA_QP_CAP_RQ_RECORD_DB; qp->dca_ctx = &qp_attr->jetty->rc_node.context->dca_ctx; if (qp_attr->jetty->rc_node.buf_addr) { qp->mtr = qp_attr->jetty->rc_node.mtr; if ((PAGE_SIZE <= UDMA_DWQE_SIZE) && (udma_dev->caps.flags & UDMA_CAP_FLAG_DIRECT_WQE)) - qp->en_flags |= UDMA_QP_CAP_DIRECT_WQE; + qp->en_flags |= HNS3_UDMA_QP_CAP_DIRECT_WQE; } else { ret = alloc_qp_wqe(udma_dev, qp, qp_attr->jetty->rc_node.buf_addr); if (ret) @@ -1969,9 +1970,9 @@ int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, { struct udma_ucontext *uctx = to_udma_ucontext(udata->uctx); struct udma_qp_attr *qp_attr = &qp->qp_attr; - struct udma_create_tp_resp resp = {}; + struct hns3_udma_create_tp_resp resp = {}; + struct hns3_udma_create_tp_ucmd ucmd; struct device *dev = udma_dev->dev; - struct udma_create_tp_ucmd ucmd; int ret; qp->state = QPS_RESET; @@ -2030,8 +2031,8 @@ int udma_create_qp_common(struct udma_dev *udma_dev, struct udma_qp *qp, resp.um_srcport.um_udp_range = 
(uint8_t)um_udp_range + UDP_RANGE_BASE; ret = copy_to_user((void *)udata->udrv_data->out_addr, &resp, - min(udata->udrv_data->out_len, - (uint32_t)sizeof(resp))); + min_t(uint32_t, udata->udrv_data->out_len, + (uint32_t)sizeof(resp))); if (ret) { dev_err(dev, "copy qp resp failed!\n"); goto err_copy; diff --git a/drivers/ub/hw/hns3/hns3_udma_qp.h b/drivers/ub/hw/hns3/hns3_udma_qp.h index 1b913cbc96d4591810991511165b02c8883c9e64..7939d31e2ddda50d71fd80ed208efafc428efda6 100644 --- a/drivers/ub/hw/hns3/hns3_udma_qp.h +++ b/drivers/ub/hw/hns3/hns3_udma_qp.h @@ -261,6 +261,7 @@ struct udma_dca_cfg { struct udma_qp { struct udma_dev *udma_device; + struct udma_ucontext *udma_uctx; enum udma_qp_type qp_type; struct udma_qp_attr qp_attr; struct udma_wq sq; diff --git a/drivers/ub/hw/hns3/hns3_udma_segment.c b/drivers/ub/hw/hns3/hns3_udma_segment.c index 05270cd4a90aceb9647e10a15533492ed870093c..e3457f9fc62d98dd42b71b97f943c728bffca915 100644 --- a/drivers/ub/hw/hns3/hns3_udma_segment.c +++ b/drivers/ub/hw/hns3/hns3_udma_segment.c @@ -68,7 +68,7 @@ static int alloc_seg_key(struct udma_dev *udma_dev, struct udma_seg *seg) id = ida_alloc_range(&seg_ida->ida, seg_ida->min, seg_ida->max, GFP_KERNEL); if (id < 0) { - dev_err(udma_dev->dev, "failed to alloc id for MR key, id(%d)\n", + dev_err(udma_dev->dev, "failed to alloc id for MR key, id(%d).\n", id); return -ENOMEM; } @@ -91,13 +91,39 @@ static int alloc_seg_key(struct udma_dev *udma_dev, struct udma_seg *seg) return err; } +static uint64_t get_continuos_mem_size(uint64_t seg_addr) +{ +#define HNS3_DOUBLE 2 + return seg_addr / HNS3_UDMA_KB * HNS3_DOUBLE; +} + +static void get_pbl_addr_level(struct udma_seg *seg, struct udma_dev *udma_dev) +{ + uint64_t cont_mem_size; + uint64_t seg_size; + uint64_t page_num; + + if (seg->size <= SEG_MEM_SIZE_1G) { + seg->pbl_hop_num = UDMA_PBL_HOP_NUM - 1U; + seg_size = roundup_pow_of_two(seg->size); + cont_mem_size = get_continuos_mem_size(seg_size); + page_num = cont_mem_size / PAGE_SIZE; 
+ udma_dev->caps.pbl_ba_pg_sz = ilog2(roundup_pow_of_two(page_num)); + } else { + seg->pbl_hop_num = udma_dev->caps.pbl_hop_num; + udma_dev->caps.pbl_ba_pg_sz = UDMA_BA_PG_SZ_SUPPORTED_16K; + } +} + + static int alloc_seg_pbl(struct udma_dev *udma_dev, struct udma_seg *seg, bool is_user) { struct udma_buf_attr buf_attr = {}; int err; - seg->pbl_hop_num = udma_dev->caps.pbl_hop_num; + get_pbl_addr_level(seg, udma_dev); + buf_attr.page_shift = PAGE_SHIFT; buf_attr.region[0].size = seg->size; buf_attr.region[0].hopnum = seg->pbl_hop_num; @@ -249,7 +275,7 @@ static void store_seg_id(struct udma_dev *udma_dev, struct udma_seg *seg) udma_eid = (struct udma_eid *)xa_load(&udma_dev->eid_table, seg->ctx->eid_index); if (IS_ERR_OR_NULL(udma_eid)) { - dev_err(udma_dev->dev, "failed to find eid, index = %d\n.", + dev_err(udma_dev->dev, "failed to find eid, index = %d.\n", seg->ctx->eid_index); return; } @@ -332,7 +358,7 @@ struct ubcore_target_seg *udma_register_seg(struct ubcore_device *dev, struct udma_seg *seg; int ret; - if (cfg->flag.bs.access >= URMA_SEG_ACCESS_GUARD) { + if (cfg->flag.bs.access >= HNS3_URMA_SEG_ACCESS_GUARD) { dev_err(udma_dev->dev, "invalid segment access 0x%x.\n", cfg->flag.bs.access); return NULL; diff --git a/drivers/ub/hw/hns3/hns3_udma_segment.h b/drivers/ub/hw/hns3/hns3_udma_segment.h index b3d2175439758b3aa14b5bc13019b379b35cd662..d05595c1cfbef11b8e4218992803f36aa7cbb243 100644 --- a/drivers/ub/hw/hns3/hns3_udma_segment.h +++ b/drivers/ub/hw/hns3/hns3_udma_segment.h @@ -28,6 +28,8 @@ #define PA_PAGE_SHIFT 6 #define MPT_VA_H_SHIFT 32 #define MPT_LEN_H_SHIFT 32 +#define HNS3_UDMA_KB 0x400 +#define SEG_MEM_SIZE_1G 0X40000000 // 1GB enum { MPT_ST_VALID = 0x1, diff --git a/drivers/ub/hw/hns3/hns3_udma_tp.c b/drivers/ub/hw/hns3/hns3_udma_tp.c index d90c86bee3e34226c7a6a7a2ece096335104f3e2..02f77d6b0a5defbcd961b96d2b658df36fee756e 100644 --- a/drivers/ub/hw/hns3/hns3_udma_tp.c +++ b/drivers/ub/hw/hns3/hns3_udma_tp.c @@ -299,7 +299,7 @@ static void 
copy_attr_to_pre_tp(struct udma_dev *udma_device, to_qp->dca_ctx = from_qp->dca_ctx; to_qp->en_flags = from_qp->en_flags; to_qp->buff_size = from_qp->buff_size; - if (to_qp->en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) + if (to_qp->en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH) udma_enable_dca(udma_device, to_qp); udma_mtr_move(&from_qp->mtr, &to_qp->mtr); @@ -333,7 +333,7 @@ static int udma_store_jetty_tp(struct udma_dev *udma_device, tjetty_hash = udma_get_jetty_hash(&jetty->rc_node.tjetty_id); if (tjetty_hash == hash && - (tp->qp.en_flags & UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) { + (tp->qp.en_flags & HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH)) { copy_attr_to_pre_tp(udma_device, &tp->qp, &jetty->rc_node.tp->qp); *fail_ret_tp = &jetty->rc_node.tp->ubcore_tp; diff --git a/drivers/ub/hw/hns3/hns3_udma_user_ctl.c b/drivers/ub/hw/hns3/hns3_udma_user_ctl.c index 45ddc65e1b61809b71ffcf80a3e91c3f67797955..98508b559ff922846dff3bfc8696d9f0caa60f24 100644 --- a/drivers/ub/hw/hns3/hns3_udma_user_ctl.c +++ b/drivers/ub/hw/hns3/hns3_udma_user_ctl.c @@ -31,7 +31,7 @@ int udma_user_ctl_flush_cqe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl ret = (int)copy_from_user(&fcp, (void *)in->addr, sizeof(struct flush_cqe_param)); - if (ret != 0) { + if (ret) { dev_err(udma_device->dev, "copy_from_user failed in flush_cqe, ret:%d.\n", ret); return -EFAULT; @@ -203,8 +203,8 @@ int udma_user_ctl_query_poe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl struct ubcore_user_ctl_out *out, struct ubcore_udrv_priv *udrv_data) { - struct udma_poe_info poe_info_out; - struct udma_poe_info poe_info_in; + struct udma_poe_info poe_info_out = {}; + struct udma_poe_info poe_info_in = {}; struct udma_dev *udma_device; uint64_t poe_addr; bool poe_en; @@ -243,9 +243,9 @@ int udma_user_ctl_query_poe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl poe_info_out.en = poe_en ? 
1 : 0; poe_info_out.poe_addr = poe_addr; ret = (int)copy_to_user((void *)out->addr, &poe_info_out, - min(out->len, - (uint32_t)sizeof(struct udma_poe_info))); - if (ret != 0) { + min_t(uint32_t, out->len, + (uint32_t)sizeof(struct udma_poe_info))); + if (ret) { dev_err(udma_device->dev, "cp to user failed in query poe, ret:%d.\n", ret); return -EFAULT; @@ -332,10 +332,10 @@ int udma_user_ctl_dca_shrink(struct ubcore_ucontext *uctx, struct ubcore_user_ct } ret = (int)copy_to_user((void *)out->addr, &shrink_resp, - min(out->len, - (uint32_t)sizeof(struct udma_dca_shrink_resp))); + min_t(uint32_t, out->len, + (uint32_t)sizeof(struct udma_dca_shrink_resp))); if (ret) { - dev_err(udma_device->dev, "cp to user failed in dca_shrink, ret:%d.\n", + dev_err(udma_device->dev, "cp to user failed in dca shrink, ret:%d.\n", ret); return -EFAULT; } @@ -368,8 +368,8 @@ int udma_user_ctl_dca_attach(struct ubcore_ucontext *uctx, struct ubcore_user_ct } ret = (int)copy_to_user((void *)out->addr, &resp, - min(out->len, - (uint32_t)sizeof(struct udma_dca_attach_resp))); + min_t(uint32_t, out->len, + (uint32_t)sizeof(struct udma_dca_attach_resp))); if (ret) { udma_dca_disattach(udma_device, &attr); dev_err(udma_device->dev, "cp to user failed in dca_attach, ret:%d.\n", @@ -426,8 +426,8 @@ int udma_user_ctl_dca_query(struct ubcore_ucontext *uctx, struct ubcore_user_ctl } ret = (int)copy_to_user((void *)out->addr, &resp, - min(out->len, - (uint32_t)sizeof(struct udma_dca_query_resp))); + min_t(uint32_t, out->len, + (uint32_t)sizeof(struct udma_dca_query_resp))); if (ret) { dev_err(udma_device->dev, "cp to user failed in dca_query, ret:%d.\n", ret); @@ -476,7 +476,7 @@ static int udma_k_user_ctl_config_poe_chl(struct udma_dev *dev, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { - struct udma_user_ctl_cfg_poe_channel_in cfg_in; + struct hns3_udma_user_ctl_cfg_poe_channel_in cfg_in; int ret; memcpy(&cfg_in, (void *)in->addr, min_t(uint32_t, in->len, sizeof(cfg_in))); @@ 
-503,7 +503,7 @@ static int udma_k_user_ctl_notify_attr(struct udma_dev *dev, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { - struct udma_user_ctl_config_notify_attr attr_in; + struct hns3_udma_user_ctl_config_notify_attr attr_in; memcpy(&attr_in, (void *)in->addr, min_t(uint32_t, in->len, sizeof(attr_in))); dev->notify_addr = attr_in.notify_addr; @@ -515,7 +515,7 @@ static int udma_k_user_ctl_query_hw_id(struct udma_dev *dev, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { - struct udma_user_ctl_query_hw_id_out info_out; + struct hns3_udma_user_ctl_query_hw_id_out info_out; info_out.chip_id = dev->chip_id; info_out.die_id = dev->die_id; @@ -530,9 +530,9 @@ typedef int (*udma_k_user_ctl_ops)(struct udma_dev *dev, struct ubcore_user_ctl_out *out); static udma_k_user_ctl_ops g_udma_user_ctl_ops[] = { - [UDMA_K_USER_CTL_CONFIG_POE_CHANNEL] = udma_k_user_ctl_config_poe_chl, - [UDMA_K_USER_CTL_CONFIG_NOTIFY_ATTR] = udma_k_user_ctl_notify_attr, - [UDMA_K_USER_CTL_QUERY_HW_ID] = udma_k_user_ctl_query_hw_id, + [HNS3_UDMA_K_USER_CTL_CONFIG_POE_CHANNEL] = udma_k_user_ctl_config_poe_chl, + [HNS3_UDMA_K_USER_CTL_CONFIG_NOTIFY_ATTR] = udma_k_user_ctl_notify_attr, + [HNS3_UDMA_K_USER_CTL_QUERY_HW_ID] = udma_k_user_ctl_query_hw_id, }; int udma_k_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ctl) @@ -541,7 +541,7 @@ int udma_k_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ct struct ubcore_user_ctl_in in = k_user_ctl->in; struct udma_dev *udma_dev = to_udma_dev(dev); - if (in.opcode >= UDMA_K_USER_CTL_OPCODE_NUM || + if (in.opcode >= HNS3_UDMA_K_USER_CTL_OPCODE_NUM || !g_udma_user_ctl_ops[in.opcode]) { dev_err(udma_dev->dev, "bad kernel user ctl opcode: 0x%x.\n", in.opcode); diff --git a/drivers/ub/hw/hns3/hns3_udma_user_ctl.h b/drivers/ub/hw/hns3/hns3_udma_user_ctl.h index c1f327dd31d48b70c68dfa9c1b55b215655267a4..b7f08992104a517ed3d8ad1e54ec39ccc189416f 100644 --- 
a/drivers/ub/hw/hns3/hns3_udma_user_ctl.h +++ b/drivers/ub/hw/hns3/hns3_udma_user_ctl.h @@ -17,5 +17,30 @@ #define _UDMA_USER_CTL_H int udma_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ctl); - +int udma_user_ctl_config_poe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_query_poe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_dca_shrink(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_dca_attach(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_dca_detach(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_dca_query(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_flush_cqe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_dca_reg(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, struct ubcore_udrv_priv *udrv_data); +int udma_user_ctl_dca_dereg(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, + struct ubcore_user_ctl_out *out, + struct ubcore_udrv_priv *udrv_data); #endif /* _UDMA_USER_CTL_H */ diff --git a/drivers/ub/hw/hns3/hns3_udma_user_ctl_api.h b/drivers/ub/hw/hns3/hns3_udma_user_ctl_api.h index 0e3a0e29f3d5cb59871f79770dcdd1801c357ef9..c3632401e7ddb3274297e998084c243e92fb4553 100644 --- a/drivers/ub/hw/hns3/hns3_udma_user_ctl_api.h +++ 
b/drivers/ub/hw/hns3/hns3_udma_user_ctl_api.h @@ -13,36 +13,36 @@ * */ -#ifndef _UDMA_USER_CTL_API_H -#define _UDMA_USER_CTL_API_H +#ifndef _HNS3_UDMA_USER_CTL_API_H +#define _HNS3_UDMA_USER_CTL_API_H -struct udma_user_ctl_poe_init_attr { +struct hns3_udma_user_ctl_poe_init_attr { uint64_t rsv; /* reserved for extension, now must be 0 */ uint64_t poe_addr; /* 0 for disable */ }; -struct udma_user_ctl_cfg_poe_channel_in { - struct udma_user_ctl_poe_init_attr *init_attr; +struct hns3_udma_user_ctl_cfg_poe_channel_in { + struct hns3_udma_user_ctl_poe_init_attr *init_attr; uint8_t poe_channel; }; -struct udma_user_ctl_config_notify_attr { +struct hns3_udma_user_ctl_config_notify_attr { uint64_t notify_addr; uint64_t reserved; }; -struct udma_user_ctl_query_hw_id_out { +struct hns3_udma_user_ctl_query_hw_id_out { uint32_t chip_id; uint32_t die_id; uint32_t func_id; uint32_t reserved; }; -enum udma_k_user_ctl_opcode { - UDMA_K_USER_CTL_CONFIG_POE_CHANNEL, - UDMA_K_USER_CTL_CONFIG_NOTIFY_ATTR, - UDMA_K_USER_CTL_QUERY_HW_ID, - UDMA_K_USER_CTL_OPCODE_NUM, +enum hns3_udma_k_user_ctl_opcode { + HNS3_UDMA_K_USER_CTL_CONFIG_POE_CHANNEL, + HNS3_UDMA_K_USER_CTL_CONFIG_NOTIFY_ATTR, + HNS3_UDMA_K_USER_CTL_QUERY_HW_ID, + HNS3_UDMA_K_USER_CTL_OPCODE_NUM, }; -#endif /* _UDMA_USER_CTL_API_H */ +#endif /* _HNS3_UDMA_USER_CTL_API_H */