diff --git a/drivers/infiniband/hw/xsc/Makefile b/drivers/infiniband/hw/xsc/Makefile index ac6243ef217ac928f90e45ffd5ed81438d8974ef..12d8baebe053ce352bcc5e7b420626f336128c34 100644 --- a/drivers/infiniband/hw/xsc/Makefile +++ b/drivers/infiniband/hw/xsc/Makefile @@ -2,10 +2,17 @@ # Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. # All rights reserved. -ccflags-y := -I $(srctree)/drivers/net/ethernet/yunsilicon/xsc/ +ccflags-y := -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc ccflags-y += -Wno-implicit-fallthrough +ifeq ($(USE_INTERNAL_IB_CORE), 1) + ccflags-y += -include /usr/src/ofa_kernel/include/rdma/ib_umem.h +endif -obj-$(CONFIG_INFINIBAND_XSC) += xsc_ib.o +obj-$(CONFIG_INFINIBAND_XSC) += xsc_ib.o + +xsc_ib-y := main.o xsc_rdma_ctrl.o cq.o qp.o mem.o mr.o ah.o \ + counters.o devx.o global.o private_dev.o ib_umem_ex.o\ + rtt.o xsc_ib_sysfs.o + +xsc_ib-$(CONFIG_XSC_PEER_SUPPORT) += peer_mem.o -xsc_ib-y := main.o xsc_rdma_ctrl.o cq.o qp.o mem.o mr.o ah.o mad.o counters.o devx.o global.o private_dev.o ib_umem_ex.o rtt.o -xsc_ib-$(CONFIG_MLX_PEER_SUPPORT) := peer_mem.o diff --git a/drivers/infiniband/hw/xsc/ah.c b/drivers/infiniband/hw/xsc/ah.c index 720adc93bb7f097053643610136217702cc32655..8c1791fb7b6065489ca7f74b546d325e5824badf 100644 --- a/drivers/infiniband/hw/xsc/ah.c +++ b/drivers/infiniband/hw/xsc/ah.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "xsc_ib.h" #include "user.h" @@ -26,15 +27,15 @@ static u32 xsc_calc_roce_udp_flow_label(void) } static u16 xsc_ah_get_udp_sport(const struct xsc_ib_dev *dev, - struct rdma_ah_attr *ah_attr) + struct rdma_ah_attr *ah_attr) { enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type; u16 sport = 0; u32 fl = 0; - if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) && - (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) && - (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK)) { + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) && + 
(ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK)) { fl = ah_attr->grh.flow_label; } else { /*generate a 20bit flow_label and output to user layer*/ @@ -88,11 +89,10 @@ xsc_ib_create_ah_def() { struct xsc_ib_ah *ah = to_mah(ibah); struct xsc_ib_dev *dev = to_mdev(ibah->device); - struct rdma_ah_attr *ah_attr = init_attr->ah_attr; enum rdma_ah_attr_type ah_type = ah_attr->type; - if ((ah_type == RDMA_AH_ATTR_TYPE_ROCE) && + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) return RET_VALUE(-EINVAL); diff --git a/drivers/infiniband/hw/xsc/counters.c b/drivers/infiniband/hw/xsc/counters.c index 3b5330c233d394ed7be7952855451303c575624b..0d17e94b32870a78dc79d5c34368a3a7bd6cf3e0 100644 --- a/drivers/infiniband/hw/xsc/counters.c +++ b/drivers/infiniband/hw/xsc/counters.c @@ -5,11 +5,11 @@ */ #include -#include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_lag.h" +#include "common/xsc_cmd.h" #include "counters.h" #define COUNTERS_FILE_NAME "counters" @@ -66,7 +66,7 @@ static const struct counter_desc vf_hw_stats_desc[] = { }; static ssize_t counters_names_show(struct kobject *kobjs, - struct attribute *attr, char *buf) + struct attribute *attr, char *buf) { int i; ssize_t count = 0; @@ -74,11 +74,12 @@ static ssize_t counters_names_show(struct kobject *kobjs, const struct counter_desc *desc; xsc_counters_name_attr = container_of(attr, - struct xsc_counters_attribute, attr); + struct xsc_counters_attribute, + attr); if (!xsc_counters_name_attr->dev || - !xsc_counters_name_attr->desc || - xsc_counters_name_attr->desc_size == 0) + !xsc_counters_name_attr->desc || + xsc_counters_name_attr->desc_size == 0) return 0; for (i = 0; i < xsc_counters_name_attr->desc_size; ++i) { @@ -90,7 +91,7 @@ static ssize_t counters_names_show(struct kobject *kobjs, } static ssize_t counters_show(struct kobject *kobjs, - struct attribute *attr, char *buf) + 
struct attribute *attr, char *buf) { int i; ssize_t count = 0; @@ -105,10 +106,11 @@ static ssize_t counters_show(struct kobject *kobjs, struct xsc_lag *ldev; xsc_counters_attr = container_of(attr, - struct xsc_counters_attribute, attr); + struct xsc_counters_attribute, + attr); if (!xsc_counters_attr->dev || - !xsc_counters_attr->desc || + !xsc_counters_attr->desc || xsc_counters_attr->desc_size == 0) return 0; @@ -154,9 +156,9 @@ static ssize_t counters_show(struct kobject *kobjs, } static ssize_t counters_value_read(struct file *file, - struct kobject *kob, - struct bin_attribute *bin_attr, - char *buf, loff_t loff, size_t size) + struct kobject *kob, + struct bin_attribute *bin_attr, + char *buf, loff_t loff, size_t size) { int i; struct xsc_counters_bin_attribute *xsc_counters_bin_attr; @@ -168,12 +170,13 @@ static ssize_t counters_value_read(struct file *file, const struct counter_desc *desc; xsc_counters_bin_attr = container_of(&bin_attr->attr, - struct xsc_counters_bin_attribute, attr); + struct xsc_counters_bin_attribute, + attr); if (!xsc_counters_bin_attr->dev || - !xsc_counters_bin_attr->desc || - xsc_counters_bin_attr->desc_size == 0 || - xsc_counters_bin_attr->size == 0) + !xsc_counters_bin_attr->desc || + xsc_counters_bin_attr->desc_size == 0 || + xsc_counters_bin_attr->size == 0) return 0; dev = xsc_counters_bin_attr->dev; @@ -260,16 +263,20 @@ int xsc_counters_init(struct ib_device *ib_dev, struct xsc_core_device *dev) xsc_counters_bin->dev = dev; if (is_pf) { - xsc_counters_name->desc = xsc_counters->desc = - xsc_counters_bin->desc = &hw_stats_desc[0]; - xsc_counters_name->desc_size = xsc_counters->desc_size = - xsc_counters_bin->desc_size = ARRAY_SIZE(hw_stats_desc); + xsc_counters_name->desc = &hw_stats_desc[0]; + xsc_counters->desc = &hw_stats_desc[0]; + xsc_counters_bin->desc = &hw_stats_desc[0]; + xsc_counters_name->desc_size = ARRAY_SIZE(hw_stats_desc); + xsc_counters->desc_size = ARRAY_SIZE(hw_stats_desc); + xsc_counters_bin->desc_size = 
ARRAY_SIZE(hw_stats_desc); xsc_counters_bin->size = xsc_counters_bin->desc_size * sizeof(u64); } else { - xsc_counters_name->desc = xsc_counters->desc = - xsc_counters_bin->desc = &vf_hw_stats_desc[0]; - xsc_counters_name->desc_size = xsc_counters->desc_size = - xsc_counters_bin->desc_size = ARRAY_SIZE(vf_hw_stats_desc); + xsc_counters_name->desc = &vf_hw_stats_desc[0]; + xsc_counters->desc = &vf_hw_stats_desc[0]; + xsc_counters_bin->desc = &vf_hw_stats_desc[0]; + xsc_counters_name->desc_size = ARRAY_SIZE(vf_hw_stats_desc); + xsc_counters->desc_size = ARRAY_SIZE(vf_hw_stats_desc); + xsc_counters_bin->desc_size = ARRAY_SIZE(vf_hw_stats_desc); xsc_counters_bin->size = xsc_counters_bin->desc_size * sizeof(u64); } @@ -283,7 +290,7 @@ int xsc_counters_init(struct ib_device *ib_dev, struct xsc_core_device *dev) dev->counters_priv = counters_attr_g; - ret = sysfs_create_group(&(ib_dev->dev.kobj), counters_attr_g); + ret = sysfs_create_group(&ib_dev->dev.kobj, counters_attr_g); if (ret) goto err_counters_create_group; @@ -333,7 +340,7 @@ void xsc_counters_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) xsc_counters = (struct xsc_counters_attribute *)counters_attrs[1]; if (counters_attr_g) { - sysfs_remove_group(&(ib_dev->dev.kobj), counters_attr_g); + sysfs_remove_group(&ib_dev->dev.kobj, counters_attr_g); kfree(counters_attr_g); counters_attr_g = NULL; } diff --git a/drivers/infiniband/hw/xsc/counters.h b/drivers/infiniband/hw/xsc/counters.h index d9a41cb06c8765c33d63df0ab4c9543cd61878c5..2215571967b9a70297179e75786a0fa0b40af3c8 100644 --- a/drivers/infiniband/hw/xsc/counters.h +++ b/drivers/infiniband/hw/xsc/counters.h @@ -18,10 +18,10 @@ struct counter_desc { struct xsc_counters_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, - struct attribute *attr, char *buf); + struct attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, - struct attribute *attr, const char *buf, - size_t count); + struct attribute *attr, const char 
*buf, + size_t count); int id; struct xsc_core_device *dev; const struct counter_desc *desc; @@ -31,17 +31,17 @@ struct xsc_counters_attribute { struct xsc_counters_bin_attribute { struct attribute attr; ssize_t (*read)(struct file *file, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size); + struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t size); ssize_t (*write)(struct file *file, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size); + struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t size); int (*mmap)(struct file *file, - struct kobject *kobj, - struct bin_attribute *attr, - struct vm_area_struct *vma); + struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma); int id; struct xsc_core_device *dev; const struct counter_desc *desc; @@ -50,14 +50,14 @@ struct xsc_counters_bin_attribute { }; ssize_t counters_vf_names_show(struct kobject *kobjs, - struct attribute *attr, char *buf); + struct attribute *attr, char *buf); ssize_t counters_vf_value_read(struct file *file, - struct kobject *kob, - struct bin_attribute *bin_attr, - char *buf, loff_t loff, size_t size); + struct kobject *kob, + struct bin_attribute *bin_attr, + char *buf, loff_t loff, size_t size); ssize_t counters_vf_show(struct kobject *kobjs, - struct attribute *attr, char *buf); + struct attribute *attr, char *buf); #endif diff --git a/drivers/infiniband/hw/xsc/cq.c b/drivers/infiniband/hw/xsc/cq.c index ceaa9c20500cad0b72a03a5c0cbec0683868da48..7b4e7475032ca96a664b7660a54827d44e6e9b88 100644 --- a/drivers/infiniband/hw/xsc/cq.c +++ b/drivers/infiniband/hw/xsc/cq.c @@ -8,7 +8,7 @@ #include #include "xsc_ib.h" #include "user.h" -#include +#include "common/xsc_hsi.h" #include enum { @@ -96,7 +96,7 @@ static void xsc_ib_cq_event(struct xsc_core_cq *xcq, enum xsc_event type) if (type != XSC_EVENT_TYPE_CQ_ERROR) { 
xsc_ib_err(dev, "Unexpected event type %d on CQ %06x\n", - type, xcq->cqn); + type, xcq->cqn); return; } @@ -129,7 +129,8 @@ static void *get_sw_cqe(struct xsc_ib_cq *cq, int n) } static inline void handle_good_req(struct ib_wc *wc, - struct xsc_cqe *cqe, u8 opcode) + struct xsc_cqe *cqe, + u8 opcode) { wc->opcode = xsc_cqe_opcode[opcode]; if (opcode == XSC_OPCODE_RDMA_REQ_READ) @@ -150,7 +151,6 @@ static void handle_responder(struct ib_wc *wc, struct xsc_cqe *cqe, idx = wq->tail & (wq->wqe_cnt - 1); wc->wr_id = wq->wrid[idx]; ++wq->tail; - } struct ib_mad_list_head { @@ -189,10 +189,10 @@ static void *get_seg_wqe(void *first, int n) } static void xsc_handle_rdma_mad_resp_recv(struct xsc_ib_cq *cq, - struct xsc_ib_qp **cur_qp, - struct ib_wc *wc, - struct xsc_cqe *cqe, - u8 opcode) + struct xsc_ib_qp **cur_qp, + struct ib_wc *wc, + struct xsc_cqe *cqe, + u8 opcode) { struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); void *recv; @@ -246,8 +246,8 @@ static void xsc_handle_rdma_mad_resp_recv(struct xsc_ib_cq *cq, } static int xsc_poll_one(struct xsc_ib_cq *cq, - struct xsc_ib_qp **cur_qp, - struct ib_wc *wc) + struct xsc_ib_qp **cur_qp, + struct ib_wc *wc) { struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); struct xsc_core_qp *xqp; @@ -281,7 +281,7 @@ static int xsc_poll_one(struct xsc_ib_cq *cq, xqp = __xsc_qp_lookup(dev->xdev, qpn); if (unlikely(!xqp)) { xsc_ib_warn(dev, "CQE@CQ %d for unknown QPN %d\n", - cq->xcq.cqn, qpn); + cq->xcq.cqn, qpn); return -EINVAL; } @@ -319,7 +319,7 @@ static int xsc_poll_one(struct xsc_ib_cq *cq, default: xsc_ib_err(dev, "completion error\n%08x %08x %08x %08x %08x %08x\n", - p[0], p[1], p[2], p[3], p[5], p[6]); + p[0], p[1], p[2], p[3], p[5], p[6]); wc->status = IB_WC_GENERAL_ERR; wc->wr_id = 0; break; @@ -356,7 +356,6 @@ int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) spin_unlock_irqrestore(&cq->lock, flags); return npolled; - } int xsc_cqe_is_empty(struct xsc_ib_cq *cq) @@ -411,7 +410,7 @@ static int 
alloc_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf, int err; err = xsc_buf_alloc(dev->xdev, nent * cqe_size, - PAGE_SIZE, &buf->buf); + PAGE_SIZE, &buf->buf); if (err) return err; @@ -452,19 +451,18 @@ static int create_cq_user(struct xsc_ib_dev *dev, struct ib_udata *udata, } xsc_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, - &ncont, NULL); + &ncont, NULL); if (ncont != npages) { - xsc_ib_warn(dev, "bad page_shift:%d, ncont:%d\n", page_shift, ncont); + xsc_ib_dbg(dev, "bad page_shift:%d, ncont:%d\n", page_shift, ncont); /* amber doesn't support compound pages */ page_shift = PAGE_SHIFT; ncont = npages; - xsc_ib_warn(dev, "overwrite to page_shift:%d, ncont:%d\n", - page_shift, ncont); + xsc_ib_dbg(dev, "overwrite to page_shift:%d, ncont:%d\n", page_shift, ncont); } log_cq_sz = ilog2(entries); hw_npages = DIV_ROUND_UP((1 << log_cq_sz) * sizeof(struct xsc_cqe), PAGE_SIZE_4K); xsc_ib_dbg(dev, "addr 0x%llx, entries %d, size %u, npages %d, page_shift %d, ncont %d, hw_npages %d\n", - ucmd.buf_addr, entries, ucmd.cqe_size, npages, page_shift, ncont, hw_npages); + ucmd.buf_addr, entries, ucmd.cqe_size, npages, page_shift, ncont, hw_npages); *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; *cqb = xsc_vzalloc(*inlen); @@ -548,7 +546,7 @@ xsc_ib_create_cq_def() entries = roundup_pow_of_two(entries + 1); xsc_ib_dbg(dev, "entries:%d, vector:%d, max_cqes:%d\n", entries, vector, - dev->xdev->caps.max_cqes); + dev->xdev->caps.max_cqes); if (entries > dev->xdev->caps.max_cqes) entries = dev->xdev->caps.max_cqes; @@ -613,8 +611,7 @@ xsc_ib_create_cq_def() destroy_cq_kernel(dev, cq); err_create: - kfree(cq); - return err; + return RET_VALUE(err); } xsc_ib_destroy_cq_def() @@ -660,7 +657,7 @@ void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 rsn) /* Now sweep backwards through the CQ, removing CQ entries * that match our QP by copying older entries on top of them. 
*/ - while ((int) --prod_index - (int) cq->xcq.cons_index >= 0) { + while ((int)(--prod_index) - (int)cq->xcq.cons_index >= 0) { cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); if (is_equal_rsn(cqe, rsn)) { ++nfreed; diff --git a/drivers/infiniband/hw/xsc/devx.c b/drivers/infiniband/hw/xsc/devx.c index f6ab45d9ad0727f71346406f6d8a220b72fb596e..fca43076bae1838296062a04a56b83072718ec0d 100644 --- a/drivers/infiniband/hw/xsc/devx.c +++ b/drivers/infiniband/hw/xsc/devx.c @@ -3,13 +3,12 @@ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ - #include #include #include #include #include -#include +#include "common/driver.h" #include "xsc_ib.h" #define UVERBS_MODULE_NAME xsc_ib #include @@ -38,10 +37,8 @@ static int UVERBS_HANDLER(XSC_IB_METHOD_DEVX_OTHER)(struct uverbs_attr_bundle *a { struct xsc_ib_ucontext *c; struct xsc_ib_dev *dev; - void *cmd_in = uverbs_attr_get_alloced_ptr( - attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN); - int cmd_out_len = uverbs_attr_get_len(attrs, - XSC_IB_ATTR_DEVX_OTHER_CMD_OUT); + void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN); + int cmd_out_len = uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT); void *cmd_out; int err; @@ -58,28 +55,25 @@ static int UVERBS_HANDLER(XSC_IB_METHOD_DEVX_OTHER)(struct uverbs_attr_bundle *a return PTR_ERR(cmd_out); err = xsc_cmd_exec(dev->xdev, cmd_in, - uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN), - cmd_out, cmd_out_len); + uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN), + cmd_out, cmd_out_len); if (err) return err; return uverbs_copy_to(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len); } -DECLARE_UVERBS_NAMED_METHOD( - XSC_IB_METHOD_DEVX_OTHER, - UVERBS_ATTR_PTR_IN( - XSC_IB_ATTR_DEVX_OTHER_CMD_IN, - UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_inbox_hdr)), - UA_MANDATORY, - UA_ALLOC_AND_COPY), - UVERBS_ATTR_PTR_OUT( - XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, - UVERBS_ATTR_MIN_SIZE(sizeof(struct 
xsc_outbox_hdr)), - UA_MANDATORY)); +DECLARE_UVERBS_NAMED_METHOD(XSC_IB_METHOD_DEVX_OTHER, + UVERBS_ATTR_PTR_IN(XSC_IB_ATTR_DEVX_OTHER_CMD_IN, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_inbox_hdr)), + UA_MANDATORY, + UA_ALLOC_AND_COPY), + UVERBS_ATTR_PTR_OUT(XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_outbox_hdr)), + UA_MANDATORY)); DECLARE_UVERBS_GLOBAL_METHODS(XSC_IB_OBJECT_DEVX, - &UVERBS_METHOD(XSC_IB_METHOD_DEVX_OTHER)); + &UVERBS_METHOD(XSC_IB_METHOD_DEVX_OTHER)); const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void) { diff --git a/drivers/infiniband/hw/xsc/global.c b/drivers/infiniband/hw/xsc/global.c index e46c1665803236c26b6022163052f6f78c02defc..8d4819b8ef3e95b492fd6f442bded25f3c1a14ea 100644 --- a/drivers/infiniband/hw/xsc/global.c +++ b/drivers/infiniband/hw/xsc/global.c @@ -20,6 +20,7 @@ static int is_valid_pcp(int pcp) return 0; return -1; } + static int is_valid_dscp(int dscp) { if ((dscp >= 0 && dscp <= QOS_DSCP_MAX) || dscp == GLOBAL_UNSET_FORCE_VALUE) diff --git a/drivers/infiniband/hw/xsc/ib_peer_mem.h b/drivers/infiniband/hw/xsc/ib_peer_mem.h index 17da6169f97177bab140202bbc4d09a7cbd0d4e4..ad7997fcece37214ffcd4d6c7c0332b3d22b78f3 100644 --- a/drivers/infiniband/hw/xsc/ib_peer_mem.h +++ b/drivers/infiniband/hw/xsc/ib_peer_mem.h @@ -40,7 +40,7 @@ struct ib_peer_memory_client { enum ib_peer_mem_flags { IB_PEER_MEM_ALLOW = 1, - IB_PEER_MEM_INVAL_SUPP = (1<<1), + IB_PEER_MEM_INVAL_SUPP = (1 << 1), }; struct core_ticket { @@ -57,8 +57,8 @@ void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client, void *peer_client_context); int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, - struct ib_umem_ex *umem, - struct invalidation_ctx **invalidation_ctx); + struct ib_umem_ex *umem, + struct invalidation_ctx **invalidation_ctx); void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, struct invalidation_ctx *invalidation_ctx); diff --git 
a/drivers/infiniband/hw/xsc/ib_umem_ex.c b/drivers/infiniband/hw/xsc/ib_umem_ex.c index cc0566f8c956cc7bdc31e90be92c50f862285465..71c823bdc0f6e6e8fa41cefcee3259d4648a0263 100644 --- a/drivers/infiniband/hw/xsc/ib_umem_ex.c +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.c @@ -11,6 +11,76 @@ #include #include "ib_umem_ex.h" +#if defined(IB_CORE_UMEM_EX_V1) +#define get_mm(umem_ctx) ((umem_ctx)->mm) +#elif defined(IB_CORE_UMEM_EX_V2) +#define get_mm(umem_ctx) ((umem_ctx)->owning_mm) +#endif + +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) +static struct ib_umem_ex *peer_umem_get(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem_ex, unsigned long addr, + int dmasync, unsigned long peer_mem_flags) +{ + int ret; + const struct peer_memory_client *peer_mem = ib_peer_mem->peer_mem; + struct invalidation_ctx *invalidation_ctx = NULL; + struct ib_umem *umem = (struct ib_umem *)umem_ex; + + umem_ex->ib_peer_mem = ib_peer_mem; + if (peer_mem_flags & IB_PEER_MEM_INVAL_SUPP) { + ret = ib_peer_create_invalidation_ctx(ib_peer_mem, umem_ex, &invalidation_ctx); + if (ret) + goto end; + } + + /* + * We always request write permissions to the pages, to force breaking of any CoW + * during the registration of the MR. For read-only MRs we use the "force" flag to + * indicate that CoW breaking is required but the registration should not fail if + * referencing read-only areas. + */ + ret = peer_mem->get_pages(addr, umem->length, + 1, !umem->writable, + &umem->sg_head, + umem_ex->peer_mem_client_context, + invalidation_ctx ? 
+ invalidation_ctx->context_ticket : 0); + if (ret) + goto out; + + umem->page_shift = ilog2(peer_mem->get_page_size + (umem_ex->peer_mem_client_context)); + if (BIT(umem->page_shift) <= 0) + goto put_pages; + + ret = peer_mem->dma_map(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device, + dmasync, + &umem->nmap); + if (ret) + goto put_pages; + + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_reg_pages); + atomic64_add(umem->nmap * BIT(umem->page_shift), &ib_peer_mem->stats.num_reg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_alloc_mrs); + return umem_ex; + +put_pages: + peer_mem->put_pages(&umem->sg_head, umem_ex->peer_mem_client_context); +out: + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); +end: + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + // renamed in different kernel + mmdrop(get_mm(umem)); + kfree(umem_ex); + return ERR_PTR(ret); +} +#endif + struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) { struct ib_umem_ex *ret_umem; @@ -18,7 +88,7 @@ struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) if (!umem) return ERR_PTR(-EINVAL); - ret_umem = kzalloc(sizeof(struct ib_umem_ex), GFP_KERNEL); + ret_umem = kzalloc(sizeof(*ret_umem), GFP_KERNEL); if (!ret_umem) return ERR_PTR(-ENOMEM); @@ -28,24 +98,116 @@ struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) } struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, - unsigned long addr, - size_t size, int access, - int dmasync, u8 *peer_exists) + unsigned long addr, + size_t size, int access, + int dmasync, u8 *peer_exists) { +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *peer_mem_client; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error. 
+ */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) + return ERR_PTR(-EINVAL); + + if (!can_do_mlock()) + return ERR_PTR(-EPERM); + + umem_ex = kzalloc(sizeof(*umem_ex), GFP_KERNEL); + if (!umem_ex) + return ERR_PTR(-ENOMEM); + umem = &umem_ex->umem; + + umem->context = context; + umem->length = size; + umem->address = addr; + umem->writable = ib_access_writable(access); + get_mm(umem) = current->mm; + +#if defined(IB_CORE_UMEM_EX_V1) + umem->odp_data = NULL; +#endif + + mmgrab(get_mm(umem)); + + peer_mem_client = ib_get_peer_client(context, addr, size, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP, + &umem_ex->peer_mem_client_context); + if (peer_mem_client) { + *peer_exists = 1; + umem->hugetlb = 0; + return peer_umem_get(peer_mem_client, umem_ex, addr, dmasync, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP); + } + + return ERR_PTR(-ENOMEM); +#else return NULL; +#endif } void ib_umem_ex_release(struct ib_umem_ex *umem_ex) { struct ib_umem *umem = (struct ib_umem *)umem_ex; +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *ib_peer_mem = umem_ex->ib_peer_mem; + const struct peer_memory_client *peer_mem; + struct invalidation_ctx *invalidation_ctx; + if (ib_peer_mem) { + peer_mem = ib_peer_mem->peer_mem; + invalidation_ctx = umem_ex->invalidation_ctx; + + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); + + peer_mem->dma_unmap(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device); + peer_mem->put_pages(&umem->sg_head, + umem_ex->peer_mem_client_context); + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_dereg_pages); + atomic64_add(umem->nmap * BIT(umem->page_shift), + &ib_peer_mem->stats.num_dereg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_dealloc_mrs); + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + kfree(umem_ex); + } else { + // kernel ib umem release + ib_umem_release(umem); + } +#else 
ib_umem_release(umem); +#endif } int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, - umem_invalidate_func_t func, - void *cookie) + umem_invalidate_func_t func, + void *cookie) { +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct invalidation_ctx *invalidation_ctx = umem_ex->invalidation_ctx; + int ret = 0; + + mutex_lock(&umem_ex->ib_peer_mem->lock); + if (invalidation_ctx->peer_invalidated) { + pr_err("ib_umem_activate_invalidation_notifier: pages were invalidated by peer\n"); + ret = -EINVAL; + goto end; + } + invalidation_ctx->func = func; + invalidation_ctx->cookie = cookie; + /* from that point any pending invalidations can be called */ +end: + mutex_unlock(&umem_ex->ib_peer_mem->lock); + return ret; +#else return 0; +#endif } diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.h b/drivers/infiniband/hw/xsc/ib_umem_ex.h index 62f8673247dcaadac224fbc3d698307e59d79318..b73eecf3cd90280c120767a253299e0eb803a89b 100644 --- a/drivers/infiniband/hw/xsc/ib_umem_ex.h +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.h @@ -38,13 +38,12 @@ struct invalidation_ctx { }; struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, - unsigned long addr, - size_t size, int access, - int dmasync, u8 *peer_exists); + unsigned long addr, size_t size, int access, + int dmasync, u8 *peer_exists); void ib_umem_ex_release(struct ib_umem_ex *umem_ex); int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, - umem_invalidate_func_t func, - void *cookie); + umem_invalidate_func_t func, + void *cookie); #endif diff --git a/drivers/infiniband/hw/xsc/mad.c b/drivers/infiniband/hw/xsc/mad.c deleted file mode 100644 index 8d93cb820c37dc5c59474382fc7df500a215751b..0000000000000000000000000000000000000000 --- a/drivers/infiniband/hw/xsc/mad.c +++ /dev/null @@ -1,84 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. 
- */ - -#include -#include -#include "xsc_ib.h" - -enum { - XSC_IB_VENDOR_CLASS1 = 0x9, - XSC_IB_VENDOR_CLASS2 = 0xa -}; - -int xsc_MAD_IFC(struct xsc_ib_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad) -{ - u8 op_modifier = 0; - - /* Key check traps can't be generated unless we have in_wc to - * tell us where to send the trap. - */ - if (ignore_mkey || !in_wc) - op_modifier |= 0x1; - if (ignore_bkey || !in_wc) - op_modifier |= 0x2; - - return xsc_core_mad_ifc(dev->xdev, in_mad, response_mad, op_modifier, port); -} - -int xsc_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) -{ - u16 slid; - int err; - - slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); - - if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; - - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || - in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { - if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && - in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && - in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) - return IB_MAD_RESULT_SUCCESS; - - /* Don't process SMInfo queries -- the SMA can't handle them. 
- */ - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) - return IB_MAD_RESULT_SUCCESS; - } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || - in_mad->mad_hdr.mgmt_class == XSC_IB_VENDOR_CLASS1 || - in_mad->mad_hdr.mgmt_class == XSC_IB_VENDOR_CLASS2 || - in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) { - if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && - in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) - return IB_MAD_RESULT_SUCCESS; - } else { - return IB_MAD_RESULT_SUCCESS; - } - - err = xsc_MAD_IFC(to_mdev(ibdev), - mad_flags & IB_MAD_IGNORE_MKEY, - mad_flags & IB_MAD_IGNORE_BKEY, - port_num, in_wc, in_grh, in_mad, out_mad); - if (err) - return IB_MAD_RESULT_FAILURE; - - /* set return bit in status of directed route responses */ - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) - out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); - - if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) - /* no response for trap repress */ - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; - - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; -} - diff --git a/drivers/infiniband/hw/xsc/main.c b/drivers/infiniband/hw/xsc/main.c index e7ac738af739b111c1f2d0cab2b65a6a30d40ab0..ad3e70690cde1c34fdb03333ca197da1bb64ff3b 100644 --- a/drivers/infiniband/hw/xsc/main.c +++ b/drivers/infiniband/hw/xsc/main.c @@ -12,10 +12,10 @@ #include #include #include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/driver.h" #include #include @@ -26,100 +26,17 @@ #include "xsc_rdma_ctrl.h" #define DRIVER_NAME "xsc_ib" -#define DRIVER_VERSION "1.0.0" +#define DRIVER_VERSION "1.0" #define DRIVER_RELDATE "Jan 2022" -MODULE_DESCRIPTION("Yunsilicon XSC RDMA driver"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Yunsilicon Amber HCA IB driver"); +MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); -static int prof_sel = 2; 
-module_param_named(prof_sel, prof_sel, int, 0444); -MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); - static char xsc_version[] = - DRIVER_NAME ": Yunsilicon Amber Infiniband driver" + DRIVER_NAME ": Yunsilicon Infiniband driver" DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; -struct xsc_profile profile[] = { - [0] = { - .mask = 0, - }, - [1] = { - .mask = XSC_PROF_MASK_QP_SIZE, - .log_max_qp = 12, - }, - [2] = { - .mask = XSC_PROF_MASK_QP_SIZE | - XSC_PROF_MASK_MR_CACHE, - .log_max_qp = 17, - .mr_cache[0] = { - .size = 500, - .limit = 250 - }, - .mr_cache[1] = { - .size = 500, - .limit = 250 - }, - .mr_cache[2] = { - .size = 500, - .limit = 250 - }, - .mr_cache[3] = { - .size = 500, - .limit = 250 - }, - .mr_cache[4] = { - .size = 500, - .limit = 250 - }, - .mr_cache[5] = { - .size = 500, - .limit = 250 - }, - .mr_cache[6] = { - .size = 500, - .limit = 250 - }, - .mr_cache[7] = { - .size = 500, - .limit = 250 - }, - .mr_cache[8] = { - .size = 500, - .limit = 250 - }, - .mr_cache[9] = { - .size = 500, - .limit = 250 - }, - .mr_cache[10] = { - .size = 500, - .limit = 250 - }, - .mr_cache[11] = { - .size = 500, - .limit = 250 - }, - .mr_cache[12] = { - .size = 64, - .limit = 32 - }, - .mr_cache[13] = { - .size = 32, - .limit = 16 - }, - .mr_cache[14] = { - .size = 16, - .limit = 8 - }, - .mr_cache[15] = { - .size = 8, - .limit = 4 - }, - }, -}; - __be64 sys_image_guid; void xsc_get_sys_image_guid(u8 *dev_addr, u8 *guid) { @@ -143,8 +60,8 @@ void xsc_get_sys_image_guid(u8 *dev_addr, u8 *guid) } static int xsc_ib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props, - struct ib_udata *udata) + struct ib_device_attr *props, + struct ib_udata *udata) { struct xsc_ib_dev *dev = to_mdev(ibdev); int max_rq_sg; @@ -194,16 +111,17 @@ static int xsc_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; props->page_size_cap = dev->xdev->caps.min_page_sz; - props->max_mr_size = (1 << 
dev->xdev->caps.log_max_mtt)*PAGE_SIZE; + props->max_mr_size = (1 << dev->xdev->caps.log_max_mtt) * PAGE_SIZE; props->max_qp = 1 << dev->xdev->caps.log_max_qp; props->max_qp_wr = dev->xdev->caps.max_wqes; max_rq_sg = dev->xdev->caps.max_rq_desc_sz / sizeof(struct xsc_wqe_data_seg); max_sq_sg = (dev->xdev->caps.max_sq_desc_sz - sizeof(struct xsc_wqe_ctrl_seg_2)) / sizeof(struct xsc_wqe_data_seg_2); - props->max_send_sge = dev->xdev->caps.send_ds_num; + props->max_send_sge = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - + XSC_RADDR_SEG_NUM; props->max_recv_sge = dev->xdev->caps.recv_ds_num; - props->max_sge_rd = 0;/*xsc unsupported RD server type*/ + props->max_sge_rd = 1;/*max sge per read wqe*/ props->max_cq = 1 << dev->xdev->caps.log_max_cq; props->max_cqe = dev->xdev->caps.max_cqes - 1; props->max_mr = 1 << dev->xdev->caps.log_max_mkey; @@ -212,16 +130,16 @@ static int xsc_ib_query_device(struct ib_device *ibdev, props->max_qp_init_rd_atom = dev->xdev->caps.max_ra_res_qp; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq = - dev->xdev->caps.log_max_srq?(1 << dev->xdev->caps.log_max_srq):0; + dev->xdev->caps.log_max_srq ? (1 << dev->xdev->caps.log_max_srq) : 0; props->max_srq_wr = dev->xdev->caps.max_srq_wqes - 1; - props->max_srq_sge = dev->xdev->caps.log_max_srq?(max_rq_sg - 1):0; + props->max_srq_sge = dev->xdev->caps.log_max_srq ? (max_rq_sg - 1) : 0; props->max_fast_reg_page_list_len = (unsigned int)-1; props->local_ca_ack_delay = dev->xdev->caps.local_ca_ack_delay; props->atomic_cap = dev->xdev->caps.flags & XSC_DEV_CAP_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->masked_atomic_cap = IB_ATOMIC_HCA; props->max_mcast_grp = - dev->xdev->caps.log_max_mcg?(1 << dev->xdev->caps.log_max_mcg):0; + dev->xdev->caps.log_max_mcg ? 
(1 << dev->xdev->caps.log_max_mcg) : 0; props->max_mcast_qp_attach = dev->xdev->caps.max_qp_mcg; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; @@ -242,7 +160,7 @@ static int xsc_ib_query_device(struct ib_device *ibdev, /*response tso_caps extend param*/ if (field_avail(typeof(resp), tso_caps, udata->outlen)) { - max_tso = dev->xdev->caps.log_max_tso?(1 << dev->xdev->caps.log_max_tso):0; + max_tso = dev->xdev->caps.log_max_tso ? (1 << dev->xdev->caps.log_max_tso) : 0; if (max_tso) { resp.tso_caps.max_tso = max_tso; resp.tso_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; @@ -295,12 +213,13 @@ static enum rdma_link_layer xsc_ib_port_link_layer(struct ib_device *ibdev, u8 p } int xsc_ib_query_port(struct ib_device *ibdev, u8 port, - struct ib_port_attr *props) + struct ib_port_attr *props) { struct xsc_ib_dev *dev = to_mdev(ibdev); struct net_device *ndev = dev->netdev; + struct xsc_core_device *xdev = dev->xdev; - if (port < 1 || port > dev->xdev->caps.num_ports) { + if (port < 1 || port > xdev->caps.num_ports) { xsc_ib_warn(dev, "invalid port number %d\n", port); return -EINVAL; } @@ -323,16 +242,26 @@ int xsc_ib_query_port(struct ib_device *ibdev, u8 port, props->sm_sl = 0; props->subnet_timeout = 0; props->init_type_reply = 0; - props->active_width = 1; - props->active_speed = 32; - props->phys_state = 5; + if (!is_support_rdma(xdev)) { + props->active_width = 1; + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + } else { + if (xsc_get_link_speed(xdev) == XSC_CMD_RESP_LINKSPEED_MODE_100G) + props->active_width = 2; + else + props->active_width = 1; + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + } + props->phys_state = netif_carrier_ok(ndev) ? 
XSC_RDMA_PHY_STATE_LINK_UP : + XSC_RDMA_PHY_STATE_DISABLED; return 0; } + const struct xsc_gid xsc_gid_zero; static int xsc_ib_query_gid(struct ib_device *ibdev, u8 port_num, - int index, union ib_gid *gid) + int index, union ib_gid *gid) { struct xsc_ib_dev *dev = to_mdev(ibdev); struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; @@ -351,8 +280,8 @@ static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context) { int index = 0; struct xsc_ib_dev *dev = to_mdev(attr->device); + struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; - struct xsc_gid *gid = (struct xsc_gid *)&attr->gid; if (!sgid_tbl) return -EINVAL; @@ -361,7 +290,7 @@ static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context) return -ENOMEM; for (index = 0; index < sgid_tbl->max; index++) { - if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid))) + if (!memcmp(&sgid_tbl->tbl[index], gid_raw, sizeof(*gid_raw))) break; } @@ -380,8 +309,8 @@ int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) int i = 0; u32 free_idx = 0; struct xsc_ib_dev *dev = to_mdev(attr->device); + struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; - struct xsc_gid *gid = (struct xsc_gid *)&attr->gid; if (!sgid_tbl) return -EINVAL; @@ -391,7 +320,7 @@ int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) free_idx = sgid_tbl->max; for (i = 0; i < sgid_tbl->max; i++) { - if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) { + if (!memcmp(&sgid_tbl->tbl[i], gid_raw, sizeof(*gid_raw))) { return 0; } else if (!memcmp(&sgid_tbl->tbl[i], &xsc_gid_zero, sizeof(xsc_gid_zero)) && free_idx == sgid_tbl->max) { @@ -402,15 +331,16 @@ int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) if (free_idx == sgid_tbl->max) return -ENOMEM; - memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); + memcpy(&sgid_tbl->tbl[free_idx], gid_raw, sizeof(*gid_raw)); 
sgid_tbl->count++; xsc_ib_dbg(dev, "Add gid to index:%u, count:%u, max:%u\n", free_idx, sgid_tbl->count, - sgid_tbl->max); + sgid_tbl->max); return 0; } + static int xsc_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, - u16 *pkey) + u16 *pkey) { *pkey = 0xffff; return 0; @@ -421,7 +351,7 @@ struct xsc_reg_node_desc { }; static int xsc_ib_modify_device(struct ib_device *ibdev, int mask, - struct ib_device_modify *props) + struct ib_device_modify *props) { struct xsc_ib_dev *dev = to_mdev(ibdev); struct xsc_reg_node_desc in; @@ -436,12 +366,13 @@ static int xsc_ib_modify_device(struct ib_device *ibdev, int mask, if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) return 0; - /* If possible, pass node desc to FW, so it can generate + /* + * If possible, pass node desc to FW, so it can generate * a 144 trap. If cmd fails, just ignore. */ memcpy(&in, props->node_desc, 64); err = xsc_core_access_reg(dev->xdev, &in, sizeof(in), &out, - sizeof(out), XSC_REG_NODE_DESC, 0, 1); + sizeof(out), XSC_REG_NODE_DESC, 0, 1); if (err) return err; @@ -449,8 +380,9 @@ static int xsc_ib_modify_device(struct ib_device *ibdev, int mask, return err; } + static int xsc_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, - struct ib_port_modify *props) + struct ib_port_modify *props) { struct xsc_ib_dev *dev = to_mdev(ibdev); struct ib_port_attr attr; @@ -484,8 +416,6 @@ xsc_ib_alloc_ucontext_def() struct xsc_ib_ucontext *context; int err; - pr_err("[%s:%d]", __func__, __LINE__); - if (!dev->ib_active) return RET_VALUE(-EAGAIN); @@ -520,11 +450,12 @@ xsc_ib_alloc_ucontext_def() return 0; out_ctx: - return err; + return RET_VALUE(err); } xsc_ib_dealloc_ucontext_def() { + return; } static int xsc_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) @@ -582,7 +513,7 @@ xsc_ib_alloc_pd_def() resp.pdn = pd->pdn; if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { xsc_core_dealloc_pd(to_mdev(ibdev)->xdev, pd->pdn); - return -EFAULT; + return RET_VALUE(-EFAULT); } } else { 
pd->pa_lkey = 0; @@ -602,7 +533,7 @@ xsc_ib_dealloc_pd_def() } static int xsc_port_immutable(struct ib_device *ibdev, u8 port_num, - struct ib_port_immutable *immutable) + struct ib_port_immutable *immutable) { struct ib_port_attr attr; int err; @@ -669,7 +600,7 @@ static int init_node_data(struct xsc_ib_dev *dev) } void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event, - unsigned long param) + unsigned long param) { struct xsc_priv *priv = &xdev->priv; struct xsc_device_context *dev_ctx; @@ -689,7 +620,7 @@ void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event, } static void xsc_ib_event(struct xsc_core_device *dev, void *context, - enum xsc_dev_event event, unsigned long data) + enum xsc_dev_event event, unsigned long data) { struct xsc_ib_dev *ibdev = (struct xsc_ib_dev *)context; struct ib_event ibev; @@ -775,7 +706,7 @@ static int get_port_caps(struct xsc_ib_dev *dev) dev->xdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys; dev->xdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; xsc_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", - dprops->max_pkeys, pprops->gid_tbl_len); + dprops->max_pkeys, pprops->gid_tbl_len); } out: @@ -793,7 +724,7 @@ static int xsc_create_dev_res(struct xsc_ib_res *ib_res) ib_res->sgid_tbl.max = dev->xdev->caps.port[0].gid_table_len; ib_res->sgid_tbl.tbl = kcalloc(ib_res->sgid_tbl.max, sizeof(struct xsc_gid), - GFP_KERNEL); + GFP_KERNEL); if (!ib_res->sgid_tbl.tbl) return -ENOMEM; @@ -811,7 +742,6 @@ static int populate_specs_root(struct xsc_ib_dev *dev) const struct uverbs_object_tree_def **trees = (const struct uverbs_object_tree_def **)dev->driver_trees; size_t num_trees = 0; - trees[num_trees++] = xsc_ib_get_devx_tree(); WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); @@ -842,7 +772,6 @@ static void xsc_ib_dev_setting(struct xsc_ib_dev *dev) dev->ib_dev.ops.uverbs_abi_ver = XSC_IB_UVERBS_ABI_VERSION; dev->ib_dev.ops.driver_id = RDMA_DRIVER_XSC5; 
dev->ib_dev.ops.uverbs_no_driver_id_binding = 1; - dev->ib_dev.ops.query_device = xsc_ib_query_device; dev->ib_dev.ops.query_port = xsc_ib_query_port; dev->ib_dev.ops.query_gid = xsc_ib_query_gid; @@ -897,24 +826,13 @@ static int init_one(struct xsc_core_device *xdev, printk_once(KERN_INFO "%s", xsc_version); - pr_err("[%s:%d]", __func__, __LINE__); - dev = (struct xsc_ib_dev *)ib_alloc_device(xsc_ib_dev, ib_dev); if (!dev) return -ENOMEM; - pr_err("[%s:%d]", __func__, __LINE__); - dev->xdev = xdev; xdev->event = xsc_core_event; _xsc_get_netdev(dev); -// if (prof_sel >= ARRAY_SIZE(profile)) { -// pr_warn("selected pofile out of range, selceting default\n"); -// prof_sel = 0; -// } -// mdev->profile = &profile[prof_sel]; - - _xsc_get_netdev(dev); xsc_get_sys_image_guid(dev->netdev->dev_addr, (u8 *)&sys_image_guid); err = get_port_caps(dev); if (err) @@ -932,7 +850,9 @@ static int init_one(struct xsc_core_device *xdev, else dev->num_comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; - pr_err("[%s:%d]", __func__, __LINE__); +// dev->ib_dev.dev.dma_ops = &dma_virt_ops; +// dma_coerce_mask_and_coherent(&dev->ib_dev.dev, +// dma_get_required_mask(&dev->ib_dev.dev)); strlcpy(dev->ib_dev.name, "xscale_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.node_type = RDMA_NODE_IB_CA; @@ -988,7 +908,6 @@ static int init_one(struct xsc_core_device *xdev, if (err) goto err_free; - pr_err("[%s:%d]", __func__, __LINE__); crc_table_init(dev); populate_specs_root(dev); @@ -997,7 +916,6 @@ static int init_one(struct xsc_core_device *xdev, if (ib_register_device(&dev->ib_dev, dev->ib_dev.name, dev->xdev->device)) goto err_rsrc; - pr_err("[%s:%d]", __func__, __LINE__); dev->ib_active = true; *m_ibdev = dev; @@ -1008,6 +926,10 @@ static int init_one(struct xsc_core_device *xdev, xsc_rtt_sysfs_init(&dev->ib_dev, xdev); + err = xsc_ib_sysfs_init(&dev->ib_dev, xdev); + if (err) + pr_err("fail to init ib sysfs\n"); + return 0; err_rsrc: @@ -1024,6 +946,7 @@ static void remove_one(struct 
xsc_core_device *xdev, void *intf_ctx) struct xsc_ib_dev *dev = (struct xsc_ib_dev *)intf_ctx; xsc_rtt_sysfs_fini(xdev); + xsc_ib_sysfs_fini(&dev->ib_dev, xdev); xsc_priv_dev_fini(&dev->ib_dev, xdev); xsc_counters_fini(&dev->ib_dev, xdev); ib_unregister_device(&dev->ib_dev); @@ -1035,27 +958,23 @@ static void *xsc_add(struct xsc_core_device *xpdev) struct xsc_ib_dev *m_ibdev = NULL; int ret = -1; - pr_err("===> %s: enter\n", __func__); -#ifdef USE_VIRTIO - pr_err("pcidev:%p bar0:%x\n", xpdev, xpdev->bar0); -#endif /* USE_VIRTIO */ - pr_err("enter virt_rdma_probe\n"); + pr_err("add rdma driver\n"); ret = init_one(xpdev, &m_ibdev); if (ret) { - pr_err("%s fail, ret = %d\n", __func__, ret); + pr_err("xsc ib dev add fail, ret = %d\n", ret); return NULL; } - xpdev->rdma_ready = true; + xpdev->rdma_ready = 1; return m_ibdev; } static void xsc_remove(struct xsc_core_device *xpdev, void *context) { - pr_err("<=== %s: enter\n", __func__); + pr_err("remove rdma driver\n"); remove_one(xpdev, context); - xpdev->rdma_ready = false; + xpdev->rdma_ready = 0; } static struct xsc_interface xsc_interface = { diff --git a/drivers/infiniband/hw/xsc/mem.c b/drivers/infiniband/hw/xsc/mem.c index f78c715d4ceff8ca0e17068a260ebf32652f51b5..28d3795f035c3546ed90a5e273f7ecb34f13203d 100644 --- a/drivers/infiniband/hw/xsc/mem.c +++ b/drivers/infiniband/hw/xsc/mem.c @@ -8,6 +8,181 @@ #include #include "xsc_ib.h" +static inline int xsc_count_trailing_zeros(unsigned long x) +{ +#define COUNT_TRAILING_ZEROS_0 (-1) + + if (sizeof(x) == 4) + return ffs(x) - 1; + else + return (x != 0) ? 
__ffs(x) : COUNT_TRAILING_ZEROS_0; +} + +int xsc_find_chunk_cont_0(struct xsc_pa_chunk *chunk, + int is_first, + int is_last) +{ + const static int max_count = sizeof(int) << 3; + dma_addr_t pa, end_pa; + u64 va, end_va; + size_t length; + int start_count, end_count; + int va_start_count, va_end_count; + + pa = chunk->pa; + va = chunk->va; + length = chunk->length; + end_pa = pa + length; + end_va = va + length; + start_count = max_count; + end_count = max_count; + + if (!is_first) { + start_count = xsc_count_trailing_zeros((unsigned long)pa); + va_start_count = xsc_count_trailing_zeros(va); + start_count = min_t(int, start_count, va_start_count); + } + + if (!is_last) { + end_count = xsc_count_trailing_zeros((unsigned long)end_pa); + va_end_count = xsc_count_trailing_zeros(end_va); + end_count = min_t(int, end_count, va_end_count); + } + + return start_count > end_count ? end_count : start_count; +} + +int xsc_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt, + int *npages, + int *shift, + u64 **pas) +{ + struct scatterlist *sg; + unsigned long va; + dma_addr_t pa; + struct xsc_pa_chunk *chunk, *tmp; + struct list_head chunk_list; + int i; + int chunk_cnt; + int min_count_0 = sizeof(int) << 3; + int count_0; + int is_first = 0, is_end = 0; + size_t pgsz; + u64 mask; + int err = 0; + int pa_index; + u64 chunk_pa; + int chunk_npages; + unsigned long page_shift = PAGE_SHIFT; + + pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, 0); + + va = (virt >> page_shift) << page_shift; + + INIT_LIST_HEAD(&chunk_list); + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto err_alloc; + } + list_add_tail(&chunk->list, &chunk_list); + + chunk_cnt = 1; + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { + pa = sg_dma_address(sg); + if (i == 0) { + chunk->va = va; + chunk->pa = pa; + chunk->length = sg_dma_len(sg); + va += chunk->length; + continue; + } + + if (pa == chunk->pa + chunk->length) { + chunk->length += 
sg_dma_len(sg); + va += sg_dma_len(sg); + } else { + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto err_alloc; + } + chunk->va = va; + chunk->pa = pa; + chunk->length = sg_dma_len(sg); + va += chunk->length; + list_add_tail(&chunk->list, &chunk_list); + chunk_cnt++; + } + } + + i = 0; + list_for_each_entry(chunk, &chunk_list, list) { + is_first = (i == 0 ? 1 : 0); + is_end = (i == chunk_cnt - 1 ? 1 : 0); + count_0 = xsc_find_chunk_cont_0(chunk, is_first, is_end); + if (count_0 < min_count_0) + min_count_0 = count_0; + i++; + } + + pgsz_bitmap &= GENMASK(min_count_0, 0); + pgsz = rounddown_pow_of_two(pgsz_bitmap); + *shift = ilog2(pgsz); + *npages = 0; + + if (chunk_cnt == 1) { + list_for_each_entry(chunk, &chunk_list, list) { + mask = GENMASK(*shift - 1, min_t(int, page_shift, *shift - 1)); + *npages += DIV_ROUND_UP(chunk->length + (virt & mask), pgsz); + *pas = vmalloc(*npages * sizeof(u64)); + if (!*pas) { + err = -ENOMEM; + goto err_alloc; + } + + chunk_pa = chunk->pa - (virt & mask); + for (i = 0; i < *npages; i++) + (*pas)[i] = chunk_pa + i * pgsz; + } + } else { + list_for_each_entry(chunk, &chunk_list, list) { + *npages += DIV_ROUND_UP(chunk->length, pgsz); + } + + *pas = vmalloc(*npages * sizeof(u64)); + if (!*pas) { + err = -ENOMEM; + goto err_alloc; + } + + pa_index = 0; + list_for_each_entry(chunk, &chunk_list, list) { + chunk_npages = DIV_ROUND_UP(chunk->length, pgsz); + chunk_pa = chunk->pa; + for (i = 0; i < chunk_npages; i++) { + if (pa_index == 0) { + mask = GENMASK(*shift - 1, + min_t(int, page_shift, *shift - 1)); + chunk_pa -= (virt & mask); + } + (*pas)[pa_index] = chunk_pa + i * pgsz; + + pa_index++; + } + } + } + +err_alloc: + list_for_each_entry_safe(chunk, tmp, &chunk_list, list) { + list_del(&chunk->list); + kfree(chunk); + } + return err; +} + /* @umem: umem object to scan * @addr: ib virtual address requested by the user * @count: number of PAGE_SIZE pages covered by umem @@ -16,9 +191,9 @@ * @order: 
log2 of the number of compound pages */ void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, - unsigned long max_page_shift, - int *count, int *shift, - int *ncont, int *order) + unsigned long max_page_shift, + int *count, int *shift, + int *ncont, int *order) { unsigned long tmp; unsigned long m; @@ -74,27 +249,27 @@ void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, } void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, - int *count, int *shift, - int *ncont, int *order) + int *count, int *shift, + int *ncont, int *order) { // no limit for page_shift __xsc_ib_cont_pages(umem, addr, 0, count, shift, ncont, order); } void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, - int page_shift, size_t offset, size_t num_pages, - __be64 *pas, int access_flags, bool need_to_devide) + int page_shift, size_t offset, size_t num_pages, + __be64 *pas, int access_flags, bool need_to_devide) { unsigned long umem_page_shift = PAGE_SHIFT; int shift = page_shift - umem_page_shift; int mask = (1 << shift) - 1; + int i = 0; int k, idx; u64 cur = 0; u64 base; int len; struct scatterlist *sg; int entry; - int i = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { len = sg_dma_len(sg) >> umem_page_shift; @@ -129,7 +304,7 @@ void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, pas[idx] = cpu_to_be64(cur); xsc_ib_dbg(dev, "pas[%d] 0x%llx\n", - i >> shift, be64_to_cpu(pas[idx])); + i >> shift, be64_to_cpu(pas[idx])); } i++; @@ -141,10 +316,10 @@ void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, } void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, - int page_shift, __be64 *pas, int npages, bool need_to_devide) + int page_shift, __be64 *pas, int npages, bool need_to_devide) { return __xsc_ib_populate_pas(dev, umem, page_shift, 0, - npages, pas, 0, need_to_devide); + npages, pas, 0, need_to_devide); } int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) diff --git 
a/drivers/infiniband/hw/xsc/mr.c b/drivers/infiniband/hw/xsc/mr.c index e3804cc6fe3f7291e7a79b3681c837f376aa4f0a..4376478d5060fd76d533de33826c712b2b42c4f6 100644 --- a/drivers/infiniband/hw/xsc/mr.c +++ b/drivers/infiniband/hw/xsc/mr.c @@ -9,402 +9,19 @@ #include #include #include -#include +#include "common/xsc_cmd.h" #include #include "ib_umem_ex.h" #include "xsc_ib.h" static void xsc_invalidate_umem(void *invalidation_cookie, - struct ib_umem_ex *umem, unsigned long addr, size_t size); + struct ib_umem_ex *umem, + unsigned long addr, size_t size); enum { DEF_CACHE_SIZE = 10, }; -static __be64 *mr_align(__be64 *ptr, int align) -{ - unsigned long mask = align - 1; - - return (__be64 *)(((unsigned long)ptr + mask) & ~mask); -} - -static int order2idx(struct xsc_ib_dev *dev, int order) -{ - struct xsc_mr_cache *cache = &dev->cache; - - if (order < cache->ent[0].order) - return 0; - else - return order - cache->ent[0].order; -} - -static int add_keys(struct xsc_ib_dev *dev, int c, int num) -{ - struct device *ddev = dev->ib_dev.dma_device; - struct xsc_mr_cache *cache = &dev->cache; - struct xsc_cache_ent *ent = &cache->ent[c]; - struct xsc_register_mr_mbox_in *in; - struct xsc_ib_mr *mr; - int npages = 1 << ent->order; - int size = sizeof(u64) * npages; - int err = 0; - int i; - - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - for (i = 0; i < num; i++) { - mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - err = -ENOMEM; - goto out; - } - mr->order = ent->order; - mr->pas = kmalloc(size + 0x3f, GFP_KERNEL); - if (!mr->pas) { - kfree(mr); - err = -ENOMEM; - goto out; - } - mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size, - DMA_TO_DEVICE); - if (dma_mapping_error(ddev, mr->dma)) { - kfree(mr->pas); - kfree(mr); - err = -ENOMEM; - goto out; - } - - in->req.acc = XSC_ACCESS_MODE_MTT; - in->req.page_mode = 0; - - err = xsc_core_create_mkey(dev->xdev, &mr->mmr); - if (err) { - xsc_ib_warn(dev, "create mkey failed %d\n", err); - 
dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); - kfree(mr->pas); - kfree(mr); - goto out; - } - in->req.mkey = cpu_to_be32(mr->mmr.key); - err = xsc_core_register_mr(dev->xdev, &mr->mmr, in, - sizeof(*in)); - if (err) { - xsc_ib_warn(dev, "register mr failed %d\n", err); - xsc_core_destroy_mkey(dev->xdev, &mr->mmr); - dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); - kfree(mr->pas); - kfree(mr); - goto out; - } - cache->last_add = jiffies; - - spin_lock(&ent->lock); - list_add_tail(&mr->list, &ent->head); - ent->cur++; - ent->size++; - spin_unlock(&ent->lock); - } - -out: - kfree(in); - return err; -} - -static void remove_keys(struct xsc_ib_dev *dev, int c, int num) -{ - struct device *ddev = dev->ib_dev.dma_device; - struct xsc_mr_cache *cache = &dev->cache; - struct xsc_cache_ent *ent = &cache->ent[c]; - struct xsc_ib_mr *mr; - int size; - int err; - int i; - - for (i = 0; i < num; i++) { - spin_lock(&ent->lock); - if (list_empty(&ent->head)) { - spin_unlock(&ent->lock); - return; - } - mr = list_first_entry(&ent->head, struct xsc_ib_mr, list); - list_del(&mr->list); - ent->cur--; - ent->size--; - spin_unlock(&ent->lock); - err = xsc_core_destroy_mkey(dev->xdev, &mr->mmr); - if (err) { - xsc_ib_warn(dev, "failed destroy mkey\n"); - } else { - size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); - dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); - kfree(mr->pas); - kfree(mr); - } - } -} - -static ssize_t size_write(struct file *filp, const char __user *buf, - size_t count, loff_t *pos) -{ - struct xsc_cache_ent *ent = filp->private_data; - struct xsc_ib_dev *dev = ent->dev; - char lbuf[20]; - u32 var; - int err; - int c; - - if (copy_from_user(lbuf, buf, sizeof(lbuf))) - return -EPERM; - - c = order2idx(dev, ent->order); - lbuf[sizeof(lbuf) - 1] = 0; - - if (kstrtou32(lbuf, 10, &var) != 1) - return -EINVAL; - - if (var < ent->limit) - return -EINVAL; - - if (var > ent->size) { - err = add_keys(dev, c, var - ent->size); - if (err) - return err; - 
} else if (var < ent->size) { - remove_keys(dev, c, ent->size - var); - } - - return count; -} - -static ssize_t size_read(struct file *filp, char __user *buf, size_t count, - loff_t *pos) -{ - struct xsc_cache_ent *ent = filp->private_data; - char lbuf[20]; - int err; - - if (*pos) - return 0; - - err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); - if (err < 0) - return err; - - if (copy_to_user(buf, lbuf, err)) - return -EPERM; - - *pos += err; - - return err; -} - -static const struct file_operations size_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .write = size_write, - .read = size_read, -}; - -static ssize_t limit_write(struct file *filp, const char __user *buf, - size_t count, loff_t *pos) -{ - struct xsc_cache_ent *ent = filp->private_data; - struct xsc_ib_dev *dev = ent->dev; - char lbuf[20]; - u32 var; - int err; - int c; - - if (copy_from_user(lbuf, buf, sizeof(lbuf))) - return -EPERM; - - c = order2idx(dev, ent->order); - lbuf[sizeof(lbuf) - 1] = 0; - - if (kstrtou32(lbuf, 10, &var) != 1) - return -EINVAL; - - if (var > ent->size) - return -EINVAL; - - ent->limit = var; - - if (ent->cur < ent->limit) { - err = add_keys(dev, c, 2 * ent->limit - ent->cur); - if (err) - return err; - } - - return count; -} - -static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, - loff_t *pos) -{ - struct xsc_cache_ent *ent = filp->private_data; - char lbuf[20]; - int err; - - if (*pos) - return 0; - - err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); - if (err < 0) - return err; - - if (copy_to_user(buf, lbuf, err)) - return -EPERM; - - *pos += err; - - return err; -} - -static const struct file_operations limit_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .write = limit_write, - .read = limit_read, -}; - -static int someone_adding(struct xsc_mr_cache *cache) -{ - int i; - - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - if (cache->ent[i].cur < cache->ent[i].limit) - return 1; - } - - return 0; -} - -static void 
__cache_work_func(struct xsc_cache_ent *ent) -{ - struct xsc_ib_dev *dev = ent->dev; - struct xsc_mr_cache *cache = &dev->cache; - int i = order2idx(dev, ent->order); - - if (cache->stopped) - return; - - ent = &dev->cache.ent[i]; - if (ent->cur < 2 * ent->limit) { - add_keys(dev, i, 1); - if (ent->cur < 2 * ent->limit) - queue_work(cache->wq, &ent->work); - } else if (ent->cur > 2 * ent->limit) { - if (!someone_adding(cache) && - time_after(jiffies, cache->last_add + 60 * HZ)) { - remove_keys(dev, i, 1); - if (ent->cur > ent->limit) - queue_work(cache->wq, &ent->work); - } else { - queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ); - } - } -} - -static void delayed_cache_work_func(struct work_struct *work) -{ - struct xsc_cache_ent *ent; - - ent = container_of(work, struct xsc_cache_ent, dwork.work); - __cache_work_func(ent); -} - -static void cache_work_func(struct work_struct *work) -{ - struct xsc_cache_ent *ent; - - ent = container_of(work, struct xsc_cache_ent, work); - __cache_work_func(ent); -} - -static int xsc_mr_cache_debugfs_init(struct xsc_ib_dev *dev) -{ - struct xsc_mr_cache *cache = &dev->cache; - struct xsc_cache_ent *ent; - int i; - - if (!xsc_debugfs_root) - return 0; - - cache->root = debugfs_create_dir("mr_cache", dev->xdev->dev_res->dbg_root); - if (!cache->root) - return -ENOMEM; - - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - ent = &cache->ent[i]; - sprintf(ent->name, "%d", ent->order); - ent->dir = debugfs_create_dir(ent->name, cache->root); - if (!ent->dir) - return -ENOMEM; - - ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, - &size_fops); - if (!ent->fsize) - return -ENOMEM; - - ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, - &limit_fops); - if (!ent->flimit) - return -ENOMEM; - - debugfs_create_u32("cur", 0400, ent->dir, &ent->cur); - debugfs_create_u32("miss", 0600, ent->dir, &ent->miss); - } - - return 0; -} - -int xsc_mr_cache_init(struct xsc_ib_dev *dev) -{ - struct xsc_mr_cache *cache = 
&dev->cache; - struct xsc_cache_ent *ent; - int limit; - int size; - int err; - int i; - - cache->wq = create_singlethread_workqueue("mkey_cache"); - if (!cache->wq) { - xsc_ib_warn(dev, "failed to create work queue\n"); - return -ENOMEM; - } - - for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { - INIT_LIST_HEAD(&cache->ent[i].head); - spin_lock_init(&cache->ent[i].lock); - - ent = &cache->ent[i]; - INIT_LIST_HEAD(&ent->head); - spin_lock_init(&ent->lock); - ent->order = i + 2; - ent->dev = dev; - - if (dev->xdev->profile->mask & XSC_PROF_MASK_MR_CACHE) { - size = dev->xdev->profile->mr_cache[i].size; - limit = dev->xdev->profile->mr_cache[i].limit; - } else { - size = DEF_CACHE_SIZE; - limit = 0; - } - INIT_WORK(&ent->work, cache_work_func); - INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); - ent->limit = limit; - queue_work(cache->wq, &ent->work); - } - - err = xsc_mr_cache_debugfs_init(dev); - if (err) - xsc_ib_warn(dev, "cache debugfs failure\n"); - - return 0; -} - struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) { struct xsc_ib_dev *dev = to_mdev(pd->device); @@ -418,7 +35,6 @@ struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) if (!mr) return ERR_PTR(-ENOMEM); - pr_err("[%s:%d]", __func__, __LINE__); return &mr->ibmr; in = kzalloc(sizeof(*in), GFP_KERNEL); @@ -456,25 +72,18 @@ struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) return ERR_PTR(err); } -void xsc_fill_pas(struct ib_umem *umem, int page_shift, __be64 *pas) +void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) { - struct scatterlist *sg; - int entry; - u64 base; - - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - base = sg_dma_address(sg); - break; - } + int i; - pas[0] = base & (~((1 << page_shift) - 1)); - pas[0] = cpu_to_be64(pas[0]); + for (i = 0; i < npages; i++) + req_pas[i] = cpu_to_be64(pas[i]); } static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, - u64 length, struct ib_umem *umem, - int npages, int page_shift, - int 
access_flags) + u64 length, struct ib_umem *umem, + int npages, u64 *pas, int page_shift, + int access_flags) { struct xsc_ib_dev *dev = to_mdev(pd->device); struct xsc_register_mr_mbox_in *in; @@ -483,8 +92,10 @@ static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, int err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) - return ERR_PTR(-ENOMEM); + if (!mr) { + err = -ENOMEM; + goto err_0; + } inlen = sizeof(*in) + sizeof(*in->req.pas) * npages; in = xsc_vzalloc(inlen); @@ -498,10 +109,7 @@ static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, goto err_2; } - if (npages != 1) - xsc_ib_populate_pas(dev, umem, page_shift, in->req.pas, npages, false); - else - xsc_fill_pas(umem, page_shift, in->req.pas); + xsc_fill_pas(npages, pas, in->req.pas); in->req.acc = convert_access(access_flags); in->req.pa_num = cpu_to_be32(npages); @@ -520,6 +128,7 @@ static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, } mr->umem = umem; xsc_vfree(in); + vfree(pas); xsc_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); @@ -528,33 +137,32 @@ static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, xsc_core_destroy_mkey(dev->xdev, &mr->mmr); err_2: xsc_vfree(in); - err_1: kfree(mr); +err_0: + vfree(pas); return ERR_PTR(err); } struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int access_flags, - struct ib_udata *udata) + u64 virt_addr, int access_flags, + struct ib_udata *udata) { struct xsc_ib_dev *dev = to_mdev(pd->device); struct xsc_ib_mr *mr = NULL; struct ib_umem_ex *umem_ex; struct ib_umem *umem; int page_shift; - int page_shift_adjust; int npages; - int ncont; - int order; + u64 *pas; int err; int using_peer_mem = 0; struct ib_peer_memory_client *ib_peer_mem = NULL; struct xsc_ib_peer_id *xsc_ib_peer_id = NULL; xsc_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", - start, virt_addr, length); + start, virt_addr, length); umem = ib_umem_get(&dev->ib_dev, start, length, 
access_flags); if (IS_ERR(umem)) { @@ -562,7 +170,7 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u8 peer_exists = 0; umem_ex = ib_client_umem_get(pd->uobject->context, - start, length, access_flags, 0, &peer_exists); + start, length, access_flags, 0, &peer_exists); if (!peer_exists) { xsc_ib_dbg(dev, "umem get failed\n"); return (void *)umem; @@ -574,12 +182,12 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto error; } init_completion(&xsc_ib_peer_id->comp); - err = ib_client_umem_activate_invalidation_notifier( - umem_ex, xsc_invalidate_umem, xsc_ib_peer_id); + err = ib_client_umem_activate_invalidation_notifier(umem_ex, + xsc_invalidate_umem, + xsc_ib_peer_id); if (err) goto error; using_peer_mem = 1; - } else { umem_ex = ib_umem_ex(umem); if (IS_ERR(umem_ex)) { @@ -589,33 +197,22 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, } umem = &umem_ex->umem; - xsc_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order); + err = xsc_find_best_pgsz(umem, 0x40211000, start, &npages, &page_shift, &pas); + if (err) { + vfree(pas); + pas = NULL; + xsc_ib_warn(dev, "find best page size failed\n"); + goto error; + } if (!npages) { xsc_ib_warn(dev, "avoid zero region\n"); err = -EINVAL; goto error; } - xsc_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", - npages, ncont, order, page_shift); + xsc_ib_dbg(dev, "npages %d, page_shift %d\n", npages, page_shift); - if (ncont == 1) { - page_shift_adjust = page_shift > XSC_PAGE_SHIFT_2M ? XSC_PAGE_SHIFT_1G : - page_shift > XSC_PAGE_SHIFT_64K ? XSC_PAGE_SHIFT_2M : - page_shift > XSC_PAGE_SHIFT_4K ? XSC_PAGE_SHIFT_64K : XSC_PAGE_SHIFT_4K; - } else { - page_shift_adjust = page_shift >= XSC_PAGE_SHIFT_1G ? XSC_PAGE_SHIFT_1G : - page_shift >= XSC_PAGE_SHIFT_2M ? XSC_PAGE_SHIFT_2M : - page_shift >= XSC_PAGE_SHIFT_64K ? 
XSC_PAGE_SHIFT_64K : XSC_PAGE_SHIFT_4K; - ncont = ncont << (page_shift - page_shift_adjust); - } - - if (using_peer_mem == 1) { - ncont = npages; - page_shift_adjust = PAGE_SHIFT; - } - xsc_ib_dbg(dev, "xsc pageshit=%d, npages=%d\n", page_shift_adjust, ncont); - mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift_adjust, access_flags); + mr = reg_create(pd, virt_addr, length, umem, npages, pas, page_shift, access_flags); if (IS_ERR(mr)) { err = PTR_ERR(mr); goto error; @@ -674,7 +271,7 @@ xsc_ib_dereg_mr_def() err = xsc_core_dereg_mr(dev->xdev, &mr->mmr); if (err) { xsc_ib_warn(dev, "failed to dereg mr 0x%x (%d)\n", - mr->mmr.key, err); + mr->mmr.key, err); return err; } } @@ -682,7 +279,7 @@ xsc_ib_dereg_mr_def() err = xsc_core_destroy_mkey(dev->xdev, &mr->mmr); if (err) { xsc_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", - mr->mmr.key, err); + mr->mmr.key, err); return err; } @@ -699,7 +296,9 @@ xsc_ib_dereg_mr_def() } static void xsc_invalidate_umem(void *invalidation_cookie, - struct ib_umem_ex *umem, unsigned long addr, size_t size) + struct ib_umem_ex *umem, + unsigned long addr, + size_t size) { struct xsc_ib_mr *mr; struct xsc_ib_dev *dev; @@ -762,7 +361,7 @@ static int xsc_set_page(struct ib_mr *ibmr, u64 pa) } int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, - int sg_nents, unsigned int *sg_offset) + int sg_nents, unsigned int *sg_offset) { struct xsc_ib_mr *mmr = to_mmr(ibmr); diff --git a/drivers/infiniband/hw/xsc/peer_mem.c b/drivers/infiniband/hw/xsc/peer_mem.c index 2389eb92df59dad5595e47b3264f535319b27dc6..596a0ca7ca40d98122508115cf12ca4253fe584d 100644 --- a/drivers/infiniband/hw/xsc/peer_mem.c +++ b/drivers/infiniband/hw/xsc/peer_mem.c @@ -4,7 +4,7 @@ * All rights reserved. 
*/ -#include +#include "ib_peer_mem.h" #include #include "ib_umem_ex.h" @@ -169,7 +169,7 @@ static int create_peer_sysfs(struct ib_peer_memory_client *ib_peer_client) /* Dir alreday was created explicitly to get its kernel object for further usage */ ib_peer_client->peer_mem_attr_group.name = NULL; ib_peer_client->kobj = kobject_create_and_add(ib_peer_client->peer_mem->name, - peers_kobj); + peers_kobj); if (!ib_peer_client->kobj) { ret = -EINVAL; @@ -284,7 +284,8 @@ static int ib_peer_insert_context(struct ib_peer_memory_client *ib_peer_client, return 0; } -/* Caller should be holding the peer client lock, specifically, +/* + * Caller should be holding the peer client lock, specifically, * the caller should hold ib_peer_client->lock */ static int ib_peer_remove_context(struct ib_peer_memory_client *ib_peer_client, @@ -304,13 +305,15 @@ static int ib_peer_remove_context(struct ib_peer_memory_client *ib_peer_client, return 1; } -/* ib_peer_create_invalidation_ctx - creates invalidation context for a given umem +/* + * ib_peer_create_invalidation_ctx - creates invalidation context for a given umem * @ib_peer_mem: peer client to be used * @umem: umem struct belongs to that context * @invalidation_ctx: output context */ int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, - struct ib_umem_ex *umem_ex, struct invalidation_ctx **invalidation_ctx) + struct ib_umem_ex *umem_ex, + struct invalidation_ctx **invalidation_ctx) { int ret; struct invalidation_ctx *ctx; @@ -368,6 +371,7 @@ void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, if (!peer_callback && !inflight_invalidation) kfree(invalidation_ctx); } + static int ib_memory_peer_check_mandatory(const struct peer_memory_client *peer_client) { diff --git a/drivers/infiniband/hw/xsc/private_dev.c b/drivers/infiniband/hw/xsc/private_dev.c index ecb3728d71f86d076f136e5573ca4595a175a163..5b4848dd583f6b970b4f6b6507e2f7dd18295d92 100644 --- 
a/drivers/infiniband/hw/xsc/private_dev.c +++ b/drivers/infiniband/hw/xsc/private_dev.c @@ -8,35 +8,63 @@ #include #include #include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/res_obj.h" #include "global.h" -#define FEATURE_ONCHIP_FT_MASK (1<<4) -#define FEATURE_DMA_RW_TBL_MASK (1<<8) -#define FEATURE_PCT_EXP_MASK (1<<9) +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(9) static int xsc_priv_dev_open(struct inode *inode, struct file *file) { - struct xsc_priv_device *priv_dev - = container_of(inode->i_cdev, struct xsc_priv_device, cdev); + struct xsc_priv_device *priv_dev = + container_of(inode->i_cdev, struct xsc_priv_device, cdev); + struct xsc_core_device *xdev = + container_of(priv_dev, struct xsc_core_device, priv_device); + struct xsc_bdf_file *bdf_file; - file->private_data = priv_dev; + bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); + if (!bdf_file) + return -ENOMEM; + INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); + spin_lock_init(&bdf_file->obj_lock); + bdf_file->xdev = xdev; + bdf_file->key = bdf_to_key(pci_domain_nr(xdev->pdev->bus), + xdev->pdev->bus->number, xdev->pdev->devfn); + radix_tree_preload(GFP_KERNEL); + spin_lock(&priv_dev->bdf_lock); + radix_tree_insert(&priv_dev->bdf_tree, bdf_file->key, bdf_file); + spin_unlock(&priv_dev->bdf_lock); + radix_tree_preload_end(); + file->private_data = bdf_file; + + return 0; +} + +static int xsc_priv_dev_release(struct inode *inode, struct file *filp) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + + xsc_close_bdf_file(bdf_file); + spin_lock(&bdf_file->xdev->priv_device.bdf_lock); + radix_tree_delete(&bdf_file->xdev->priv_device.bdf_tree, bdf_file->key); + spin_unlock(&bdf_file->xdev->priv_device.bdf_lock); + kfree(bdf_file); - pr_err("[%s:%d] %s succ\n", - __func__, __LINE__,
priv_dev->device_name); return 0; } static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) { struct xsc_ioctl_mem_info *minfo; struct xsc_ioctl_data_tl *tl; struct xsc_ioctl_mbox_in *in; - struct xsc_mem_entry *mem_ent; + struct xsc_mem_entry *m_ent; char tname[TASK_COMM_LEN]; int in_size; int err = 0; @@ -66,26 +94,27 @@ static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core get_task_comm(tname, current); spin_lock_irq(&priv_dev->mem_lock); - list_for_each_entry(mem_ent, &priv_dev->mem_list, list) { - if ((!strcmp(mem_ent->task_name, tname)) && - (mem_ent->mem_info.mem_num == minfo->mem_num) && - (mem_ent->mem_info.size == minfo->size)) { - if ((mem_ent->mem_info.phy_addr == minfo->phy_addr) && - (mem_ent->mem_info.vir_addr == minfo->vir_addr)) { + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num && + m_ent->mem_info.size == minfo->size) { + if (m_ent->mem_info.phy_addr == minfo->phy_addr && + m_ent->mem_info.vir_addr == minfo->vir_addr) { lfound = 1; - list_del(&mem_ent->list); - } else + list_del(&m_ent->list); + } else { err = -ENOMEM; + } break; } } spin_unlock_irq(&priv_dev->mem_lock); if (lfound) { - dma_free_coherent(&(xdev->pdev->dev), - minfo->size, - (void *)minfo->vir_addr, - minfo->phy_addr); + dma_free_coherent(&xdev->pdev->dev, + minfo->size, + (void *)minfo->vir_addr, + minfo->phy_addr); } } else { kvfree(in); @@ -103,13 +132,15 @@ static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core return err; } -static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, + 
struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) { struct xsc_ioctl_mem_info *minfo; struct xsc_ioctl_data_tl *tl; struct xsc_ioctl_mbox_in *in; - struct xsc_mem_entry *mem_ent; + struct xsc_mem_entry *m_ent; char tname[TASK_COMM_LEN]; u64 vaddr = 0; u64 paddr = 0; @@ -141,16 +172,16 @@ static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, struct xsc_cor get_task_comm(tname, current); spin_lock_irq(&priv_dev->mem_lock); - list_for_each_entry(mem_ent, &priv_dev->mem_list, list) { - if ((!strcmp(mem_ent->task_name, tname)) && - (mem_ent->mem_info.mem_num == minfo->mem_num)) { - if (mem_ent->mem_info.size == minfo->size) { - minfo->phy_addr = mem_ent->mem_info.phy_addr; - minfo->vir_addr = mem_ent->mem_info.vir_addr; + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num) { + if (m_ent->mem_info.size == minfo->size) { + minfo->phy_addr = m_ent->mem_info.phy_addr; + minfo->vir_addr = m_ent->mem_info.vir_addr; lfound = 1; } else { needfree = 1; - list_del(&mem_ent->list); + list_del(&m_ent->list); } break; } @@ -158,14 +189,14 @@ static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, struct xsc_cor spin_unlock_irq(&priv_dev->mem_lock); if (needfree) { - dma_free_coherent(&(xdev->pdev->dev), - mem_ent->mem_info.size, - (void *)mem_ent->mem_info.vir_addr, - mem_ent->mem_info.phy_addr); + dma_free_coherent(&xdev->pdev->dev, + m_ent->mem_info.size, + (void *)m_ent->mem_info.vir_addr, + m_ent->mem_info.phy_addr); } if (!lfound) { - vaddr = (u64)dma_alloc_coherent(&(xdev->pdev->dev), + vaddr = (u64)dma_alloc_coherent(&xdev->pdev->dev, minfo->size, (dma_addr_t *)&paddr, GFP_KERNEL); @@ -173,18 +204,18 @@ static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, struct xsc_cor memset((void *)vaddr, 0, minfo->size); minfo->phy_addr = paddr; minfo->vir_addr = vaddr; - mem_ent = kzalloc(sizeof(struct 
xsc_mem_entry), GFP_KERNEL); - if (!mem_ent) { + m_ent = kzalloc(sizeof(*m_ent), GFP_KERNEL); + if (!m_ent) { kvfree(in); return -ENOMEM; } - strcpy(mem_ent->task_name, tname); - mem_ent->mem_info.mem_num = minfo->mem_num; - mem_ent->mem_info.size = minfo->size; - mem_ent->mem_info.phy_addr = paddr; - mem_ent->mem_info.vir_addr = vaddr; + strcpy(m_ent->task_name, tname); + m_ent->mem_info.mem_num = minfo->mem_num; + m_ent->mem_info.size = minfo->size; + m_ent->mem_info.phy_addr = paddr; + m_ent->mem_info.vir_addr = vaddr; spin_lock_irq(&priv_dev->mem_lock); - list_add(&mem_ent->list, &priv_dev->mem_list); + list_add(&m_ent->list, &priv_dev->mem_list); spin_unlock_irq(&priv_dev->mem_lock); } else { kvfree(in); @@ -205,16 +236,14 @@ static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, struct xsc_cor static long xsc_priv_dev_ioctl_mem(struct file *filp, unsigned long arg) { - struct xsc_priv_device *priv_dev = filp->private_data; - struct xsc_core_device *xdev; + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_priv_device *priv_dev = &xdev->priv_device; struct xsc_ioctl_hdr __user *user_hdr = (struct xsc_ioctl_hdr __user *)arg; struct xsc_ioctl_hdr hdr; int err; - /* get xdev */ - xdev = container_of(priv_dev, struct xsc_core_device, priv_device); - err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) return -EFAULT; @@ -247,34 +276,31 @@ static int xsc_priv_modify_qp(struct xsc_core_device *xdev, void *in, void *out) tl = (struct xsc_ioctl_data_tl *)out; resp = (struct xsc_ioctl_qp_range *)(tl + 1); xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", - resp->qpn, resp->num, resp->opcode); + resp->qpn, resp->num, resp->opcode); if (resp->num == 0) { - xsc_core_dbg(xdev, "xsc_ioctl_qp_range: resp->num ==0\n"); + xsc_core_err(xdev, "xsc_ioctl_qp_range: resp->num == 0\n"); return 0; } qpn = resp->qpn; insize = sizeof(struct xsc_modify_qp_mbox_in); mailin = 
kvzalloc(insize, GFP_KERNEL); - if (!mailin) { - xsc_core_dbg(xdev, "xsc_ioctl_qp_range: enomem\n"); + if (!mailin) return -ENOMEM; - } if (resp->opcode == XSC_CMD_OP_RTR2RTS_QP) { for (i = 0; i < resp->num; i++) { mailin->hdr.opcode = cpu_to_be16(XSC_CMD_OP_RTR2RTS_QP); - mailin->qpn = cpu_to_be32(qpn + 0); + mailin->qpn = cpu_to_be32(qpn + i); ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); } } - kvfree(mailin); return ret; } static int xsc_priv_dev_ioctl_get_phy(struct xsc_core_device *xdev, - void *in, void *out) + void *in, void *out) { int ret = 0; struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; @@ -312,17 +338,18 @@ static int xsc_priv_dev_ioctl_get_phy(struct xsc_core_device *xdev, (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", resp->phy_port, - resp->func_id, resp->logic_in_port, - resp->mac_phy_port, resp->mac_logic_in_port, - resp->lag_id); - resp->funcid_encode[0] = XSC_PCIE0_VF0_FUNC_ID; - resp->funcid_encode[1] = XSC_PCIE0_VF_FUNC_ID_END; - resp->funcid_encode[2] = XSC_PCIE0_PF0_FUNC_ID; - resp->funcid_encode[3] = XSC_PCIE0_PF_FUNC_ID_END; - resp->funcid_encode[4] = XSC_PCIE1_VF0_FUNC_ID; - resp->funcid_encode[5] = XSC_PCIE1_VF_FUNC_ID_END; - resp->funcid_encode[6] = XSC_PCIE1_PF0_FUNC_ID; - resp->funcid_encode[7] = XSC_PCIE1_PF_FUNC_ID_END; + resp->func_id, resp->logic_in_port, + resp->mac_phy_port, resp->mac_logic_in_port, + resp->lag_id); + resp->funcid[0] = xdev->caps.funcid[0]; + resp->funcid[1] = xdev->caps.funcid[1]; + resp->funcid[2] = xdev->caps.funcid[2]; + resp->funcid[3] = xdev->caps.funcid[3]; + resp->funcid[4] = xdev->caps.funcid[4]; + resp->funcid[5] = xdev->caps.funcid[5]; + resp->funcid[6] = xdev->caps.funcid[6]; + resp->funcid[7] = xdev->caps.funcid[7]; + resp->hca_core_clock = xdev->caps.hca_core_clock; break; case XSC_IOCTL_OP_GET_VF_INFO: @@ -348,7 +375,7 @@ static int 
xsc_priv_dev_ioctl_get_global_pcp(struct xsc_core_device *xdev, void int ret = 0; struct xsc_ioctl_global_pcp *resp = (struct xsc_ioctl_global_pcp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) { + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) { ret = -EOPNOTSUPP; return ret; } @@ -362,7 +389,7 @@ static int xsc_priv_dev_ioctl_get_global_dscp(struct xsc_core_device *xdev, void int ret = 0; struct xsc_ioctl_global_dscp *resp = (struct xsc_ioctl_global_dscp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) { + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) { ret = -EOPNOTSUPP; return ret; } @@ -376,7 +403,7 @@ static int xsc_priv_dev_ioctl_set_global_pcp(struct xsc_core_device *xdev, void int ret = 0; struct xsc_ioctl_global_pcp *req = (struct xsc_ioctl_global_pcp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) { + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) { ret = -EOPNOTSUPP; return ret; } @@ -390,7 +417,7 @@ static int xsc_priv_dev_ioctl_set_global_dscp(struct xsc_core_device *xdev, void int ret = 0; struct xsc_ioctl_global_dscp *req = (struct xsc_ioctl_global_dscp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) { + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) { ret = -EOPNOTSUPP; return ret; } @@ -400,7 +427,7 @@ static int xsc_priv_dev_ioctl_set_global_dscp(struct xsc_core_device *xdev, void } int xsc_priv_dev_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size) + int out_size) { int opcode, ret = 0; struct xsc_ioctl_attr *hdr; @@ -435,15 +462,15 @@ int xsc_priv_dev_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, break; } - xsc_core_dbg(xdev, "%s failed ret=%u\n", __func__, ret); + xsc_core_dbg(xdev, "xsc_priv_dev exec_ioctl.ret=%u\n", ret); return ret; } static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) { - struct xsc_priv_device *priv_dev = filp->private_data; - struct xsc_core_device *xdev; + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = 
bdf_file->xdev; struct xsc_ioctl_hdr __user *user_hdr = (struct xsc_ioctl_hdr __user *)arg; struct xsc_ioctl_hdr hdr; @@ -479,8 +506,9 @@ static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) kvfree(in); return -EFAULT; } - xdev = container_of(priv_dev, struct xsc_core_device, priv_device); - err = xsc_priv_dev_exec_ioctl(xdev, &in->attr, (in_size-sizeof(u32)), in->attr.data, + err = xsc_priv_dev_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, hdr.attr.length); in->attr.error = err; if (copy_to_user((void *)arg, in, in_size)) @@ -489,8 +517,77 @@ static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) return err; } -static int xsc_ioctl_flow_cmdq(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct 
xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; + + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) { struct xsc_ioctl_mbox_in *in; struct xsc_ioctl_mbox_out *out; @@ -511,6 +608,12 @@ static int xsc_ioctl_flow_cmdq(struct xsc_priv_device *priv_dev, struct xsc_core return -EFAULT; } + err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; out = kvzalloc(out_size, GFP_KERNEL); if (!out) { @@ -519,7 +622,7 @@ static int xsc_ioctl_flow_cmdq(struct xsc_priv_device *priv_dev, struct xsc_core } memcpy(out->data, in->data, hdr->attr.length); out->len = in->len; - err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); hdr->attr.error = __be32_to_cpu(out->error); if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) @@ -532,166 +635,10 @@ static int xsc_ioctl_flow_cmdq(struct xsc_priv_device *priv_dev, struct xsc_core return err; } -static int xsc_ioctl_qos(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, - u16 expect_resp_size, void (*encode)(void *, u32), void (*decode)(void *)) -{ - struct xsc_qos_mbox_in *in; - struct xsc_qos_mbox_out *out; - u16 user_size; - int err; - - user_size = expect_req_size 
> expect_resp_size ? expect_req_size : expect_resp_size; - if (hdr->attr.length != user_size) - return -EINVAL; - - in = kvzalloc(sizeof(struct xsc_qos_mbox_in) + expect_req_size, GFP_KERNEL); - if (!in) - goto err_in; - out = kvzalloc(sizeof(struct xsc_qos_mbox_out) + expect_resp_size, GFP_KERNEL); - if (!out) - goto err_out; - - err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); - if (err) - goto err; - - in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - in->req_prfx.mac_port = xdev->mac_port; - - if (encode) - encode((void *)in->data, xdev->mac_port); - - err = xsc_cmd_exec( - xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); - - hdr->attr.error = out->hdr.status; - if (decode) - decode((void *)out->data); - - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - goto err; - if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) - goto err; - - kvfree(in); - kvfree(out); - return 0; - -err: - kvfree(out); -err_out: - kvfree(in); -err_in: - return -EFAULT; -} - -static int xsc_ioctl_cc(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, - u16 expect_resp_size, void (*encode)(void *, u32), void (*decode)(void *)) -{ - struct xsc_cc_mbox_in *in; - struct xsc_cc_mbox_out *out; - u16 user_size; - int err; - - user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; - if (hdr->attr.length != user_size) - return -EINVAL; - - in = kvzalloc(sizeof(struct xsc_cc_mbox_in) + expect_req_size, GFP_KERNEL); - if (!in) - goto err_in; - out = kvzalloc(sizeof(struct xsc_cc_mbox_out) + expect_resp_size, GFP_KERNEL); - if (!out) - goto err_out; - - err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); - if (err) - goto err; - - in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - if (encode) - encode((void *)in->data, xdev->mac_port); - - err = xsc_cmd_exec( - xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); - - hdr->attr.error = out->hdr.status; - if (decode) - decode((void *)out->data); - - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - goto err; - if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) - goto err; - - kvfree(in); - kvfree(out); - return 0; - -err: - kvfree(out); -err_out: - kvfree(in); -err_in: - return -EFAULT; -} - -static int xsc_ioctl_hwconfig(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, - u16 expect_resp_size, void (*encode)(void *, u32), void (*decode)(void *)) -{ - struct xsc_hwc_mbox_in *in; - struct xsc_hwc_mbox_out *out; - u16 user_size; - int err; - - user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; - if (hdr->attr.length != user_size) - return -EINVAL; - - in = kvzalloc(sizeof(struct xsc_hwc_mbox_in) + expect_req_size, GFP_KERNEL); - if (!in) - goto err_in; - out = kvzalloc(sizeof(struct xsc_hwc_mbox_out) + expect_resp_size, GFP_KERNEL); - if (!out) - goto err_out; - - err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); - if (err) - goto err; - - in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - if (encode) - encode((void *)in->data, xdev->mac_port); - - err = xsc_cmd_exec( - xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); - - hdr->attr.error = out->hdr.status; - if (decode) - decode((void *)out->data); - - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - goto err; - if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) - goto err; - - kvfree(in); - kvfree(out); - return 0; - -err: - kvfree(out); -err_out: - kvfree(in); -err_in: - return -EFAULT; -} - -static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) { struct xsc_modify_raw_qp_mbox_in *in; struct xsc_modify_raw_qp_mbox_out *out; @@ -708,15 +655,15 @@ static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, struct xsc_ goto err_out; err = copy_from_user(&in->req, user_hdr->attr.data, - sizeof(struct xsc_modify_raw_qp_request)); + sizeof(struct xsc_modify_raw_qp_request)); if (err) goto err; in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - in->pcie_no = xsc_get_pcie_no(); + in->pcie_no = g_xsc_pcie_no; err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), - out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); hdr->attr.error = 
__be32_to_cpu(out->hdr.status); @@ -735,263 +682,58 @@ static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, struct xsc_ return -EFAULT; } -static void encode_rlimit_set(void *data, u32 mac_port) -{ - struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *) data; - - req->rate_cir = __cpu_to_be32(req->rate_cir); - req->limit_id = __cpu_to_be32(req->limit_id); -} - -static void decode_rlimit_get(void *data) -{ - struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *) data; - int i; - - for (i = 0; i <= QOS_PRIO_MAX; i++) - resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]); - - resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); -} - -static void encode_cc_cmd_enable_rp(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_enable_rp *cc_cmd = (struct xsc_cc_cmd_enable_rp *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_enable_np(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_enable_np *cc_cmd = (struct xsc_cc_cmd_enable_np *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_init_alpha(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_init_alpha *cc_cmd = (struct xsc_cc_cmd_init_alpha *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->alpha = __cpu_to_be32(cc_cmd->alpha); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_g(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_g *cc_cmd = (struct xsc_cc_cmd_g *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->g = __cpu_to_be32(cc_cmd->g); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void 
encode_cc_cmd_ai(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_ai *cc_cmd = (struct xsc_cc_cmd_ai *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->ai = __cpu_to_be32(cc_cmd->ai); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_hai(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_hai *cc_cmd = (struct xsc_cc_cmd_hai *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->hai = __cpu_to_be32(cc_cmd->hai); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_th(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_th *cc_cmd = (struct xsc_cc_cmd_th *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->threshold = __cpu_to_be32(cc_cmd->threshold); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_bc(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_bc *cc_cmd = (struct xsc_cc_cmd_bc *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->bytecount = __cpu_to_be32(cc_cmd->bytecount); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_cnp_opcode(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_opcode *cc_cmd = (struct xsc_cc_cmd_cnp_opcode *) data; - - cc_cmd->opcode = __cpu_to_be32(cc_cmd->opcode); -} - -static void encode_cc_cmd_cnp_bth_b(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_bth_b *cc_cmd = (struct xsc_cc_cmd_cnp_bth_b *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->bth_b = __cpu_to_be32(cc_cmd->bth_b); -} - -static void encode_cc_cmd_cnp_bth_f(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_bth_f *cc_cmd = (struct xsc_cc_cmd_cnp_bth_f *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->bth_f = 
__cpu_to_be32(cc_cmd->bth_f); -} - -static void encode_cc_cmd_cnp_ecn(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_ecn *cc_cmd = (struct xsc_cc_cmd_cnp_ecn *) data; - - cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); -} - -static void encode_cc_cmd_data_ecn(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_data_ecn *cc_cmd = (struct xsc_cc_cmd_data_ecn *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); -} - -static void encode_cc_cmd_cnp_tx_interval(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_tx_interval *cc_cmd = (struct xsc_cc_cmd_cnp_tx_interval *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->interval = __cpu_to_be32(cc_cmd->interval); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_evt_rsttime(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_evt_rsttime *cc_cmd = - (struct xsc_cc_cmd_evt_rsttime *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->period = __cpu_to_be32(cc_cmd->period); -} - -static void encode_cc_cmd_cnp_dscp(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_dscp *cc_cmd = (struct xsc_cc_cmd_cnp_dscp *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->dscp = __cpu_to_be32(cc_cmd->dscp); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_cnp_pcp(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_cnp_pcp *cc_cmd = (struct xsc_cc_cmd_cnp_pcp *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->pcp = __cpu_to_be32(cc_cmd->pcp); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void encode_cc_cmd_evt_period_alpha(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_evt_period_alpha *cc_cmd = (struct xsc_cc_cmd_evt_period_alpha *) data; - - cc_cmd->cmd = 
__cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->period = __cpu_to_be32(cc_cmd->period); -} - -static void encode_cc_get_cfg(void *data, u32 mac_port) +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, + void *in, unsigned int inlen, void *out, int opcode) { - struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); - cc_cmd->section = __cpu_to_be32(mac_port); -} - -static void decode_cc_get_cfg(void *data) -{ - struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *) data; - - cc_cmd->cmd = __be16_to_cpu(cc_cmd->cmd); - cc_cmd->len = __be16_to_cpu(cc_cmd->len); - cc_cmd->enable_rp = __be32_to_cpu(cc_cmd->enable_rp); - cc_cmd->enable_np = __be32_to_cpu(cc_cmd->enable_np); - cc_cmd->init_alpha = __be32_to_cpu(cc_cmd->init_alpha); - cc_cmd->g = __be32_to_cpu(cc_cmd->g); - cc_cmd->ai = __be32_to_cpu(cc_cmd->ai); - cc_cmd->hai = __be32_to_cpu(cc_cmd->hai); - cc_cmd->threshold = __be32_to_cpu(cc_cmd->threshold); - cc_cmd->bytecount = __be32_to_cpu(cc_cmd->bytecount); - cc_cmd->opcode = __be32_to_cpu(cc_cmd->opcode); - cc_cmd->bth_b = __be32_to_cpu(cc_cmd->bth_b); - cc_cmd->bth_f = __be32_to_cpu(cc_cmd->bth_f); - cc_cmd->cnp_ecn = __be32_to_cpu(cc_cmd->cnp_ecn); - cc_cmd->data_ecn = __be32_to_cpu(cc_cmd->data_ecn); - cc_cmd->cnp_tx_interval = __be32_to_cpu(cc_cmd->cnp_tx_interval); - cc_cmd->evt_period_rsttime = __be32_to_cpu(cc_cmd->evt_period_rsttime); - cc_cmd->cnp_dscp = __be32_to_cpu(cc_cmd->cnp_dscp); - cc_cmd->cnp_pcp = __be32_to_cpu(cc_cmd->cnp_pcp); - cc_cmd->evt_period_alpha = __be32_to_cpu(cc_cmd->evt_period_alpha); - cc_cmd->section = __be32_to_cpu(cc_cmd->section); -} - -static void encode_cc_get_stat(void *data, u32 mac_port) -{ - struct xsc_cc_cmd_get_stat *cc_cmd = (struct xsc_cc_cmd_get_stat *) data; - - cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); - cc_cmd->len = __cpu_to_be16(cc_cmd->len); 
- cc_cmd->section = __cpu_to_be32(mac_port); -} + unsigned int idx; -static void decode_cc_get_stat(void *data) -{ - struct xsc_cc_cmd_stat *cc_cmd = (struct xsc_cc_cmd_stat *) data; - - cc_cmd->cnp_handled = __be32_to_cpu(cc_cmd->cnp_handled); - cc_cmd->alpha_recovery = __be32_to_cpu(cc_cmd->alpha_recovery); - cc_cmd->reset_timeout = __be32_to_cpu(cc_cmd->reset_timeout); - cc_cmd->reset_bytecount = __be32_to_cpu(cc_cmd->reset_bytecount); + switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_MKEY: + idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); + xsc_destroy_mr_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_CQ: + idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); + xsc_alloc_cq_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_CQ: + idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); + xsc_destroy_cq_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_QP: + idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); + xsc_alloc_qp_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_QP: + idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_destroy_qp_obj(file, idx); + break; + default: + break; + } } static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg) { - struct xsc_priv_device *priv_dev = filp->private_data; - struct xsc_core_device *xdev; + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_priv_device *priv_dev = &bdf_file->xdev->priv_device; + struct xsc_core_device *xdev = bdf_file->xdev; struct xsc_ioctl_hdr __user 
*user_hdr = (struct xsc_ioctl_hdr __user *)arg; struct xsc_ioctl_hdr hdr; int err; - void *in; - void *out; - - /* get xdev */ - xdev = container_of(priv_dev, struct xsc_core_device, priv_device); err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) @@ -1004,180 +746,32 @@ static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg) /* check ioctl cmd */ switch (hdr.attr.opcode) { case XSC_CMD_OP_IOCTL_FLOW: - return xsc_ioctl_flow_cmdq(priv_dev, xdev, user_hdr, &hdr); - case XSC_CMD_OP_CREATE_QP: - break; - case XSC_CMD_OP_DESTROY_QP: - break; - case XSC_CMD_OP_CREATE_CQ: - break; - case XSC_CMD_OP_DESTROY_CQ: - break; - case XSC_CMD_OP_CREATE_MULTI_QP: - break; - case XSC_CMD_OP_ALLOC_MULTI_VIRTQ_CQ: - break; - case XSC_CMD_OP_RELEASE_MULTI_VIRTQ_CQ: - break; - case XSC_CMD_OP_ALLOC_MULTI_VIRTQ: - break; - case XSC_CMD_OP_RELEASE_MULTI_VIRTQ: - break; - case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_dscp_pmt_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_dscp_pmt_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_trust_mode_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_trust_mode_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_PCP_PMT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_pcp_pmt_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_PCP_PMT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_pcp_pmt_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_default_pri_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: - return xsc_ioctl_qos( - 
priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_default_pri_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_PFC: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_pfc_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_PFC: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_pfc_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_set), 0, - encode_rlimit_set, NULL); - case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_get), - sizeof(struct xsc_rate_limit_get), NULL, decode_rlimit_get); - case XSC_CMD_OP_IOCTL_SET_SP: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_sp_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_SP: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_sp_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_WEIGHT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_weight_set), 0, - NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_WEIGHT: - return xsc_ioctl_qos( - priv_dev, xdev, user_hdr, &hdr, 0, sizeof(struct xsc_weight_get), - NULL, NULL); - case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_enable_rp), - 0, encode_cc_cmd_enable_rp, NULL); - case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_enable_np), - 0, encode_cc_cmd_enable_np, NULL); - case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_init_alpha), - 0, encode_cc_cmd_init_alpha, NULL); - case XSC_CMD_OP_IOCTL_SET_G: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_g), - 0, encode_cc_cmd_g, NULL); - case XSC_CMD_OP_IOCTL_SET_AI: - return xsc_ioctl_cc( - 
priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_ai), - 0, encode_cc_cmd_ai, NULL); - case XSC_CMD_OP_IOCTL_SET_HAI: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_hai), - 0, encode_cc_cmd_hai, NULL); - case XSC_CMD_OP_IOCTL_SET_TH: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_th), - 0, encode_cc_cmd_th, NULL); - case XSC_CMD_OP_IOCTL_SET_BC_TH: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_bc), - 0, encode_cc_cmd_bc, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_opcode), - 0, encode_cc_cmd_cnp_opcode, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_bth_b), - 0, encode_cc_cmd_cnp_bth_b, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_bth_f), - 0, encode_cc_cmd_cnp_bth_f, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_ECN: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_ecn), - 0, encode_cc_cmd_cnp_ecn, NULL); - case XSC_CMD_OP_IOCTL_SET_DATA_ECN: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_data_ecn), - 0, encode_cc_cmd_data_ecn, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_tx_interval), - 0, encode_cc_cmd_cnp_tx_interval, NULL); - case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, - sizeof(struct xsc_cc_cmd_evt_rsttime), 0, encode_cc_cmd_evt_rsttime, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_dscp), - 0, encode_cc_cmd_cnp_dscp, NULL); - case XSC_CMD_OP_IOCTL_SET_CNP_PCP: - return xsc_ioctl_cc( - priv_dev, 
xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_pcp), - 0, encode_cc_cmd_cnp_pcp, NULL); - case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_evt_period_alpha), - 0, encode_cc_cmd_evt_period_alpha, NULL); - case XSC_CMD_OP_IOCTL_GET_CC_CFG: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_cfg), - sizeof(struct xsc_cc_cmd_get_cfg), encode_cc_get_cfg, decode_cc_get_cfg); - case XSC_CMD_OP_IOCTL_GET_CC_STAT: - return xsc_ioctl_cc( - priv_dev, xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat), - sizeof(struct xsc_cc_cmd_stat), encode_cc_get_stat, decode_cc_get_stat); - case XSC_CMD_OP_IOCTL_SET_HWC: - return xsc_ioctl_hwconfig(priv_dev, xdev, user_hdr, &hdr, - sizeof(struct hwc_set_t), 0, NULL, NULL); - case XSC_CMD_OP_IOCTL_GET_HWC: - return xsc_ioctl_hwconfig(priv_dev, xdev, user_hdr, &hdr, - sizeof(struct hwc_get_t), sizeof(struct hwc_get_t), NULL, NULL); + return xsc_ioctl_flow_cmdq(bdf_file, user_hdr, &hdr); case XSC_CMD_OP_MODIFY_RAW_QP: return xsc_ioctl_modify_raw_qp(priv_dev, xdev, user_hdr, &hdr); default: return -EINVAL; } +} + +static long xsc_priv_dev_ioctl_cmdq_raw(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; in = kvzalloc(hdr.attr.length, GFP_KERNEL); if (!in) @@ -1195,6 +789,7 @@ static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg) } xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length); + xsc_pci_ctrl_cmdq_handle_res_obj(bdf_file, in, hdr.attr.length, out, hdr.attr.opcode); if (copy_to_user((void 
*)user_hdr, &hdr, sizeof(hdr))) err = -EFAULT; @@ -1222,10 +817,12 @@ static long xsc_priv_dev_ioctl(struct file *filp, unsigned int cmd, unsigned lon case XSC_IOCTL_MEM: err = xsc_priv_dev_ioctl_mem(filp, arg); break; + case XSC_IOCTL_CMDQ_RAW: + err = xsc_priv_dev_ioctl_cmdq_raw(filp, arg); + break; default: err = -EFAULT; break; - } return err; } @@ -1235,6 +832,7 @@ static const struct file_operations dev_fops = { .open = xsc_priv_dev_open, .unlocked_ioctl = xsc_priv_dev_ioctl, .compat_ioctl = xsc_priv_dev_ioctl, + .release = xsc_priv_dev_release, }; int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev) @@ -1249,7 +847,7 @@ int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev) ret = alloc_chrdev_region(&priv_dev->devno, 0, 1, priv_dev->device_name); if (ret) { xsc_core_err(dev, "%s cant't get major %d\n", - priv_dev->device_name, MAJOR(priv_dev->devno)); + priv_dev->device_name, MAJOR(priv_dev->devno)); return ret; } @@ -1259,17 +857,20 @@ int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev) ret = cdev_add(&priv_dev->cdev, priv_dev->devno, 1); if (ret) { xsc_core_err(dev, "%s cdev_add error ret:%d major:%d\n", - priv_dev->device_name, ret, MAJOR(priv_dev->devno)); + priv_dev->device_name, ret, MAJOR(priv_dev->devno)); return ret; } priv_dev->priv_class = class_create(THIS_MODULE, priv_dev->device_name); device_create(priv_dev->priv_class, NULL, priv_dev->devno, - NULL, "%s", priv_dev->device_name); + NULL, "%s", priv_dev->device_name); INIT_LIST_HEAD(&priv_dev->mem_list); spin_lock_init(&priv_dev->mem_lock); + INIT_RADIX_TREE(&priv_dev->bdf_tree, GFP_ATOMIC); + spin_lock_init(&priv_dev->bdf_lock); + xsc_core_dbg(dev, "init success\n"); return 0; @@ -1279,6 +880,9 @@ void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) { struct xsc_priv_device *priv_dev; struct cdev *char_dev; + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; if (!dev || 
!ib_dev) { pr_err("[%s:%d] device is null pointer\n", __func__, __LINE__); @@ -1292,6 +896,14 @@ void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) if (!char_dev) return; + spin_lock(&priv_dev->bdf_lock); + radix_tree_for_each_slot(slot, &priv_dev->bdf_tree, &iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&priv_dev->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&priv_dev->bdf_lock); device_destroy(priv_dev->priv_class, priv_dev->devno); cdev_del(&priv_dev->cdev); unregister_chrdev_region(priv_dev->devno, 1); diff --git a/drivers/infiniband/hw/xsc/qp.c b/drivers/infiniband/hw/xsc/qp.c index a744d70722f9bdffce30f170383823c7aa6c07b2..3874a1e771ff8ba388039766b4588a7608e49fda 100644 --- a/drivers/infiniband/hw/xsc/qp.c +++ b/drivers/infiniband/hw/xsc/qp.c @@ -9,8 +9,8 @@ #include "xsc_ib.h" #include "global.h" #include "user.h" -#include -#include +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" #include #include #include @@ -24,8 +24,15 @@ enum { XSC_IB_CACHE_LINE_SIZE = 64, }; -#define LAG_PORT_NUM_MASK 0xffff0000 -#define UDP_SPORT_MASK 0x0000ffff +#define LAG_PORT_NUM_MASK_EN 0x80000000 +#define LAG_PORT_NUM_MASK_EN_OFFSET 31 +#define LAG_PORT_NUM_MASK 0x30000 +#define LAG_PORT_NUM_OFFSET 16 + +#define UDP_SPORT_MASK_EN 0x40000000 +#define UDP_SPORT_MASK_EN_OFFSET 30 +#define UDP_SPORT_MASK 0xffff +#define UDP_SPORT_OFFSET 0 static const u32 xsc_ib_opcode[] = { [IB_WR_SEND] = XSC_MSG_OPCODE_SEND, @@ -91,7 +98,7 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos) } static inline void set_remote_addr_seg(struct xsc_wqe_data_seg *remote_seg, - u32 msg_len, u64 remote_addr, u32 rkey) + u32 msg_len, u64 remote_addr, u32 rkey) { remote_seg->in_line = 0; WR_LE_32(remote_seg->seg_len, msg_len); @@ -219,7 +226,7 @@ static int calc_sq_size(struct xsc_ib_dev *dev, struct ib_qp_init_attr *attr, qp->sq.ds_cnt = qp->sq.wqe_cnt << 
(dev->xdev->caps.send_wqe_shift - XSC_BASE_WQE_SHIFT); wq_size = qp->sq.wqe_cnt * wqe_size; qp->sq.wqe_shift = ilog2(wqe_size); - qp->sq.max_gs = attr->cap.max_send_wr; + qp->sq.max_gs = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - XSC_RADDR_SEG_NUM; qp->sq.max_post = qp->sq.wqe_cnt; return wq_size; @@ -265,9 +272,9 @@ static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd, err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); xsc_ib_dbg(dev, - "buf_addr:0x%lx db_addr:0x%lx sq cnt:%u, rq cnt:%u, rq shift:%u\n", - (uintptr_t)ucmd.buf_addr, (uintptr_t)ucmd.db_addr, - ucmd.sq_wqe_count, ucmd.rq_wqe_count, ucmd.rq_wqe_shift); + "buf_addr:0x%lx db_addr:0x%lx sq cnt:%u, rq cnt:%u, rq shift:%u\n", + (uintptr_t)ucmd.buf_addr, (uintptr_t)ucmd.db_addr, + ucmd.sq_wqe_count, ucmd.rq_wqe_count, ucmd.rq_wqe_shift); if (err) { xsc_ib_dbg(dev, "copy failed\n"); return err; @@ -292,7 +299,7 @@ static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd, } xsc_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, - &ncont, NULL); + &ncont, NULL); if (ncont != npages) { // TODO: peer memory support failed page_shift = PAGE_SHIFT; @@ -306,7 +313,7 @@ static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd, goto err_umem; } xsc_ib_dbg(dev, "npage:%d, page_shift:%d, ncont:%d, offset:%d, hw_npages %d\n", - npages, page_shift, ncont, offset, hw_npages); + npages, page_shift, ncont, offset, hw_npages); *inlen = sizeof(**in) + sizeof(*((*in)->req.pas)) * hw_npages; *in = xsc_vzalloc(*inlen); @@ -330,6 +337,7 @@ static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd, ib_umem_release(qp->umem); err_uuar: +// free_uuar(&context->uuari, uuarn); return err; } @@ -339,6 +347,8 @@ static void destroy_qp_user(struct ib_pd *pd, struct xsc_ib_qp *qp) context = to_xucontext(pd->uobject->context); ib_umem_release(qp->umem); + +// free_uuar(&context->uuari, qp->uuarn); } #define MAX_QP1_SQ_HDR_SIZE_V2 512 @@ -408,8 +418,9 @@ static int 
create_kernel_qp(struct xsc_ib_dev *dev, qp->sq.mad_queue_depth = MAD_QUEUE_DEPTH; qp->sq.hdr_size = MAX_QP1_SQ_HDR_SIZE_V2 * MAD_QUEUE_DEPTH; qp->sq.hdr_buf = ib_dma_alloc_coherent(&dev->ib_dev, - qp->sq.hdr_size, - &qp->sq.hdr_dma, GFP_KERNEL); + qp->sq.hdr_size, + &qp->sq.hdr_dma, + GFP_KERNEL); if (!qp->sq.hdr_buf) { err = -ENOMEM; xsc_ib_err(dev, "Failed to create sq_hdr_buf"); @@ -435,7 +446,7 @@ static void destroy_qp_kernel(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) { if (qp->sq.hdr_buf) ib_dma_free_coherent(&dev->ib_dev, qp->sq.hdr_size, - qp->sq.hdr_buf, qp->sq.hdr_dma); + qp->sq.hdr_buf, qp->sq.hdr_dma); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); @@ -453,6 +464,8 @@ static u8 ib_to_xsc_qp_type(enum ib_qp_type qp_type, __u32 flags) } else if (qp_type == IB_QPT_RAW_PACKET) { if (flags & XSC_QP_FLAG_RAWPACKET_TSO) return XSC_QUEUE_TYPE_RAW_TSO; + else if (flags & XSC_QP_FLAG_RAWPACKET_TX) + return XSC_QUEUE_TYPE_RAW_TX; else return XSC_QUEUE_TYPE_RAW; } else { @@ -545,8 +558,6 @@ static int create_qp_common(struct xsc_ib_dev *dev, struct ib_pd *pd, else in->req.log_sq_sz = ilog2(0x80); - pr_debug("[%s:%d]\n", __func__, __LINE__); - if (init_attr->send_cq) { qp->send_cq = init_attr->send_cq; in->req.cqn_send = to_xcq(init_attr->send_cq)->xcq.cqn; @@ -570,6 +581,7 @@ static int create_qp_common(struct xsc_ib_dev *dev, struct ib_pd *pd, if (in->req.qp_type == XSC_QUEUE_TYPE_INVALID) goto err_create; in->req.glb_funcid = cpu_to_be16(dev->xdev->glb_func_id); + in->req.logic_port = cpu_to_be16(dev->xdev->logic_port); qp->xqp.qp_type_internal = in->req.qp_type; @@ -694,10 +706,10 @@ static void destroy_qp_common(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) if (!in) return; - if ((qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW) || - (qp->state != IB_QPS_RESET)) + if (qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW || + qp->state != IB_QPS_RESET) if (xsc_core_qp_modify(dev->xdev, to_xsc_state(qp->state), - XSC_QP_STATE_RST, in, sizeof(*in), 
&qp->xqp)) + XSC_QP_STATE_RST, in, sizeof(*in), &qp->xqp)) xsc_ib_warn(dev, "modify QP %06x to RESET failed\n", qp->xqp.qpn); get_cqs(qp, &send_cq, &recv_cq); @@ -753,8 +765,8 @@ static const char *ib_qp_type_str(enum ib_qp_type type) } struct ib_qp *xsc_ib_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *init_attr, - struct ib_udata *udata) + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) { struct xsc_ib_dev *dev; struct xsc_ib_qp *qp; @@ -796,7 +808,6 @@ struct ib_qp *xsc_ib_create_qp(struct ib_pd *pd, } else { qp->ibqp.qp_num = qp->xqp.qpn; } - xsc_ib_dbg(dev, "ib qpnum 0x%x, qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", qp->ibqp.qp_num, qp->xqp.qpn, to_xcq(init_attr->recv_cq)->xcq.cqn, @@ -809,7 +820,7 @@ struct ib_qp *xsc_ib_create_qp(struct ib_pd *pd, case IB_QPT_MAX: default: xsc_ib_dbg(dev, "unsupported qp type %d\n", - init_attr->qp_type); + init_attr->qp_type); /* Don't support raw QPs */ return ERR_PTR(-EINVAL); } @@ -845,23 +856,28 @@ static inline u16 xsc_calc_udp_sport(u32 lqpn, u32 rqpn) } static inline void xsc_path_set_udp_sport(struct xsc_qp_path *path, - const struct rdma_ah_attr *ah, - u32 lqpn, u32 rqpn) + const struct rdma_ah_attr *ah, + u32 lqpn, u32 rqpn) { - if ((ah->grh.flow_label & UDP_SPORT_MASK) != 0) - path->sport = cpu_to_be16(xsc_flow_label_to_udp_sport(ah->grh.flow_label)); - else + if ((ah->grh.flow_label & UDP_SPORT_MASK) != 0) { + if ((ah->grh.flow_label & UDP_SPORT_MASK_EN) == 0) + path->sport = cpu_to_be16(xsc_flow_label_to_udp_sport(ah->grh.flow_label)); + else + path->sport = cpu_to_be16((ah->grh.flow_label & UDP_SPORT_MASK) >> + UDP_SPORT_OFFSET); + } else { path->sport = cpu_to_be16(xsc_calc_udp_sport(lqpn, rqpn)); + } } static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah, - struct xsc_qp_path *path, u8 port, int attr_mask, - u32 path_flags, const struct ib_qp_attr *attr, struct xsc_ib_qp *qp) + struct xsc_qp_path *path, u8 port, int attr_mask, + u32 path_flags, const struct ib_qp_attr 
*attr, struct xsc_ib_qp *qp) { struct ib_global_route *grh = rdma_ah_retrieve_grh((struct rdma_ah_attr *)ah); + union ib_gid *dgid = &grh->dgid; const struct ib_gid_attr *sgid_attr = grh->sgid_attr; union ib_gid *sgid = &((struct ib_gid_attr *)sgid_attr)->gid; - union ib_gid *dgid = &grh->dgid; union { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; @@ -874,13 +890,17 @@ static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah, return -EINVAL; if (qp->ibqp.qp_type == IB_QPT_RC || - qp->ibqp.qp_type == IB_QPT_UC || - qp->ibqp.qp_type == IB_QPT_XRC_INI || - qp->ibqp.qp_type == IB_QPT_XRC_TGT) + qp->ibqp.qp_type == IB_QPT_UC || + qp->ibqp.qp_type == IB_QPT_XRC_INI || + qp->ibqp.qp_type == IB_QPT_XRC_TGT) xsc_path_set_udp_sport(path, ah, qp->ibqp.qp_num, attr->dest_qp_num); // if (ah->ddgrh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) // path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f; + if (sgid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) { + xsc_ib_err(dev, "gid type not ROCEv2\n"); + return -EINVAL; + } global_dscp = get_global_force_dscp(); if (global_dscp == GLOBAL_UNSET_FORCE_VALUE) @@ -893,29 +913,32 @@ static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah, rdma_gid2ip(&dgid_addr._sockaddr, dgid); if (sgid_addr._sockaddr.sa_family == AF_INET && - dgid_addr._sockaddr.sa_family == AF_INET) { + dgid_addr._sockaddr.sa_family == AF_INET) { memcpy(path->sip, &sgid_addr._sockaddr_in.sin_addr.s_addr, - sizeof(struct in_addr)); + sizeof(struct in_addr)); memcpy(path->dip, &dgid_addr._sockaddr_in.sin_addr.s_addr, - sizeof(struct in_addr)); + sizeof(struct in_addr)); path->af_type = AF_INET; } else if (sgid_addr._sockaddr.sa_family == AF_INET6 && - dgid_addr._sockaddr.sa_family == AF_INET6) { + dgid_addr._sockaddr.sa_family == AF_INET6) { memcpy(path->sip, &sgid_addr._sockaddr_in6.sin6_addr.s6_addr, - sizeof(path->sip)); + sizeof(path->sip)); memcpy(path->dip, &dgid_addr._sockaddr_in6.sin6_addr.s6_addr, - 
sizeof(path->dip)); + sizeof(path->dip)); path->af_type = AF_INET6; } else { return -EINVAL; } ether_addr_copy(path->smac, sgid_attr->ndev->dev_addr); + ether_addr_copy(path->smac, dev->netdev->dev_addr); + memcpy(path->dmac, ah->roce.dmac, sizeof(ah->roce.dmac)); if (is_vlan_dev(sgid_attr->ndev)) { path->vlan_valid = 1; path->vlan_id = cpu_to_be16(vlan_dev_vlan_id(sgid_attr->ndev)); + global_pcp = get_global_force_pcp(); if (global_pcp == GLOBAL_UNSET_FORCE_VALUE) path->dci_cfi_prio_sl = (ah->sl & 0x7); @@ -930,8 +953,8 @@ static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah, } static int __xsc_ib_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, int attr_mask, - enum ib_qp_state cur_state, enum ib_qp_state new_state) + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state cur_state, enum ib_qp_state new_state) { struct xsc_ib_dev *dev = to_mdev(ibqp->device); struct xsc_ib_qp *qp = to_xqp(ibqp); @@ -964,8 +987,8 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, if (attr_mask & IB_QP_AV) { err = xsc_set_path(dev, &attr->ah_attr, &path, - attr_mask & IB_QP_PORT ? attr->port_num : qp->port, - attr_mask, 0, attr, qp); + attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port, + attr_mask, 0, attr, qp); if (err) goto out; @@ -986,10 +1009,11 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, if (ldev && __xsc_lag_is_roce(ldev)) { context->lag_id = cpu_to_be16(ldev->lag_id); context->lag_sel_en = 1; - if ((attr->ah_attr.grh.flow_label & LAG_PORT_NUM_MASK) != 0) - context->lag_sel = ((attr->ah_attr.grh.flow_label - & LAG_PORT_NUM_MASK) >> 16) - % lag_port_num; + if ((attr->ah_attr.grh.flow_label & LAG_PORT_NUM_MASK_EN) != 0) + context->lag_sel = ((attr->ah_attr.grh.flow_label & + LAG_PORT_NUM_MASK) >> + LAG_PORT_NUM_OFFSET) % + lag_port_num; else context->lag_sel = qp->xqp.qpn % XSC_MAX_PORTS; } @@ -1015,8 +1039,8 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, memcpy(&in->ctx, context, sizeof(*context)); err = xsc_core_qp_modify(dev->xdev, to_xsc_state(cur_state), - to_xsc_state(new_state), in, sqd_event, - &qp->xqp); + to_xsc_state(new_state), in, sqd_event, + &qp->xqp); if (err) goto out; @@ -1031,7 +1055,8 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; - /* If we moved a kernel QP to RESET, clean up all old CQ + /* + * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. 
*/ if (new_state == IB_QPS_RESET && !ibqp->uobject) { @@ -1054,16 +1079,17 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, } int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata) + int attr_mask, struct ib_udata *udata) { struct xsc_ib_dev *dev = to_mdev(ibqp->device); struct xsc_ib_qp *qp = to_xqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; -#ifdef XSC_CHIP_RDMA_UNSUPPORTED - xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); - return 0; -#endif + + if (!is_support_rdma(dev->xdev)) { + xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); + return 0; + } mutex_lock(&qp->mutex); @@ -1071,7 +1097,7 @@ int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; xsc_ib_dbg(dev, "cur_state:%u, new_state:%u attr_mask:0x%x\n", - cur_state, new_state, attr_mask); + cur_state, new_state, attr_mask); //if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) // goto out; @@ -1082,14 +1108,19 @@ int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } + //if (attr_mask & IB_QP_PKEY_INDEX) { + // port = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; + // if (attr->pkey_index >= dev->xdev.caps.port[port - 1].pkey_table_len) + // goto out; + //} if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > dev->xdev->caps.max_ra_res_qp) { + attr->max_rd_atomic > dev->xdev->caps.max_ra_res_qp) { xsc_ib_err(dev, "rd atomic:%u exeeded", attr->max_rd_atomic); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > dev->xdev->caps.max_ra_req_qp) { + attr->max_dest_rd_atomic > dev->xdev->caps.max_ra_req_qp) { xsc_ib_err(dev, "dest rd atomic:%u exeeded", attr->max_dest_rd_atomic); goto out; } @@ -1119,6 +1150,7 @@ static int xsc_wq_overflow(struct xsc_ib_wq *wq, int nreq, struct xsc_ib_cq *cq) return cur + nreq >= wq->max_post; } + #ifdef XSC_DEBUG static void dump_wqe(struct xsc_ib_qp *qp, int idx) { @@ -1137,7 +1169,8 @@ static void dump_wqe(struct xsc_ib_qp *qp, int idx) #endif static inline void xsc_post_send_db(struct xsc_ib_qp *qp, - struct xsc_core_device *xdev, int nreq) + struct xsc_core_device *xdev, + int nreq) { u16 next_pid; union xsc_db_data db; @@ -1150,8 +1183,8 @@ static inline void xsc_post_send_db(struct xsc_ib_qp *qp, next_pid = qp->sq.head << (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT); db.sq_next_pid = next_pid; db.sqn = qp->doorbell_qpn; - - /* Make sure that descriptors are written before + /* + * Make sure that descriptors are written before * updating doorbell record and ringing the doorbell */ wmb(); @@ -1215,7 +1248,7 @@ u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) if (crc_field_len != size) { xsc_ib_err(dev, "Unmatched hdr: expect %d actual %d\n", - crc_field_len, size); + crc_field_len, size); return -EINVAL; } @@ -1263,13 +1296,13 @@ u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) */ // TO BE DONE: sq hdr buf should be create dynamically for mult entry int build_qp1_send_v2(struct xsc_ib_dev *dev, - struct xsc_ib_qp *qp, - const struct ib_send_wr *wr, - struct ib_sge *sge, - int 
payload_size, u32 *crc) + struct xsc_ib_qp *qp, + const struct ib_send_wr *wr, + struct ib_sge *sge, + int payload_size, u32 *crc) { - struct xsc_ib_ah *ah = container_of(ud_wr(wr)->ah, struct xsc_ib_ah, - ibah); + struct xsc_ib_ah *ah = container_of(ud_wr((struct ib_send_wr *)wr)->ah, struct xsc_ib_ah, + ibah); const struct ib_gid_attr *sgid_attr = ah->ibah.sgid_attr; u16 ether_type; union ib_gid dgid; @@ -1283,19 +1316,24 @@ int build_qp1_send_v2(struct xsc_ib_dev *dev, int cm_pcp = 0; void *hdr_buf; + memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); + if (!qp->sq.hdr_buf) { xsc_ib_err(dev, "QP1 buffer is empty!"); return -ENOMEM; } hdr_buf = (u8 *)qp->sq.hdr_buf + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index; - memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); + if (!sgid_attr || !sgid_attr->ndev) { + xsc_ib_err(dev, "sgid_addr or ndev is null\n"); + return -ENXIO; + } if (is_vlan_dev(sgid_attr->ndev)) vlan_id = vlan_dev_vlan_id(sgid_attr->ndev); - memcpy(&dgid.raw, &ah->av.rgid, 16);// TO DO SOMETHING is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; + memcpy(&dgid.raw, &ah->av.rgid, 16); if (is_udp) { if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { ip_version = 4; @@ -1329,7 +1367,7 @@ int build_qp1_send_v2(struct xsc_ib_dev *dev, else cm_pcp = (iboe_tos_to_sl(sgid_attr->ndev, ah->av.tclass) << 13); qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); - qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id|cm_pcp); + qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id | cm_pcp); } // if (is_grh || (ip_version == 6)) { @@ -1391,8 +1429,8 @@ int build_qp1_send_v2(struct xsc_ib_dev *dev, sge->length = MAX_QP1_SQ_HDR_SIZE; ib_ud_header_pack(&qp->qp1_hdr, hdr_buf); - - /* Max Header buf size for IPV6 RoCE V2 is 86, + /* + * Max Header buf size for IPV6 RoCE V2 is 86, * which is same as the QP1 SQ header buffer. * Header buf size for IPV4 RoCE V2 can be 66. * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). 
@@ -1400,8 +1438,8 @@ int build_qp1_send_v2(struct xsc_ib_dev *dev, */ if (is_udp && ip_version == 4) sge->length -= 20; - - /* Max Header buf size for RoCE V1 is 78. + /* + * Max Header buf size for RoCE V1 is 78. * ETH(14) + VLAN(4) + GRH(40) + BTH(20). * Subtract 8 bytes from QP1 SQ header buf size */ @@ -1415,21 +1453,36 @@ int build_qp1_send_v2(struct xsc_ib_dev *dev, rc = xsc_icrc_hdr(dev, hdr_buf, sge->length - sizeof(struct ib_unpacked_eth), crc); if (rc) { xsc_ib_err(dev, "CRC error: hdr size %ld\n", - sge->length - sizeof(struct ib_unpacked_eth)); + sge->length - sizeof(struct ib_unpacked_eth)); } return rc; } +static void zero_send_ds(struct xsc_ib_qp *qp, int idx) +{ + void *seg; + int i; + int ds_num; + u64 *p; + + ds_num = XSC_SEND_SEG_NUM << (qp->sq.wqe_shift - XSC_SEND_WQE_SHIFT); + seg = (void *)xsc_get_send_wqe(qp, idx); + for (i = 1; i < ds_num; i++) { + p = get_seg_wqe(seg, i); + p[0] = 0; + p[1] = 0; + } +} + int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, - const struct ib_send_wr **bad_wr) + const struct ib_send_wr **bad_wr) { struct xsc_ib_qp *qp = to_xqp(ibqp); struct xsc_ib_dev *dev = to_mdev(ibqp->device); void *seg; struct xsc_send_wqe_ctrl_seg *ctrl; struct xsc_wqe_data_seg *data_seg; - u32 crc; - + u32 crc; int nreq; int err = 0; int i; @@ -1440,10 +1493,11 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, u8 *mad_send_base = NULL; struct ib_wc wc; unsigned long qp_irqflag = 0; -#ifdef XSC_CHIP_RDMA_UNSUPPORTED - xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); - return 0; -#endif + + if (!is_support_rdma(dev->xdev)) { + xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); + return 0; + } if (wr->opcode == IB_WR_LOCAL_INV) { spin_lock_irqsave(&qp->lock, qp_irqflag); @@ -1476,8 +1530,7 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, struct ib_sge *sgl = &wr->sg_list[0]; int sg_n = wr->num_sge; - if (unlikely(wr->opcode < 0 - || wr->opcode >= 
ARRAY_SIZE(xsc_ib_opcode))) { + if (unlikely(wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(xsc_ib_opcode))) { xsc_ib_err(dev, "bad opcode %d\n", wr->opcode); err = EINVAL; *bad_wr = wr; @@ -1485,7 +1538,7 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, } if (unlikely(xsc_wq_overflow(&qp->sq, nreq, - to_xcq(qp->ibqp.send_cq)))) { + to_xcq(qp->ibqp.send_cq)))) { xsc_ib_err(dev, "send work queue overflow\n"); err = ENOMEM; *bad_wr = wr; @@ -1494,22 +1547,24 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, if (unlikely(wr->num_sge > qp->sq.max_gs)) { xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", - wr->num_sge, qp->sq.max_gs); + wr->num_sge, qp->sq.max_gs); err = ENOMEM; *bad_wr = wr; goto out; } - if (unlikely((wr->opcode == IB_WR_RDMA_READ) && (wr->num_sge > 1))) { + if (unlikely(wr->opcode == IB_WR_RDMA_READ && wr->num_sge > 1)) { xsc_ib_err(dev, "rdma read, max gs exceeded %d (max = 1)\n", - wr->num_sge); + wr->num_sge); err = ENOMEM; *bad_wr = wr; goto out; } idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); - ctrl = seg = xsc_get_send_wqe(qp, idx); + zero_send_ds(qp, idx); + seg = xsc_get_send_wqe(qp, idx); + ctrl = seg; ctrl->wqe_id = cpu_to_le16(qp->sq.cur_post << (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT)); ctrl->ds_data_num = 0; @@ -1540,11 +1595,10 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, ctrl->with_immdt = 0; ctrl->ds_data_num++; data_seg = get_seg_wqe(ctrl, seg_index); - set_remote_addr_seg( - data_seg, - msg_len, - rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); + set_remote_addr_seg(data_seg, + msg_len, + rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); seg_index++; break; case IB_WR_REG_MR: @@ -1559,7 +1613,7 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_QPT_UD: case IB_QPT_GSI: - xsc_ib_err(dev, "debug: %d UD/GSI hit\n", wr->opcode); + xsc_ib_dbg(dev, "send MAD packet\n"); ctrl->msg_opcode = XSC_MSG_OPCODE_MAD; ctrl->ds_data_num++; 
data_seg = get_seg_wqe(ctrl, seg_index); @@ -1571,9 +1625,10 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, cur_p = mad_send_base + sg.length; for (i = 0; i < wr->num_sge; ++i) { if (likely(wr->sg_list[i].length)) - memcpy(cur_p, phys_to_virt( - dma_to_phys(dev->ib_dev.dma_device, - wr->sg_list[i].addr)), wr->sg_list[i].length); + memcpy(cur_p, + phys_to_virt(dma_to_phys(dev->ib_dev.dma_device, + wr->sg_list[i].addr)), + wr->sg_list[i].length); cur_p += wr->sg_list[i].length; } crc = xsc_crc32(dev, crc, mad_send_base + sg.length, ctrl->msg_len); @@ -1632,7 +1687,7 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, } int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, - const struct ib_recv_wr **bad_wr) + const struct ib_recv_wr **bad_wr) { struct xsc_ib_qp *qp = to_xqp(ibqp); struct xsc_ib_dev *dev = to_mdev(ibqp->device); @@ -1646,18 +1701,18 @@ int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, int nreq; u16 idx; int i; -#ifdef XSC_CHIP_RDMA_UNSUPPORTED - xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); - return 0; -#endif + + if (!is_support_rdma(xdev)) { + xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); + return 0; + } spin_lock_irqsave(&qp->rq.lock, flags); idx = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (unlikely(xsc_wq_overflow(&qp->rq, nreq, - to_xcq(qp->ibqp.recv_cq)))) { + if (unlikely(xsc_wq_overflow(&qp->rq, nreq, to_xcq(qp->ibqp.recv_cq)))) { xsc_ib_err(dev, "recv work queue overflow\n"); err = ENOMEM; *bad_wr = wr; @@ -1666,7 +1721,7 @@ int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, if (unlikely(wr->num_sge > qp->rq.max_gs)) { xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", - wr->num_sge, qp->rq.max_gs); + wr->num_sge, qp->rq.max_gs); err = EINVAL; *bad_wr = wr; goto out; @@ -1699,7 +1754,8 @@ int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 
db.rq_next_pid = next_pid; db.rqn = qp->doorbell_qpn; - /* Make sure that descriptors are written before + /* + * Make sure that descriptors are written before * doorbell record. */ wmb(); @@ -1738,7 +1794,7 @@ static inline enum ib_mig_state to_ib_mig_state(int xsc_mig_state) } int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) + struct ib_qp_init_attr *qp_init_attr) { struct xsc_ib_dev *dev = to_mdev(ibqp->device); struct xsc_ib_qp *qp = to_xqp(ibqp); @@ -1746,10 +1802,12 @@ int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_ struct xsc_qp_context *context; int xsc_state; int err = 0; -#ifdef XSC_CHIP_RDMA_UNSUPPORTED - xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); - return 0; -#endif + + if (!is_support_rdma(dev->xdev)) { + xsc_ib_dbg(dev, "rdma unsupported,%s no action.\n", __func__); + return 0; + } + mutex_lock(&qp->mutex); outb = kzalloc(sizeof(*outb), GFP_KERNEL); if (!outb) { diff --git a/drivers/infiniband/hw/xsc/rtt.c b/drivers/infiniband/hw/xsc/rtt.c index 3c6e461e7249acabffa07bd289c127c6e7ef12c9..3782aa2affb517ad7592cc97277f0098af7f18d9 100644 --- a/drivers/infiniband/hw/xsc/rtt.c +++ b/drivers/infiniband/hw/xsc/rtt.c @@ -6,10 +6,10 @@ #include #include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" struct xsc_rtt_interface { struct xsc_core_device *xdev; @@ -18,14 +18,14 @@ struct xsc_rtt_interface { struct xsc_rtt_attributes { struct attribute attr; - ssize_t (*show)(struct xsc_rtt_interface *interface, struct xsc_rtt_attributes *attr, + ssize_t (*show)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, char *buf); - ssize_t (*store)(struct xsc_rtt_interface *interface, struct xsc_rtt_attributes *attr, + ssize_t (*store)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, const char *buf, size_t count); }; 
static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - char *buf) + char *buf) { int err; struct xsc_inbox_hdr in; @@ -36,10 +36,10 @@ static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_EN); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), - (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt en, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -47,7 +47,7 @@ static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute } static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - const char *buf, size_t count) + const char *buf, size_t count) { int err; u16 rtt_enable; @@ -60,7 +60,7 @@ static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut if (rtt_enable > 1) { xsc_core_err(g->xdev, "Failed to set rtt en, rtt_enable(%hu) out of range[0,1]\n", - rtt_enable); + rtt_enable); return -EINVAL; } @@ -71,10 +71,10 @@ static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut in.en = rtt_enable; err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), - (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to set rtt en, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -82,7 +82,7 @@ static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut } static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - char *buf) + char *buf) { int err, i; u32 count = 0; @@ -94,10 +94,10 @@ static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes * in.opcode = 
__cpu_to_be16(XSC_CMD_OP_GET_RTT_QPN); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), - (void *)&out, sizeof(struct xsc_get_rtt_qpn_mbox_out)); + (void *)&out, sizeof(struct xsc_get_rtt_qpn_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt qpn, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -110,10 +110,10 @@ static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes * } #define RTT_CFG_QPN_FORMAT "%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu," \ - "%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu" +"%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu" static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - const char *buf, size_t count) + const char *buf, size_t count) { int err, i; struct xsc_rtt_qpn_mbox_in in; @@ -124,10 +124,10 @@ static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes memset(&out, 0, sizeof(out)); err = sscanf(buf, RTT_CFG_QPN_FORMAT, &ptr[0], &ptr[1], &ptr[2], &ptr[3], &ptr[4], - &ptr[5], &ptr[6], &ptr[7], &ptr[8], &ptr[9], &ptr[10], &ptr[11], &ptr[12], - &ptr[13], &ptr[14], &ptr[15], &ptr[16], &ptr[17], &ptr[18], &ptr[19], - &ptr[20], &ptr[21], &ptr[22], &ptr[23], &ptr[24], &ptr[25], &ptr[26], - &ptr[27], &ptr[28], &ptr[29], &ptr[30], &ptr[31]); + &ptr[5], &ptr[6], &ptr[7], &ptr[8], &ptr[9], &ptr[10], &ptr[11], &ptr[12], + &ptr[13], &ptr[14], &ptr[15], &ptr[16], &ptr[17], &ptr[18], &ptr[19], + &ptr[20], &ptr[21], &ptr[22], &ptr[23], &ptr[24], &ptr[25], &ptr[26], + &ptr[27], &ptr[28], &ptr[29], &ptr[30], &ptr[31]); if (err != XSC_RTT_CFG_QPN_MAX) return -EINVAL; @@ -137,10 +137,10 @@ static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes in.qpn[i] = __cpu_to_be16(ptr[i]); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_qpn_mbox_in), - (void *)&out, sizeof(struct 
xsc_rtt_qpn_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_qpn_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to set rtt qpn, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -148,7 +148,7 @@ static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes } static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - char *buf) + char *buf) { int err; struct xsc_inbox_hdr in; @@ -159,10 +159,10 @@ static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_PERIOD); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), - (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt period, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -171,7 +171,7 @@ static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute #define RTT_CFG_PERIOD_MAX 10000 //ms, 10s static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - const char *buf, size_t count) + const char *buf, size_t count) { int err; u32 rtt_period; @@ -192,10 +192,10 @@ static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut in.period = __cpu_to_be32(rtt_period); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_period_mbox_in), - (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to set rtt period, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -203,7 +203,7 @@ static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut } static ssize_t result_show(struct xsc_rtt_interface *g, struct 
xsc_rtt_attributes *a, - char *buf) + char *buf) { int i, err; u32 count = 0; @@ -216,10 +216,10 @@ static ssize_t result_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_RESULT); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), - (void *)&out, sizeof(struct xsc_rtt_result_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_result_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt result, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -232,13 +232,13 @@ static ssize_t result_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute } static ssize_t result_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - const char *buf, size_t count) + const char *buf, size_t count) { return -EOPNOTSUPP; } static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - char *buf) + char *buf) { int err; u32 count = 0; @@ -251,10 +251,10 @@ static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_STATS); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), - (void *)&out, sizeof(struct xsc_rtt_stats_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_stats_mbox_out)); if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt stats, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -279,7 +279,7 @@ static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes } static ssize_t stats_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, - const char *buf, size_t count) + const char *buf, size_t count) { return -EOPNOTSUPP; } @@ -294,7 +294,7 @@ RTT_ATTR(result); RTT_ATTR(stats); static ssize_t rtt_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) + struct attribute *attr, char *buf) { struct xsc_rtt_attributes 
*ga = container_of(attr, struct xsc_rtt_attributes, attr); @@ -307,8 +307,8 @@ static ssize_t rtt_attr_show(struct kobject *kobj, } static ssize_t rtt_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, size_t size) + struct attribute *attr, + const char *buf, size_t size) { struct xsc_rtt_attributes *ga = container_of(attr, struct xsc_rtt_attributes, attr); @@ -347,12 +347,12 @@ int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) return -EACCES; - tmp = kzalloc(sizeof(struct xsc_rtt_interface), GFP_KERNEL); + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; err = kobject_init_and_add(&tmp->kobj, &rtt_ktype, - &(ib_dev->dev.kobj), "rtt"); + &ib_dev->dev.kobj, "rtt"); if (err) goto rtt_attr_err; @@ -385,10 +385,10 @@ void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) in.en = 0; err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), - (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); if (err || out.hdr.status) xsc_core_err(xdev, "Failed to set rtt disable, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); memset(&period_in, 0, sizeof(period_in)); memset(&period_out, 0, sizeof(period_out)); @@ -397,10 +397,10 @@ void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) period_in.period = __cpu_to_be32(RTT_CFG_PERIOD_MAX); err = xsc_cmd_exec(xdev, (void *)&period_in, sizeof(struct xsc_rtt_period_mbox_in), - (void *)&period_out, sizeof(struct xsc_rtt_period_mbox_out)); + (void *)&period_out, sizeof(struct xsc_rtt_period_mbox_out)); if (err || period_out.hdr.status) xsc_core_err(xdev, "Failed to set rtt period default, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); rtt = xdev->rtt_priv; kobject_put(&rtt->kobj); diff --git a/drivers/infiniband/hw/xsc/user.h b/drivers/infiniband/hw/xsc/user.h index 
cccc5de39d1e26d2bbf987895fbacfbc5d6b4f84..6e2b6ff542ae8de163a5190f651db3d1d4754450 100644 --- a/drivers/infiniband/hw/xsc/user.h +++ b/drivers/infiniband/hw/xsc/user.h @@ -52,6 +52,7 @@ enum { XSC_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, XSC_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, XSC_QP_FLAG_RAWPACKET_TSO = 1 << 9, + XSC_QP_FLAG_RAWPACKET_TX = 1 << 10, }; struct xsc_ib_alloc_ucontext_req { @@ -132,7 +133,28 @@ enum xsc_rx_hash_function_flags { XSC_RX_HASH_FUNC_TOEPLITZ = 1 << 0, }; -/* RX Hash flags, these flags allows to set which incoming packet's field should +enum xsc_rdma_link_speed { + XSC_RDMA_LINK_SPEED_2_5GB = 1 << 0, + XSC_RDMA_LINK_SPEED_5GB = 1 << 1, + XSC_RDMA_LINK_SPEED_10GB = 1 << 3, + XSC_RDMA_LINK_SPEED_14GB = 1 << 4, + XSC_RDMA_LINK_SPEED_25GB = 1 << 5, + XSC_RDMA_LINK_SPEED_50GB = 1 << 6, + XSC_RDMA_LINK_SPEED_100GB = 1 << 7, +}; + +enum xsc_rdma_phys_state { + XSC_RDMA_PHY_STATE_SLEEP = 1, + XSC_RDMA_PHY_STATE_POLLING, + XSC_RDMA_PHY_STATE_DISABLED, + XSC_RDMA_PHY_STATE_PORT_CONFIGURATION_TRAINNING, + XSC_RDMA_PHY_STATE_LINK_UP, + XSC_RDMA_PHY_STATE_LINK_ERROR_RECOVERY, + XSC_RDMA_PHY_STATE_PHY_TEST, +}; + +/* + * RX Hash flags, these flags allows to set which incoming packet's field should * participates in RX Hash. Each flag represent certain packet's field, * when the flag is set the field that is represented by the flag will * participate in RX Hash calculation. 
diff --git a/drivers/infiniband/hw/xsc/xsc_ib.h b/drivers/infiniband/hw/xsc/xsc_ib.h index 121f2ad682e864b65f9d422d251efd8f85b1d14e..6971fc54c3b08a5e7e409a9ecfa977a4e3af58da 100644 --- a/drivers/infiniband/hw/xsc/xsc_ib.h +++ b/drivers/infiniband/hw/xsc/xsc_ib.h @@ -12,42 +12,35 @@ #include #include #include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" #include #include #include "xsc_ib_compat.h" -#define DISABLE_XSC_IB_DBG - -#ifdef DISABLE_XSC_IB_DBG -#define xsc_ib_dbg(dev, format, arg...) \ -pr_debug("%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ - __LINE__, current->pid, ##arg) - -#define xsc_ib_err(dev, format, arg...) \ -pr_err("%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ - __LINE__, current->pid, ##arg) - -#define xsc_ib_warn(dev, format, arg...) \ -pr_warn("%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ - __LINE__, current->pid, ##arg) -#else -#define xsc_ib_dbg(dev, format, arg...) \ -pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ - __LINE__, current->pid, ##arg) - -#define xsc_ib_err(dev, format, arg...) \ -pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ - __LINE__, current->pid, ##arg) - -#define xsc_ib_warn(dev, format, arg...) \ -pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ - __LINE__, current->pid, ##arg) -#endif +#define xsc_ib_dbg(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_err(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_ERR) \ + pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_warn(dev, format, arg...) 
\ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_WARN) \ + pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) struct xsc_ib_ucontext { struct ib_ucontext ibucontext; @@ -101,8 +94,8 @@ struct xsc_ib_wq { u64 *wrid; u32 *wr_data; struct wr_list *w_list; - unsigned int *wqe_head; - u16 unsig_count; + unsigned long *wqe_head; + u16 unsig_count; /* serialize post to the work queue */ @@ -173,6 +166,7 @@ struct xsc_ib_qp { struct xsc_qp_context ctx; struct ib_cq *send_cq; struct ib_cq *recv_cq; + /* For qp resources */ spinlock_t lock; }; @@ -305,7 +299,7 @@ struct xsc_ib_dev { /* serialize update of capability mask */ struct mutex cap_mask_mutex; - bool ib_active; + u8 ib_active; /* sync used page count stats */ spinlock_t mr_lock; @@ -327,6 +321,13 @@ union xsc_ib_fw_ver { } s; }; +struct xsc_pa_chunk { + struct list_head list; + u64 va; + dma_addr_t pa; + size_t length; +}; + static inline struct xsc_ib_cq *to_xibcq(struct xsc_core_cq *xcq) { return container_of(xcq, struct xsc_ib_cq, xcq); @@ -379,53 +380,49 @@ static inline struct xsc_ib_ah *to_mah(struct ib_ah *ibah) static inline struct xsc_ib_dev *xdev2ibdev(struct xsc_core_device *xdev) { - return container_of(xdev, struct xsc_ib_dev, xdev); + return container_of((void *)xdev, struct xsc_ib_dev, xdev); } int xsc_ib_query_port(struct ib_device *ibdev, u8 port, - struct ib_port_attr *props); + struct ib_port_attr *props); struct ib_qp *xsc_ib_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *init_attr, - struct ib_udata *udata); - + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn); void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn); -int xsc_MAD_IFC(struct xsc_ib_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad); int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int 
xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata); + int attr_mask, struct ib_udata *udata); int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr); + struct ib_qp_init_attr *qp_init_attr); + int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, - const struct ib_send_wr **bad_wr); + const struct ib_send_wr **bad_wr); int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, - const struct ib_recv_wr **bad_wr); + const struct ib_recv_wr **bad_wr); + void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n); int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int access_flags, - struct ib_udata *udata); -int xsc_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + u64 virt_addr, int access_flags, + struct ib_udata *udata); int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, - int *ncont, int *order); + int *ncont, int *order); void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, - int page_shift, __be64 *pas, int npages, bool need_to_devide); -int xsc_mr_cache_init(struct xsc_ib_dev *dev); + int page_shift, __be64 *pas, int npages, bool need_to_devide); const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void); int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, - int sg_nents, unsigned int *sg_offset); + int sg_nents, unsigned int *sg_offset); int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr); int 
xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr); +int xsc_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, + unsigned long addr, int *npage, int *shift, u64 **pas); static inline void init_query_mad(struct ib_smp *mad) { diff --git a/drivers/infiniband/hw/xsc/xsc_ib_compat.h b/drivers/infiniband/hw/xsc/xsc_ib_compat.h index 9c67be098f700eb16be0c9e2750c453206087e31..04c71442f416c9061639eda003599906cac55375 100644 --- a/drivers/infiniband/hw/xsc/xsc_ib_compat.h +++ b/drivers/infiniband/hw/xsc/xsc_ib_compat.h @@ -7,13 +7,14 @@ #ifndef XSC_IB_COMPAT_H #define XSC_IB_COMPAT_H -/* adaptive to different ib_core versions +/* + * adaptive to different ib_core versions */ struct xsc_ib_ucontext; int xsc_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *ah_attr, - struct ib_udata *udata); + struct ib_udata *udata); int xsc_ib_destroy_ah(struct ib_ah *ibah, u32 destroy_flags); #define xsc_ib_create_ah_def() int xsc_ib_create_ah(\ struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) @@ -21,19 +22,17 @@ int xsc_ib_destroy_ah(struct ib_ah *ibah, u32 destroy_flags); int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); int xsc_ib_create_cq(struct ib_cq *ibcq, - const struct ib_cq_init_attr *attr, - struct ib_udata *udata); + const struct ib_cq_init_attr *attr, + struct ib_udata *udata); int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); // from main.c static functions int xsc_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); - int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); -struct ib_mr *xsc_ib_alloc_mr( - struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); 
#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) @@ -46,9 +45,7 @@ struct ib_mr *xsc_ib_alloc_mr( struct ib_ucontext *uctx, struct ib_udata *udata) #define xsc_ib_dealloc_ucontext_def() void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) #define xsc_ib_alloc_pd_def() int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) - #define xsc_ib_dealloc_pd_def() int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) - #define RET_VALUE(x) (x) #endif diff --git a/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..b5052fe9f7e312c6a69b7622071e995c50b0d632 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" + +int xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + int err = 0; + + return err; +} + +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ +} + diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c index 7c49a038554c3005e39dcfaa10b2b30f3b00f8f1..2a504b796b1d8b230cce673c22211d3a4817c619 100644 --- a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c @@ -8,10 +8,10 @@ #include #include #include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" #include "global.h" #include "xsc_ib.h" @@ -19,7 +19,7 @@ static void encode_cc_cmd_enable_rp(void *data, u32 mac_port) { - struct xsc_cc_cmd_enable_rp *cc_cmd = (struct 
xsc_cc_cmd_enable_rp *) data; + struct xsc_cc_cmd_enable_rp *cc_cmd = (struct xsc_cc_cmd_enable_rp *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -29,7 +29,7 @@ static void encode_cc_cmd_enable_rp(void *data, u32 mac_port) static void encode_cc_cmd_enable_np(void *data, u32 mac_port) { - struct xsc_cc_cmd_enable_np *cc_cmd = (struct xsc_cc_cmd_enable_np *) data; + struct xsc_cc_cmd_enable_np *cc_cmd = (struct xsc_cc_cmd_enable_np *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -39,7 +39,7 @@ static void encode_cc_cmd_enable_np(void *data, u32 mac_port) static void encode_cc_cmd_init_alpha(void *data, u32 mac_port) { - struct xsc_cc_cmd_init_alpha *cc_cmd = (struct xsc_cc_cmd_init_alpha *) data; + struct xsc_cc_cmd_init_alpha *cc_cmd = (struct xsc_cc_cmd_init_alpha *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -49,7 +49,7 @@ static void encode_cc_cmd_init_alpha(void *data, u32 mac_port) static void encode_cc_cmd_g(void *data, u32 mac_port) { - struct xsc_cc_cmd_g *cc_cmd = (struct xsc_cc_cmd_g *) data; + struct xsc_cc_cmd_g *cc_cmd = (struct xsc_cc_cmd_g *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -59,7 +59,7 @@ static void encode_cc_cmd_g(void *data, u32 mac_port) static void encode_cc_cmd_ai(void *data, u32 mac_port) { - struct xsc_cc_cmd_ai *cc_cmd = (struct xsc_cc_cmd_ai *) data; + struct xsc_cc_cmd_ai *cc_cmd = (struct xsc_cc_cmd_ai *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -69,7 +69,7 @@ static void encode_cc_cmd_ai(void *data, u32 mac_port) static void encode_cc_cmd_hai(void *data, u32 mac_port) { - struct xsc_cc_cmd_hai *cc_cmd = (struct xsc_cc_cmd_hai *) data; + struct xsc_cc_cmd_hai *cc_cmd = (struct xsc_cc_cmd_hai *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ 
-79,7 +79,7 @@ static void encode_cc_cmd_hai(void *data, u32 mac_port) static void encode_cc_cmd_th(void *data, u32 mac_port) { - struct xsc_cc_cmd_th *cc_cmd = (struct xsc_cc_cmd_th *) data; + struct xsc_cc_cmd_th *cc_cmd = (struct xsc_cc_cmd_th *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -89,7 +89,7 @@ static void encode_cc_cmd_th(void *data, u32 mac_port) static void encode_cc_cmd_bc(void *data, u32 mac_port) { - struct xsc_cc_cmd_bc *cc_cmd = (struct xsc_cc_cmd_bc *) data; + struct xsc_cc_cmd_bc *cc_cmd = (struct xsc_cc_cmd_bc *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -99,14 +99,14 @@ static void encode_cc_cmd_bc(void *data, u32 mac_port) static void encode_cc_cmd_cnp_opcode(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_opcode *cc_cmd = (struct xsc_cc_cmd_cnp_opcode *) data; + struct xsc_cc_cmd_cnp_opcode *cc_cmd = (struct xsc_cc_cmd_cnp_opcode *)data; cc_cmd->opcode = __cpu_to_be32(cc_cmd->opcode); } static void encode_cc_cmd_cnp_bth_b(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_bth_b *cc_cmd = (struct xsc_cc_cmd_cnp_bth_b *) data; + struct xsc_cc_cmd_cnp_bth_b *cc_cmd = (struct xsc_cc_cmd_cnp_bth_b *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -115,7 +115,7 @@ static void encode_cc_cmd_cnp_bth_b(void *data, u32 mac_port) static void encode_cc_cmd_cnp_bth_f(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_bth_f *cc_cmd = (struct xsc_cc_cmd_cnp_bth_f *) data; + struct xsc_cc_cmd_cnp_bth_f *cc_cmd = (struct xsc_cc_cmd_cnp_bth_f *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -124,14 +124,14 @@ static void encode_cc_cmd_cnp_bth_f(void *data, u32 mac_port) static void encode_cc_cmd_cnp_ecn(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_ecn *cc_cmd = (struct xsc_cc_cmd_cnp_ecn *) data; + struct xsc_cc_cmd_cnp_ecn *cc_cmd = (struct 
xsc_cc_cmd_cnp_ecn *)data; cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); } static void encode_cc_cmd_data_ecn(void *data, u32 mac_port) { - struct xsc_cc_cmd_data_ecn *cc_cmd = (struct xsc_cc_cmd_data_ecn *) data; + struct xsc_cc_cmd_data_ecn *cc_cmd = (struct xsc_cc_cmd_data_ecn *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -140,7 +140,7 @@ static void encode_cc_cmd_data_ecn(void *data, u32 mac_port) static void encode_cc_cmd_cnp_tx_interval(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_tx_interval *cc_cmd = (struct xsc_cc_cmd_cnp_tx_interval *) data; + struct xsc_cc_cmd_cnp_tx_interval *cc_cmd = (struct xsc_cc_cmd_cnp_tx_interval *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -151,7 +151,7 @@ static void encode_cc_cmd_cnp_tx_interval(void *data, u32 mac_port) static void encode_cc_cmd_evt_rsttime(void *data, u32 mac_port) { struct xsc_cc_cmd_evt_rsttime *cc_cmd = - (struct xsc_cc_cmd_evt_rsttime *) data; + (struct xsc_cc_cmd_evt_rsttime *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -160,7 +160,7 @@ static void encode_cc_cmd_evt_rsttime(void *data, u32 mac_port) static void encode_cc_cmd_cnp_dscp(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_dscp *cc_cmd = (struct xsc_cc_cmd_cnp_dscp *) data; + struct xsc_cc_cmd_cnp_dscp *cc_cmd = (struct xsc_cc_cmd_cnp_dscp *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -170,7 +170,7 @@ static void encode_cc_cmd_cnp_dscp(void *data, u32 mac_port) static void encode_cc_cmd_cnp_pcp(void *data, u32 mac_port) { - struct xsc_cc_cmd_cnp_pcp *cc_cmd = (struct xsc_cc_cmd_cnp_pcp *) data; + struct xsc_cc_cmd_cnp_pcp *cc_cmd = (struct xsc_cc_cmd_cnp_pcp *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -180,7 +180,7 @@ static void encode_cc_cmd_cnp_pcp(void *data, u32 mac_port) static void 
encode_cc_cmd_evt_period_alpha(void *data, u32 mac_port) { - struct xsc_cc_cmd_evt_period_alpha *cc_cmd = (struct xsc_cc_cmd_evt_period_alpha *) data; + struct xsc_cc_cmd_evt_period_alpha *cc_cmd = (struct xsc_cc_cmd_evt_period_alpha *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -189,7 +189,7 @@ static void encode_cc_cmd_evt_period_alpha(void *data, u32 mac_port) static void encode_cc_cmd_clamp_tgt_rate(void *data, u32 mac_port) { - struct xsc_cc_cmd_clamp_tgt_rate *cc_cmd = (struct xsc_cc_cmd_clamp_tgt_rate *) data; + struct xsc_cc_cmd_clamp_tgt_rate *cc_cmd = (struct xsc_cc_cmd_clamp_tgt_rate *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -197,9 +197,19 @@ static void encode_cc_cmd_clamp_tgt_rate(void *data, u32 mac_port) cc_cmd->section = __cpu_to_be32(mac_port); } +static void encode_cc_cmd_max_hai_factor(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_max_hai_factor *cc_cmd = (struct xsc_cc_cmd_max_hai_factor *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->max_hai_factor = __cpu_to_be32(cc_cmd->max_hai_factor); + cc_cmd->section = __cpu_to_be32(mac_port); +} + static void encode_cc_get_cfg(void *data, u32 mac_port) { - struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *) data; + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -208,7 +218,7 @@ static void encode_cc_get_cfg(void *data, u32 mac_port) static void decode_cc_get_cfg(void *data) { - struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *) data; + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; cc_cmd->cmd = __be16_to_cpu(cc_cmd->cmd); cc_cmd->len = __be16_to_cpu(cc_cmd->len); @@ -231,12 +241,13 @@ static void decode_cc_get_cfg(void *data) cc_cmd->cnp_pcp = __be32_to_cpu(cc_cmd->cnp_pcp); 
cc_cmd->evt_period_alpha = __be32_to_cpu(cc_cmd->evt_period_alpha); cc_cmd->clamp_tgt_rate = __be32_to_cpu(cc_cmd->clamp_tgt_rate); + cc_cmd->max_hai_factor = __be32_to_cpu(cc_cmd->max_hai_factor); cc_cmd->section = __be32_to_cpu(cc_cmd->section); } static void encode_cc_get_stat(void *data, u32 mac_port) { - struct xsc_cc_cmd_get_stat *cc_cmd = (struct xsc_cc_cmd_get_stat *) data; + struct xsc_cc_cmd_get_stat *cc_cmd = (struct xsc_cc_cmd_get_stat *)data; cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); cc_cmd->len = __cpu_to_be16(cc_cmd->len); @@ -245,7 +256,7 @@ static void encode_cc_get_stat(void *data, u32 mac_port) static void decode_cc_get_stat(void *data) { - struct xsc_cc_cmd_stat *cc_cmd = (struct xsc_cc_cmd_stat *) data; + struct xsc_cc_cmd_stat *cc_cmd = (struct xsc_cc_cmd_stat *)data; cc_cmd->cnp_handled = __be32_to_cpu(cc_cmd->cnp_handled); cc_cmd->alpha_recovery = __be32_to_cpu(cc_cmd->alpha_recovery); @@ -292,7 +303,7 @@ static int xsc_priv_dev_ioctl_get_cma_pcp(struct xsc_core_device *xdev, void *in struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; struct xsc_ioctl_cma_pcp *resp = (struct xsc_ioctl_cma_pcp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) return -EOPNOTSUPP; resp->pcp = ib_dev->cm_pcp; @@ -304,7 +315,7 @@ static int xsc_priv_dev_ioctl_get_cma_dscp(struct xsc_core_device *xdev, void *i struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; struct xsc_ioctl_cma_dscp *resp = (struct xsc_ioctl_cma_dscp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) return -EOPNOTSUPP; resp->dscp = ib_dev->cm_dscp; @@ -316,10 +327,10 @@ static int xsc_priv_dev_ioctl_set_cma_pcp(struct xsc_core_device *xdev, void *in struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; struct xsc_ioctl_cma_pcp *req = (struct xsc_ioctl_cma_pcp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) return -EOPNOTSUPP; - if (req->pcp < 0 || req->pcp > QOS_PCP_MAX) + 
if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) return -EINVAL; ib_dev->cm_pcp = req->pcp; @@ -331,10 +342,10 @@ static int xsc_priv_dev_ioctl_set_cma_dscp(struct xsc_core_device *xdev, void *i struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; struct xsc_ioctl_cma_dscp *req = (struct xsc_ioctl_cma_dscp *)out; - if (!XSC_IS_PF(xdev->glb_func_id)) + if (!check_is_pf(&xdev->caps, xdev->glb_func_id)) return -EOPNOTSUPP; - if (req->dscp < 0 || req->dscp > QOS_DSCP_MAX) + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) return -EINVAL; ib_dev->cm_dscp = req->dscp; @@ -342,8 +353,9 @@ static int xsc_priv_dev_ioctl_set_cma_dscp(struct xsc_core_device *xdev, void *i } static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, - u16 expect_resp_size, void (*encode)(void *, u32), void (*decode)(void *)) + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, u16 expect_resp_size, + void (*encode)(void *, u32), void (*decode)(void *)) { struct xsc_cc_mbox_in *in; struct xsc_cc_mbox_out *out; @@ -369,8 +381,8 @@ static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, if (encode) encode((void *)in->data, xdev->mac_port); - err = xsc_cmd_exec( - xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); hdr->attr.error = __be32_to_cpu(out->hdr.status); if (decode) @@ -394,7 +406,7 @@ static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, } int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size) + int out_size) { int opcode, ret = 0; struct xsc_ioctl_attr *hdr; @@ -436,7 +448,7 @@ int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, v } static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device 
*xdev, - struct xsc_ioctl_hdr __user *user_hdr) + struct xsc_ioctl_hdr __user *user_hdr) { struct xsc_ioctl_hdr hdr; struct xsc_ioctl_hdr *in; @@ -473,8 +485,8 @@ static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, return -EFAULT; } - err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size-sizeof(u32)), in->attr.data, - hdr.attr.length); + err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), in->attr.data, + hdr.attr.length); in->attr.error = err; if (copy_to_user(user_hdr, in, in_size)) err = -EFAULT; @@ -483,7 +495,7 @@ static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, } static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr) + struct xsc_ioctl_hdr __user *user_hdr) { struct xsc_ioctl_hdr hdr; int err; @@ -501,89 +513,92 @@ static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, /* check ioctl cmd */ switch (hdr.attr.opcode) { case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_enable_rp), - 0, encode_cc_cmd_enable_rp, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_enable_rp), + 0, encode_cc_cmd_enable_rp, NULL); case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_enable_np), - 0, encode_cc_cmd_enable_np, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_enable_np), + 0, encode_cc_cmd_enable_np, NULL); case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_init_alpha), - 0, encode_cc_cmd_init_alpha, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_init_alpha), + 0, encode_cc_cmd_init_alpha, NULL); case XSC_CMD_OP_IOCTL_SET_G: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_g), - 0, encode_cc_cmd_g, NULL); + return 
_rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_g), + 0, encode_cc_cmd_g, NULL); case XSC_CMD_OP_IOCTL_SET_AI: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_ai), - 0, encode_cc_cmd_ai, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_ai), + 0, encode_cc_cmd_ai, NULL); case XSC_CMD_OP_IOCTL_SET_HAI: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_hai), - 0, encode_cc_cmd_hai, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_hai), + 0, encode_cc_cmd_hai, NULL); case XSC_CMD_OP_IOCTL_SET_TH: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_th), - 0, encode_cc_cmd_th, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_th), + 0, encode_cc_cmd_th, NULL); case XSC_CMD_OP_IOCTL_SET_BC_TH: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_bc), - 0, encode_cc_cmd_bc, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_bc), + 0, encode_cc_cmd_bc, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_opcode), - 0, encode_cc_cmd_cnp_opcode, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_opcode), + 0, encode_cc_cmd_cnp_opcode, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_bth_b), - 0, encode_cc_cmd_cnp_bth_b, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_bth_b), + 0, encode_cc_cmd_cnp_bth_b, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_bth_f), - 0, encode_cc_cmd_cnp_bth_f, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_bth_f), + 0, 
encode_cc_cmd_cnp_bth_f, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_ECN: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_ecn), + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_ecn), 0, encode_cc_cmd_cnp_ecn, NULL); case XSC_CMD_OP_IOCTL_SET_DATA_ECN: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_data_ecn), - 0, encode_cc_cmd_data_ecn, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_data_ecn), + 0, encode_cc_cmd_data_ecn, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_tx_interval), - 0, encode_cc_cmd_cnp_tx_interval, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_tx_interval), + 0, encode_cc_cmd_cnp_tx_interval, NULL); case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, - sizeof(struct xsc_cc_cmd_evt_rsttime), 0, encode_cc_cmd_evt_rsttime, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_evt_rsttime), + 0, encode_cc_cmd_evt_rsttime, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_dscp), - 0, encode_cc_cmd_cnp_dscp, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_dscp), + 0, encode_cc_cmd_cnp_dscp, NULL); case XSC_CMD_OP_IOCTL_SET_CNP_PCP: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_pcp), - 0, encode_cc_cmd_cnp_pcp, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_pcp), + 0, encode_cc_cmd_cnp_pcp, NULL); case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_evt_period_alpha), - 0, encode_cc_cmd_evt_period_alpha, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, 
+ sizeof(struct xsc_cc_cmd_evt_period_alpha), + 0, encode_cc_cmd_evt_period_alpha, NULL); case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_clamp_tgt_rate), - 0, encode_cc_cmd_clamp_tgt_rate, NULL); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_clamp_tgt_rate), + 0, encode_cc_cmd_clamp_tgt_rate, NULL); + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_max_hai_factor), + 0, encode_cc_cmd_max_hai_factor, NULL); case XSC_CMD_OP_IOCTL_GET_CC_CFG: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_cfg), - sizeof(struct xsc_cc_cmd_get_cfg), encode_cc_get_cfg, decode_cc_get_cfg); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_cfg), + sizeof(struct xsc_cc_cmd_get_cfg), + encode_cc_get_cfg, decode_cc_get_cfg); case XSC_CMD_OP_IOCTL_GET_CC_STAT: - return _rdma_ctrl_ioctl_cc( - xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat), - sizeof(struct xsc_cc_cmd_stat), encode_cc_get_stat, decode_cc_get_stat); + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat), + sizeof(struct xsc_cc_cmd_stat), + encode_cc_get_stat, decode_cc_get_stat); default: return -EINVAL; } @@ -615,9 +630,10 @@ static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, return err; } -static void _rdma_ctrl_reg_cb(struct xsc_core_device *xdev, unsigned int cmd, - struct xsc_ioctl_hdr __user *user_hdr, void *data) +static int _rdma_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) { + struct xsc_core_device *xdev = file->xdev; int err; switch (cmd) { @@ -633,6 +649,8 @@ static void _rdma_ctrl_reg_cb(struct xsc_core_device *xdev, unsigned int cmd, err = -EFAULT; break; } + + return err; } static void _rdma_ctrl_reg_fini(void) @@ -646,8 +664,7 @@ static int 
_rdma_ctrl_reg_init(void) ret = xsc_port_ctrl_cb_reg(XSC_RDMA_CTRL_NAME, _rdma_ctrl_reg_cb, NULL); if (ret != 0) - pr_err("failed to register port control node for %s\n", - XSC_RDMA_CTRL_NAME); + pr_err("failed to register port control node for %s\n", XSC_RDMA_CTRL_NAME); return ret; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/chip_scale_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/chip_scale_defines.h deleted file mode 100644 index 76885db35120f13034e376e0472eb319bc523295..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/chip_scale_defines.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef _CHIP_SCALE_DEFINES_H_ -#define _CHIP_SCALE_DEFINES_H_ - -#define MAIN_CLK_FREQ (100 * 1000 * 1000) -#define NIF_PORT_NUM 2 -#define PCIE_PORT_NUM 1 -#define FUNC_ID_NUM 1026 -#define MSIX_VEC_NUM 4096 -#define PIO_TLPQ_NUM 2 - -#define PCIE0_PF_NUM 2 -#define PCIE0_PF0_VF_NUM 512 -#define PCIE0_PF1_VF_NUM 512 - -#define PCIE1_PF_NUM 0 -#define QP_NUM_MAX 32768 -#define RAW_QP_NUM_MAX 8192 -#define TSO_QP_NUM_MAX 1024 -#define CQ_NUM_MAX 32768 -#define SQ_SIZE_MAX 1024 -#define RQ_SIZE_MAX 1024 -#define CQ_SIZE_MAX 32768 -#define MPT_SIZE 32768 -#define MTT_SIZE 65536 -#define GRP_NUM_MAX 1024 -#define CLUSTER_NUM_MAX 1 - -#define PP_PCT_DEPTH 512 -#define PP_PCT_KEY_WIDTH 352 -#define PP_PCT_AD_WIDTH 42 - -#define PP_WCT_DEPTH 64 -#define PP_WCT_SHORT_KEY_WIDTH 240 -#define PP_WCT_LONG_KEY_WIDTH 480 -#define PP_WCT_AD_WIDTH 19 - -#define PP_IACL_DEPTH 16 -#define PP_IACL_KEY_WIDTH 463 -#define PP_IACL_AD_WIDTH 38 - -#define PP_TUNNEL_ENCAP_TBL_DEPTH 10240 -#define PP_TUNNEL_ENCAP_TBL_WIDTH 45 - -#define PP_MIRROR_TBL_DEPTH 256 -#define PP_MIRROR_TBL_WIDTH 106 - -#define PP_IPAT_DEPTH 2048 -#define PP_IPAT_WIDTH 119 - -#define PP_EPAT_DEPTH 2048 -#define 
PP_EPAT_WIDTH 160 - -#define PP_ONCHIP_FT_DEPTH 16384 -#define PP_ONCHIP_FT_WIDTH 439 - -#define PP_ONCHIP_FAT_DEPTH (16384 + 128) -#define PP_ONCHIP_FAT_WIDTH 326 - -#define PP_ONCHIP_CT_DEPTH (16384 + 128) -#define PP_ONCHIP_CT_WIDTH 156 - -#define PP_ONCHIP_VER_TBL_DEPTH 2048 -#define PP_ONCHIP_VER_TBL_WIDTH 13 - -#define PP_BOMT_DEPTH 1040 -#define PP_BOMT_WIDTH 12 - -#define PP_PST_DEPTH 2048 -#define PP_PST_WIDTH 1 - -#define PRI_NUM 8 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/chip_version.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/chip_version.h deleted file mode 100644 index 8505c6a687964eea7985b8723a293cd204566a65..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/chip_version.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef _CHIP_VERSION_H_ -#define _CHIP_VERSION_H_ - -#define CHIP_VERSION_H 0x100 -#define CHIP_VERSION_M 0x47a9bdfd -#define CHIP_VERSION_L 0xb13 -#define CHIP_HOTFIX_NUM 0x30 -#define CHIP_FEATURE_FLAG 0b1110010101 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/clsf_dma_csr_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/clsf_dma_csr_defines.h deleted file mode 100644 index 90f0dc17a9381ea3bf80424a0dfb6e5842a78d24..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/clsf_dma_csr_defines.h +++ /dev/null @@ -1,751 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. 
- */ - -#ifndef _CLSF_DMA_CSR_DEFINES_H_ -#define _CLSF_DMA_CSR_DEFINES_H_ - -#define CLSF_DMA_SOFT_RESET_REG_ADDR 0xa6010000 -#define CLSF_DMA_SOFT_RESET_REG_WIDTH 32 -#define CLSF_DMA_SOFT_RESET_REG_LENGTH 32 -#define CLSF_DMA_SOFT_RESET_REG_SOFT_RESET_MASK 0x1 -#define CLSF_DMA_SOFT_RESET_REG_SOFT_RESET_SHIFT 0 -#define CLSF_DMA_SOFT_RESET_REG_SOFT_RESET_WIDTH 1 -#define CLSF_DMA_SOFT_RESET_REG_SOFT_RESET_MAX_VAL 0x1 -#define CLSF_DMA_SOFT_RESET_REG_SOFT_RESET_MIN_VAL 0x0 - -#define CLSF_DMA_SCRATCH_PAD_REG_ADDR 0xa601000c -#define CLSF_DMA_SCRATCH_PAD_REG_WIDTH 32 -#define CLSF_DMA_SCRATCH_PAD_REG_LENGTH 32 -#define CLSF_DMA_SCRATCH_PAD_REG_SCRATCH_PAD_MASK 0xffffffff -#define CLSF_DMA_SCRATCH_PAD_REG_SCRATCH_PAD_SHIFT 0 -#define CLSF_DMA_SCRATCH_PAD_REG_SCRATCH_PAD_WIDTH 32 -#define CLSF_DMA_SCRATCH_PAD_REG_SCRATCH_PAD_MAX_VAL 0xffffffff -#define CLSF_DMA_SCRATCH_PAD_REG_SCRATCH_PAD_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010010 -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_WIDTH 32 -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_LENGTH 32 -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_SHIFT 0 -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_WIDTH 7 -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MAX_VAL 0x7f -#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010014 -#define CLSF_DMA_DMA_RD_ADDR_REG_WIDTH 32 -#define CLSF_DMA_DMA_RD_ADDR_REG_LENGTH 32 -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_START_ADDR_MASK 0xffff -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_START_ADDR_SHIFT 0 -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_START_ADDR_WIDTH 16 -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_START_ADDR_MAX_VAL 0xffff -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_START_ADDR_MIN_VAL 0x0 -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_MASK 0xffff0000 -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 -#define 
CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_WIDTH 16 -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_MAX_VAL 0xffff -#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_MIN_VAL 0x0 - -#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010018 -#define CLSF_DMA_INDRW_RD_START_REG_WIDTH 32 -#define CLSF_DMA_INDRW_RD_START_REG_LENGTH 32 -#define CLSF_DMA_INDRW_RD_START_REG_INDRW_START_MASK 0x1 -#define CLSF_DMA_INDRW_RD_START_REG_INDRW_START_SHIFT 0 -#define CLSF_DMA_INDRW_RD_START_REG_INDRW_START_WIDTH 1 -#define CLSF_DMA_INDRW_RD_START_REG_INDRW_START_MAX_VAL 0x1 -#define CLSF_DMA_INDRW_RD_START_REG_INDRW_START_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_UL_TIMEOUT_REG_ADDR 0xa6010030 -#define CLSF_DMA_DMA_UL_TIMEOUT_REG_WIDTH 32 -#define CLSF_DMA_DMA_UL_TIMEOUT_REG_LENGTH 32 -#define CLSF_DMA_DMA_UL_TIMEOUT_REG_DMA_UL_TIMEOUT_MASK 0x1 -#define CLSF_DMA_DMA_UL_TIMEOUT_REG_DMA_UL_TIMEOUT_SHIFT 0 -#define CLSF_DMA_DMA_UL_TIMEOUT_REG_DMA_UL_TIMEOUT_WIDTH 1 - -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_ADDR 0xa6010034 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_DMA_UL_TIMEOUT_INT_MASK_MASK 0x1 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_DMA_UL_TIMEOUT_INT_MASK_SHIFT 0 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_DMA_UL_TIMEOUT_INT_MASK_WIDTH 1 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_DMA_UL_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_TIMEOUT_INT_MASK_REG_DMA_UL_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010038 -#define CLSF_DMA_DMA_UL_BUSY_REG_WIDTH 32 -#define CLSF_DMA_DMA_UL_BUSY_REG_LENGTH 32 -#define CLSF_DMA_DMA_UL_BUSY_REG_DMA_UL_BUSY_MASK 0x1 -#define CLSF_DMA_DMA_UL_BUSY_REG_DMA_UL_BUSY_SHIFT 0 -#define CLSF_DMA_DMA_UL_BUSY_REG_DMA_UL_BUSY_WIDTH 1 - -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ADDR 0xa6010040 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_WIDTH 512 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_LENGTH 512 -#define 
CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_1_MASK 0xffffffff -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_1_SHIFT 0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_1_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_2_MASK 0xffffffff00000000 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_2_SHIFT 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_2_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_3_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_3_SHIFT 64 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_3_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_4_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_4_SHIFT 96 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_4_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_5_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_5_SHIFT 128 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_5_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_6_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_6_SHIFT 160 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_6_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_7_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_7_SHIFT 192 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_7_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_8_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_8_SHIFT 224 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_8_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_9_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_9_SHIFT 256 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_9_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_10_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_10_SHIFT 288 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_10_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_11_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_11_SHIFT 320 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_11_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_12_MASK 0x0 
-#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_12_SHIFT 352 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_12_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_13_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_13_SHIFT 384 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_13_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_14_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_14_SHIFT 416 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_14_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_15_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_15_SHIFT 448 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_15_WIDTH 32 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_16_MASK 0x0 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_16_SHIFT 480 -#define CLSF_DMA_EM_ERR_CODE_STS_REG_ERR_CODE_16_WIDTH 32 - -#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa6010080 -#define CLSF_DMA_DMA_DL_SUCCESS_REG_WIDTH 128 -#define CLSF_DMA_DMA_DL_SUCCESS_REG_LENGTH 128 -#define CLSF_DMA_DMA_DL_SUCCESS_REG_SUCCESS_MASK 0xffffffffffffffff -#define CLSF_DMA_DMA_DL_SUCCESS_REG_SUCCESS_SHIFT 0 -#define CLSF_DMA_DMA_DL_SUCCESS_REG_SUCCESS_WIDTH 128 - -#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa6010090 -#define CLSF_DMA_DMA_DL_DONE_REG_WIDTH 32 -#define CLSF_DMA_DMA_DL_DONE_REG_LENGTH 32 -#define CLSF_DMA_DMA_DL_DONE_REG_DL_DONE_MASK 0x1 -#define CLSF_DMA_DMA_DL_DONE_REG_DL_DONE_SHIFT 0 -#define CLSF_DMA_DMA_DL_DONE_REG_DL_DONE_WIDTH 1 -#define CLSF_DMA_DMA_DL_DONE_REG_DL_STS_MASK 0x2 -#define CLSF_DMA_DMA_DL_DONE_REG_DL_STS_SHIFT 1 -#define CLSF_DMA_DMA_DL_DONE_REG_DL_STS_WIDTH 1 - -#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa6010094 -#define CLSF_DMA_ERR_CODE_CLR_REG_WIDTH 32 -#define CLSF_DMA_ERR_CODE_CLR_REG_LENGTH 32 -#define CLSF_DMA_ERR_CODE_CLR_REG_ERR_CODE_CLR_MASK 0x1 -#define CLSF_DMA_ERR_CODE_CLR_REG_ERR_CODE_CLR_SHIFT 0 -#define CLSF_DMA_ERR_CODE_CLR_REG_ERR_CODE_CLR_WIDTH 1 -#define CLSF_DMA_ERR_CODE_CLR_REG_ERR_CODE_CLR_MAX_VAL 0x1 -#define 
CLSF_DMA_ERR_CODE_CLR_REG_ERR_CODE_CLR_MIN_VAL 0x0 - -#define CLSF_DMA_UL_CFG_ERR_INT_REG_ADDR 0xa6010098 -#define CLSF_DMA_UL_CFG_ERR_INT_REG_WIDTH 32 -#define CLSF_DMA_UL_CFG_ERR_INT_REG_LENGTH 32 -#define CLSF_DMA_UL_CFG_ERR_INT_REG_UL_CFG_ERR_MASK 0x1 -#define CLSF_DMA_UL_CFG_ERR_INT_REG_UL_CFG_ERR_SHIFT 0 -#define CLSF_DMA_UL_CFG_ERR_INT_REG_UL_CFG_ERR_WIDTH 1 - -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_ADDR 0xa601009c -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_WIDTH 32 -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_LENGTH 32 -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_UL_CFG_ERR_INT_MASK_MASK 0x1 -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_UL_CFG_ERR_INT_MASK_SHIFT 0 -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_UL_CFG_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_UL_CFG_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_CFG_ERR_INT_MASK_REG_UL_CFG_ERR_INT_MASK_MIN_VAL 0x0 - -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_ADDR 0xa60100a0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_WIDTH 32 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_LENGTH 32 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_EM_MONITOR_EOF_ERR_MASK 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_EM_MONITOR_EOF_ERR_SHIFT 0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_EM_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_EM_MONITOR_SOF_ERR_MASK 0x2 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_EM_MONITOR_SOF_ERR_SHIFT 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_EM_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_TCAM_MONITOR_EOF_ERR_MASK 0x4 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_TCAM_MONITOR_EOF_ERR_SHIFT 2 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_TCAM_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_TCAM_MONITOR_SOF_ERR_MASK 0x8 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_TCAM_MONITOR_SOF_ERR_SHIFT 3 -#define 
CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_TCAM_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_DIR_MONITOR_EOF_ERR_MASK 0x10 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_DIR_MONITOR_EOF_ERR_SHIFT 4 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_DIR_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_DIR_MONITOR_SOF_ERR_MASK 0x20 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_DIR_MONITOR_SOF_ERR_SHIFT 5 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_DIR_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_RWE_MONITOR_EOF_ERR_MASK 0x40 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_RWE_MONITOR_EOF_ERR_SHIFT 6 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_RWE_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_RWE_MONITOR_SOF_ERR_MASK 0x80 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_RWE_MONITOR_SOF_ERR_SHIFT 7 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_REG_DL_RWE_MONITOR_SOF_ERR_WIDTH 1 - -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_ADDR 0xa60100a4 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_WIDTH 32 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_LENGTH 32 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_EOF_ERR_INT_MASK_MASK 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_EOF_ERR_INT_MASK_SHIFT 0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_SOF_ERR_INT_MASK_MASK 0x2 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_SOF_ERR_INT_MASK_SHIFT 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 
-#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_EM_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_EOF_ERR_INT_MASK_MASK 0x4 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_EOF_ERR_INT_MASK_SHIFT 2 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_SOF_ERR_INT_MASK_MASK 0x8 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_SOF_ERR_INT_MASK_SHIFT 3 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_TCAM_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_EOF_ERR_INT_MASK_MASK 0x10 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_EOF_ERR_INT_MASK_SHIFT 4 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_SOF_ERR_INT_MASK_MASK 0x20 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_SOF_ERR_INT_MASK_SHIFT 5 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define 
CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_DIR_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_EOF_ERR_INT_MASK_MASK 0x40 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_EOF_ERR_INT_MASK_SHIFT 6 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_SOF_ERR_INT_MASK_MASK 0x80 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_SOF_ERR_INT_MASK_SHIFT 7 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_DL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_DL_RWE_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 - -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_ADDR 0xa60100a8 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_WIDTH 32 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_LENGTH 32 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_EM_MONITOR_EOF_ERR_MASK 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_EM_MONITOR_EOF_ERR_SHIFT 0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_EM_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_TCAM_MONITOR_EOF_ERR_MASK 0x2 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_TCAM_MONITOR_EOF_ERR_SHIFT 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_TCAM_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_DIR_MONITOR_EOF_ERR_MASK 0x4 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_DIR_MONITOR_EOF_ERR_SHIFT 2 -#define 
CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_DIR_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_RWE_MONITOR_EOF_ERR_MASK 0x8 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_RWE_MONITOR_EOF_ERR_SHIFT 3 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_RWE_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_STAT_MONITOR_EOF_ERR_MASK 0x10 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_STAT_MONITOR_EOF_ERR_SHIFT 4 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_STAT_MONITOR_EOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_EM_MONITOR_SOF_ERR_MASK 0x20 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_EM_MONITOR_SOF_ERR_SHIFT 5 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_EM_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_TCAM_MONITOR_SOF_ERR_MASK 0x40 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_TCAM_MONITOR_SOF_ERR_SHIFT 6 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_TCAM_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_DIR_MONITOR_SOF_ERR_MASK 0x80 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_DIR_MONITOR_SOF_ERR_SHIFT 7 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_DIR_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_RWE_MONITOR_SOF_ERR_MASK 0x100 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_RWE_MONITOR_SOF_ERR_SHIFT 8 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_RWE_MONITOR_SOF_ERR_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_STAT_MONITOR_SOF_ERR_MASK 0x200 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_STAT_MONITOR_SOF_ERR_SHIFT 9 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_REG_UL_STAT_MONITOR_SOF_ERR_WIDTH 1 - -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_ADDR 0xa60100ac -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_WIDTH 32 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_LENGTH 32 -#define 
CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_EOF_ERR_INT_MASK_MASK 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_EOF_ERR_INT_MASK_SHIFT 0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_EOF_ERR_INT_MASK_MASK 0x2 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_EOF_ERR_INT_MASK_SHIFT 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_EOF_ERR_INT_MASK_MASK 0x4 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_EOF_ERR_INT_MASK_SHIFT 2 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_EOF_ERR_INT_MASK_MASK 0x8 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_EOF_ERR_INT_MASK_SHIFT 3 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_EOF_ERR_INT_MASK_MASK 
0x10 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_EOF_ERR_INT_MASK_SHIFT 4 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_EOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_EOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_EOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_SOF_ERR_INT_MASK_MASK 0x20 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_SOF_ERR_INT_MASK_SHIFT 5 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_EM_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_SOF_ERR_INT_MASK_MASK 0x40 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_SOF_ERR_INT_MASK_SHIFT 6 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_TCAM_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_SOF_ERR_INT_MASK_MASK 0x80 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_SOF_ERR_INT_MASK_SHIFT 7 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_DIR_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_SOF_ERR_INT_MASK_MASK 0x100 -#define 
CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_SOF_ERR_INT_MASK_SHIFT 8 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_RWE_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_SOF_ERR_INT_MASK_MASK 0x200 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_SOF_ERR_INT_MASK_SHIFT 9 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_SOF_ERR_INT_MASK_WIDTH 1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_SOF_ERR_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_UL_MONITOR_SOF_EOF_ERR_INT_MASK_REG_UL_STAT_MONITOR_SOF_ERR_INT_MASK_MIN_VAL 0x0 - -#define CLSF_DMA_FIFO_AFUL_TH_REG_ADDR 0xa60100b0 -#define CLSF_DMA_FIFO_AFUL_TH_REG_WIDTH 32 -#define CLSF_DMA_FIFO_AFUL_TH_REG_LENGTH 32 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_DL_FIFO_AFUL_TH_MASK 0x3f -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_DL_FIFO_AFUL_TH_SHIFT 0 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_DL_FIFO_AFUL_TH_WIDTH 6 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_DL_FIFO_AFUL_TH_MAX_VAL 0x3f -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_DL_FIFO_AFUL_TH_MIN_VAL 0x0 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_UL_FIFO_AFUL_TH_MASK 0xfc0 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_UL_FIFO_AFUL_TH_SHIFT 6 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_UL_FIFO_AFUL_TH_WIDTH 6 -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_UL_FIFO_AFUL_TH_MAX_VAL 0x3f -#define CLSF_DMA_FIFO_AFUL_TH_REG_DMA_UL_FIFO_AFUL_TH_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_DL_WMH_REG_ADDR 0xa60100b8 -#define CLSF_DMA_DMA_DL_WMH_REG_WIDTH 32 -#define CLSF_DMA_DMA_DL_WMH_REG_LENGTH 32 -#define CLSF_DMA_DMA_DL_WMH_REG_DMA_DL_FIFO_USED_CNT_WMH_MASK 0x3f -#define CLSF_DMA_DMA_DL_WMH_REG_DMA_DL_FIFO_USED_CNT_WMH_SHIFT 0 -#define CLSF_DMA_DMA_DL_WMH_REG_DMA_DL_FIFO_USED_CNT_WMH_WIDTH 6 - 
-#define CLSF_DMA_DMA_UL_WMH_REG_ADDR 0xa60100bc -#define CLSF_DMA_DMA_UL_WMH_REG_WIDTH 32 -#define CLSF_DMA_DMA_UL_WMH_REG_LENGTH 32 -#define CLSF_DMA_DMA_UL_WMH_REG_DMA_UL_FIFO_USED_CNT_WMH_MASK 0x3f -#define CLSF_DMA_DMA_UL_WMH_REG_DMA_UL_FIFO_USED_CNT_WMH_SHIFT 0 -#define CLSF_DMA_DMA_UL_WMH_REG_DMA_UL_FIFO_USED_CNT_WMH_WIDTH 6 - -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_ADDR 0xa60100c0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_WIDTH 32 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_LENGTH 32 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC0_MASK 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC0_SHIFT 0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC0_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC0_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC0_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC1_MASK 0x2 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC1_SHIFT 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC1_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC1_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_EM_RSP_FORCE_FC1_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC0_MASK 0x4 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC0_SHIFT 2 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC0_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC0_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC0_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC1_MASK 0x8 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC1_SHIFT 3 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC1_WIDTH 1 -#define 
CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC1_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_TCAM_RSP_FORCE_FC1_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC0_MASK 0x10 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC0_SHIFT 4 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC0_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC0_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC0_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC1_MASK 0x20 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC1_SHIFT 5 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC1_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC1_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_DIR_RSP_FORCE_FC1_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC0_MASK 0x40 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC0_SHIFT 6 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC0_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC0_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC0_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC1_MASK 0x80 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC1_SHIFT 7 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC1_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC1_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_STAT_RSP_FORCE_FC1_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC0_MASK 0x100 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC0_SHIFT 8 -#define 
CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC0_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC0_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC0_MIN_VAL 0x0 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC1_MASK 0x200 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC1_SHIFT 9 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC1_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC1_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_DEBUG_CFG_REG_DMA_UL_RWE_RSP_FORCE_FC1_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_ADDR 0xa60100c4 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_WIDTH 32 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_LENGTH 32 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC0_MASK 0x1 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC0_SHIFT 0 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC0_WIDTH 1 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC0_MAX_VAL 0x1 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC0_MIN_VAL 0x0 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC1_MASK 0x2 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC1_SHIFT 1 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC1_WIDTH 1 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC1_MAX_VAL 0x1 -#define CLSF_DMA_DMA_DL_FC_DEBUG_CFG_REG_DMA_DL_FORCE_FC1_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_ADDR 0xa60100c8 -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_WIDTH 32 -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_LENGTH 32 -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_DMA_DL_FC_CNT_IN_REVS_MASK 0x1 -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_DMA_DL_FC_CNT_IN_REVS_SHIFT 0 -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_DMA_DL_FC_CNT_IN_REVS_WIDTH 1 -#define CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_DMA_DL_FC_CNT_IN_REVS_MAX_VAL 0x1 -#define 
CLSF_DMA_DMA_DL_FC_CNT_IN_REVS_REG_DMA_DL_FC_CNT_IN_REVS_MIN_VAL 0x0 - -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_ADDR 0xa60100cc -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_WIDTH 32 -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_LENGTH 32 -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_DMA_UL_RSP_FC_CNT_IN_REVS_MASK 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_DMA_UL_RSP_FC_CNT_IN_REVS_SHIFT 0 -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_DMA_UL_RSP_FC_CNT_IN_REVS_WIDTH 1 -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_DMA_UL_RSP_FC_CNT_IN_REVS_MAX_VAL 0x1 -#define CLSF_DMA_DMA_UL_RSP_FC_CNT_IN_REVS_REG_DMA_UL_RSP_FC_CNT_IN_REVS_MIN_VAL 0x0 - -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_ADDR 0xa60100d0 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_WIDTH 32 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_LENGTH 32 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_HIF2IPP_UL_FC_CNT_IN_REVS_MASK 0x1 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_HIF2IPP_UL_FC_CNT_IN_REVS_SHIFT 0 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_HIF2IPP_UL_FC_CNT_IN_REVS_WIDTH 1 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_HIF2IPP_UL_FC_CNT_IN_REVS_MAX_VAL 0x1 -#define CLSF_DMA_HIF2IPP_FC_CNT_IN_REVS_REG_HIF2IPP_UL_FC_CNT_IN_REVS_MIN_VAL 0x0 - -#define CLSF_DMA_OVFL_UNFL_INT_REG_ADDR 0xa60100d8 -#define CLSF_DMA_OVFL_UNFL_INT_REG_WIDTH 32 -#define CLSF_DMA_OVFL_UNFL_INT_REG_LENGTH 32 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_DL_FIFO_OVFL_MASK 0x1 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_DL_FIFO_OVFL_SHIFT 0 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_DL_FIFO_OVFL_WIDTH 1 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_DL_FIFO_UNFL_MASK 0x2 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_DL_FIFO_UNFL_SHIFT 1 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_DL_FIFO_UNFL_WIDTH 1 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_UL_FIFO_OVFL_MASK 0x4 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_UL_FIFO_OVFL_SHIFT 2 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_UL_FIFO_OVFL_WIDTH 1 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_UL_FIFO_UNFL_MASK 
0x8 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_UL_FIFO_UNFL_SHIFT 3 -#define CLSF_DMA_OVFL_UNFL_INT_REG_DMA_UL_FIFO_UNFL_WIDTH 1 - -#define CLSF_DMA_PP_DMA_FC_CNT_REG_ADDR 0xa60100e0 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_WIDTH 32 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_LENGTH 32 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_TX_IPP2HIF_DL_FC_CNT_MASK 0xffff -#define CLSF_DMA_PP_DMA_FC_CNT_REG_TX_IPP2HIF_DL_FC_CNT_SHIFT 0 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_TX_IPP2HIF_DL_FC_CNT_WIDTH 16 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_RX_HIF2IPP_UL_FC_CNT_MASK 0xffff0000 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_RX_HIF2IPP_UL_FC_CNT_SHIFT 16 -#define CLSF_DMA_PP_DMA_FC_CNT_REG_RX_HIF2IPP_UL_FC_CNT_WIDTH 16 - -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_ADDR 0xa60100f0 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_WIDTH 128 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_LENGTH 96 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_EM_UL_RSP_FC_CNT_MASK 0xffff -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_EM_UL_RSP_FC_CNT_SHIFT 0 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_EM_UL_RSP_FC_CNT_WIDTH 16 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_TCAM_UL_RSP_FC_CNT_MASK 0xffff0000 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_TCAM_UL_RSP_FC_CNT_SHIFT 16 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_TCAM_UL_RSP_FC_CNT_WIDTH 16 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_DIR_UL_RSP_FC_CNT_MASK 0xffff00000000 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_DIR_UL_RSP_FC_CNT_SHIFT 32 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_DIR_UL_RSP_FC_CNT_WIDTH 16 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_STAT_UL_RSP_FC_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_STAT_UL_RSP_FC_CNT_SHIFT 48 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_STAT_UL_RSP_FC_CNT_WIDTH 16 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_RWE_UL_RSP_FC_CNT_MASK 0x0 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_RWE_UL_RSP_FC_CNT_SHIFT 64 -#define CLSF_DMA_PP_DMA_UL_RSP_CNT_REG_TX_RWE_UL_RSP_FC_CNT_WIDTH 16 - -#define CLSF_DMA_RX_DL_HIF_CNT_REG_ADDR 0xa6010100 -#define 
CLSF_DMA_RX_DL_HIF_CNT_REG_WIDTH 128 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_LENGTH 96 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_BURST_VLD_CNT_MASK 0xffff -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_BURST_VLD_CNT_SHIFT 0 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_BURST_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_SOF_CNT_MASK 0xffff0000 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_SOF_CNT_SHIFT 16 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_SOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_EOF_CNT_MASK 0xffff00000000 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_EOF_CNT_SHIFT 32 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_EOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_DATA_VLD_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_DATA_VLD_CNT_SHIFT 48 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_HIF_DATA_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_LAST_FLAG_CNT_MASK 0x0 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_LAST_FLAG_CNT_SHIFT 64 -#define CLSF_DMA_RX_DL_HIF_CNT_REG_RX_LAST_FLAG_CNT_WIDTH 16 - -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_ADDR 0xa6010120 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_WIDTH 256 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_LENGTH 224 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_FALSE_RSP_VLD_CNT_MASK 0xffff -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_FALSE_RSP_VLD_CNT_SHIFT 0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_FALSE_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_SOF_CNT_MASK 0xffff0000 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_SOF_CNT_SHIFT 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_EOF_CNT_MASK 0xffff00000000 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_EOF_CNT_SHIFT 32 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_VLD_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_VLD_CNT_SHIFT 48 -#define 
CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_EM_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_SOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_SOF_CNT_SHIFT 64 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_EOF_CNT_SHIFT 80 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_VLD_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_VLD_CNT_SHIFT 96 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_TCAM_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_SOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_SOF_CNT_SHIFT 112 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_EOF_CNT_SHIFT 128 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_VLD_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_VLD_CNT_SHIFT 144 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_DIR_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_SOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_SOF_CNT_SHIFT 160 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_EOF_CNT_SHIFT 176 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_VLD_CNT_MASK 0x0 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_VLD_CNT_SHIFT 192 -#define CLSF_DMA_TX_DL_CLSF_CNT_REG_TX_RWE_REQ_VLD_CNT_WIDTH 16 - -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_ADDR 0xa6010140 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_WIDTH 64 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_LENGTH 64 -#define 
CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_EM_DL_RSP_VLD_CNT_MASK 0xffff -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_EM_DL_RSP_VLD_CNT_SHIFT 0 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_EM_DL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_TCAM_DL_RSP_VLD_CNT_MASK 0xffff0000 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_TCAM_DL_RSP_VLD_CNT_SHIFT 16 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_TCAM_DL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_DIR_DL_RSP_VLD_CNT_MASK 0xffff00000000 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_DIR_DL_RSP_VLD_CNT_SHIFT 32 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_DIR_DL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_RWE_DL_RSP_VLD_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_RWE_DL_RSP_VLD_CNT_SHIFT 48 -#define CLSF_DMA_RX_DL_CLSF_RSP_CNT_REG_RX_RWE_DL_RSP_VLD_CNT_WIDTH 16 - -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_ADDR 0xa6010160 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_WIDTH 256 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_LENGTH 256 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_VLD_CNT_MASK 0xffff -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_VLD_CNT_SHIFT 0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_SOF_CNT_MASK 0xffff0000 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_SOF_CNT_SHIFT 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_EOF_CNT_MASK 0xffff00000000 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_EOF_CNT_SHIFT 32 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_EM_UL_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_VLD_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_VLD_CNT_SHIFT 48 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_SOF_CNT_MASK 0x0 -#define 
CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_SOF_CNT_SHIFT 64 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_EOF_CNT_SHIFT 80 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_TCAM_UL_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_VLD_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_VLD_CNT_SHIFT 96 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_SOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_SOF_CNT_SHIFT 112 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_EOF_CNT_SHIFT 128 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_DIR_UL_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_VLD_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_VLD_CNT_SHIFT 144 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_SOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_SOF_CNT_SHIFT 160 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_EOF_CNT_SHIFT 176 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_STAT_UL_REQ_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_VLD_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_VLD_CNT_SHIFT 192 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_SOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_SOF_CNT_SHIFT 208 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_SOF_CNT_WIDTH 16 -#define 
CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_EOF_CNT_MASK 0x0 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_EOF_CNT_SHIFT 224 -#define CLSF_DMA_TX_UL_CLSF_CNT_REG_TX_RWE_UL_REQ_EOF_CNT_WIDTH 16 - -#define CLSF_DMA_TX_UL_HIF_CNT_REG_ADDR 0xa6010180 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_WIDTH 64 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_LENGTH 64 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_VLD_CNT_MASK 0xffff -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_VLD_CNT_SHIFT 0 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_VLD_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_SOF_CNT_MASK 0xffff0000 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_SOF_CNT_SHIFT 16 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_SOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_EOF_CNT_MASK 0xffff00000000 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_EOF_CNT_SHIFT 32 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_UL_EOF_CNT_WIDTH 16 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_DMA_RD_DONE_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_DMA_RD_DONE_CNT_SHIFT 48 -#define CLSF_DMA_TX_UL_HIF_CNT_REG_TX_IPP2HIF_DMA_RD_DONE_CNT_WIDTH 16 - -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_ADDR 0xa60101a0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_WIDTH 256 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_LENGTH 256 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_VLD_CNT_MASK 0xffff -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_VLD_CNT_SHIFT 0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_SOF_CNT_MASK 0xffff0000 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_SOF_CNT_SHIFT 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_SOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_EOF_CNT_MASK 0xffff00000000 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_EOF_CNT_SHIFT 32 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_EM_UL_RSP_EOF_CNT_WIDTH 16 -#define 
CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_VLD_CNT_MASK 0xffff000000000000 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_VLD_CNT_SHIFT 48 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_SOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_SOF_CNT_SHIFT 64 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_SOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_EOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_EOF_CNT_SHIFT 80 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_TCAM_UL_RSP_EOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_VLD_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_VLD_CNT_SHIFT 96 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_SOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_SOF_CNT_SHIFT 112 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_SOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_EOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_EOF_CNT_SHIFT 128 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_DIR_UL_RSP_EOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_VLD_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_VLD_CNT_SHIFT 144 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_SOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_SOF_CNT_SHIFT 160 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_SOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_EOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_EOF_CNT_SHIFT 176 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_STAT_UL_RSP_EOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_VLD_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_VLD_CNT_SHIFT 
192 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_VLD_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_SOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_SOF_CNT_SHIFT 208 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_SOF_CNT_WIDTH 16 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_EOF_CNT_MASK 0x0 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_EOF_CNT_SHIFT 224 -#define CLSF_DMA_RX_UL_CLSF_CNT_REG_RX_RWE_UL_RSP_EOF_CNT_WIDTH 16 - -#define CLSF_DMA_RX_CFG_RD_REQ_POS_CNT_REG_ADDR 0xa60101c0 -#define CLSF_DMA_RX_CFG_RD_REQ_POS_CNT_REG_WIDTH 32 -#define CLSF_DMA_RX_CFG_RD_REQ_POS_CNT_REG_LENGTH 32 -#define CLSF_DMA_RX_CFG_RD_REQ_POS_CNT_REG_CFG_RD_REQ_POS_CNT_MASK 0xffff -#define CLSF_DMA_RX_CFG_RD_REQ_POS_CNT_REG_CFG_RD_REQ_POS_CNT_SHIFT 0 -#define CLSF_DMA_RX_CFG_RD_REQ_POS_CNT_REG_CFG_RD_REQ_POS_CNT_WIDTH 16 - -#define CLSF_DMA_NEW_EOF_CNT_REG_ADDR 0xa60101c4 -#define CLSF_DMA_NEW_EOF_CNT_REG_WIDTH 32 -#define CLSF_DMA_NEW_EOF_CNT_REG_LENGTH 32 -#define CLSF_DMA_NEW_EOF_CNT_REG_NEW_EOF_CNT_MASK 0xffff -#define CLSF_DMA_NEW_EOF_CNT_REG_NEW_EOF_CNT_SHIFT 0 -#define CLSF_DMA_NEW_EOF_CNT_REG_NEW_EOF_CNT_WIDTH 16 - -#define CLSF_DMA_RD_LAST_FLAG_CNT_REG_ADDR 0xa60101c8 -#define CLSF_DMA_RD_LAST_FLAG_CNT_REG_WIDTH 32 -#define CLSF_DMA_RD_LAST_FLAG_CNT_REG_LENGTH 32 -#define CLSF_DMA_RD_LAST_FLAG_CNT_REG_RD_LAST_FLAG_CNT_MASK 0xffff -#define CLSF_DMA_RD_LAST_FLAG_CNT_REG_RD_LAST_FLAG_CNT_SHIFT 0 -#define CLSF_DMA_RD_LAST_FLAG_CNT_REG_RD_LAST_FLAG_CNT_WIDTH 16 - -#define CLSF_DMA_TIMEOUT_INT_REG_ADDR 0xa60101cc -#define CLSF_DMA_TIMEOUT_INT_REG_WIDTH 32 -#define CLSF_DMA_TIMEOUT_INT_REG_LENGTH 32 -#define CLSF_DMA_TIMEOUT_INT_REG_DMA_DL_SUCCESS_REG_TIMEOUT_MASK 0x1 -#define CLSF_DMA_TIMEOUT_INT_REG_DMA_DL_SUCCESS_REG_TIMEOUT_SHIFT 0 -#define CLSF_DMA_TIMEOUT_INT_REG_DMA_DL_SUCCESS_REG_TIMEOUT_WIDTH 1 -#define CLSF_DMA_TIMEOUT_INT_REG_DMA_DL_DONE_REG_TIMEOUT_MASK 0x2 -#define 
CLSF_DMA_TIMEOUT_INT_REG_DMA_DL_DONE_REG_TIMEOUT_SHIFT 1 -#define CLSF_DMA_TIMEOUT_INT_REG_DMA_DL_DONE_REG_TIMEOUT_WIDTH 1 - -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_ADDR 0xa60101d0 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_SUCCESS_REG_TIMEOUT_INT_MASK_MASK 0x1 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_SUCCESS_REG_TIMEOUT_INT_MASK_SHIFT 0 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_SUCCESS_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_SUCCESS_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_SUCCESS_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_DONE_REG_TIMEOUT_INT_MASK_MASK 0x2 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_DONE_REG_TIMEOUT_INT_MASK_SHIFT 1 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_DONE_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_DONE_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define CLSF_DMA_TIMEOUT_INT_MASK_REG_DMA_DL_DONE_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define CLSF_DMA_TIMEOUT_CFG_REG_ADDR 0xa60101d4 -#define CLSF_DMA_TIMEOUT_CFG_REG_WIDTH 32 -#define CLSF_DMA_TIMEOUT_CFG_REG_LENGTH 32 -#define CLSF_DMA_TIMEOUT_CFG_REG_TIMEOUT_PARA_MASK 0xffff -#define CLSF_DMA_TIMEOUT_CFG_REG_TIMEOUT_PARA_SHIFT 0 -#define CLSF_DMA_TIMEOUT_CFG_REG_TIMEOUT_PARA_WIDTH 16 -#define CLSF_DMA_TIMEOUT_CFG_REG_TIMEOUT_PARA_MAX_VAL 0xffff -#define CLSF_DMA_TIMEOUT_CFG_REG_TIMEOUT_PARA_MIN_VAL 0x0 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_cmdqm_csr_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_cmdqm_csr_defines.h deleted file mode 100644 index b54f97b0c8bdb931f05cd06ff0ff3f1002ed999e..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_cmdqm_csr_defines.h +++ /dev/null @@ -1,978 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 
2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef _HIF_CMDQM_CSR_DEFINES_H_ -#define _HIF_CMDQM_CSR_DEFINES_H_ - -#define HIF_CMDQM_SOFT_RESET_REG_ADDR 0xa1020000 -#define HIF_CMDQM_SOFT_RESET_REG_WIDTH 32 -#define HIF_CMDQM_SOFT_RESET_REG_LENGTH 32 -#define HIF_CMDQM_SOFT_RESET_REG_SOFT_RESET_MASK 0x1 -#define HIF_CMDQM_SOFT_RESET_REG_SOFT_RESET_SHIFT 0 -#define HIF_CMDQM_SOFT_RESET_REG_SOFT_RESET_WIDTH 1 -#define HIF_CMDQM_SOFT_RESET_REG_SOFT_RESET_MAX_VAL 0x1 -#define HIF_CMDQM_SOFT_RESET_REG_SOFT_RESET_MIN_VAL 0x0 - -#define HIF_CMDQM_SCRATCH_PAD_REG_ADDR 0xa102000c -#define HIF_CMDQM_SCRATCH_PAD_REG_WIDTH 32 -#define HIF_CMDQM_SCRATCH_PAD_REG_LENGTH 32 -#define HIF_CMDQM_SCRATCH_PAD_REG_SCRATCH_PAD_MASK 0xffffffff -#define HIF_CMDQM_SCRATCH_PAD_REG_SCRATCH_PAD_SHIFT 0 -#define HIF_CMDQM_SCRATCH_PAD_REG_SCRATCH_PAD_WIDTH 32 -#define HIF_CMDQM_SCRATCH_PAD_REG_SCRATCH_PAD_MAX_VAL 0xffffffff -#define HIF_CMDQM_SCRATCH_PAD_REG_SCRATCH_PAD_MIN_VAL 0x0 - -#define HIF_CMDQM_CSR_ERR_FLAG_REG_ADDR 0xa1020010 -#define HIF_CMDQM_CSR_ERR_FLAG_REG_WIDTH 32 -#define HIF_CMDQM_CSR_ERR_FLAG_REG_LENGTH 32 -#define HIF_CMDQM_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_MASK 0x1 -#define HIF_CMDQM_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_SHIFT 0 -#define HIF_CMDQM_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_WIDTH 1 - -#define HIF_CMDQM_CSR_ERR_ADDR_REG_ADDR 0xa1020014 -#define HIF_CMDQM_CSR_ERR_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_CSR_ERR_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_MASK 0xffffffff -#define HIF_CMDQM_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_SHIFT 0 -#define HIF_CMDQM_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_WIDTH 32 - -#define HIF_CMDQM_CSR_ERR_LEN_REG_ADDR 0xa1020018 -#define HIF_CMDQM_CSR_ERR_LEN_REG_WIDTH 32 -#define HIF_CMDQM_CSR_ERR_LEN_REG_LENGTH 32 -#define HIF_CMDQM_CSR_ERR_LEN_REG_CSR_ERR_LEN_MASK 0x3ff -#define HIF_CMDQM_CSR_ERR_LEN_REG_CSR_ERR_LEN_SHIFT 0 -#define HIF_CMDQM_CSR_ERR_LEN_REG_CSR_ERR_LEN_WIDTH 10 - -#define 
HIF_CMDQM_CSR_ERR_TYPE_REG_ADDR 0xa102001c -#define HIF_CMDQM_CSR_ERR_TYPE_REG_WIDTH 32 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_LENGTH 32 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_MASK 0x1 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_SHIFT 0 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_WIDTH 1 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_MASK 0x6 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_SHIFT 1 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_WIDTH 2 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_MASK 0x8 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_SHIFT 3 -#define HIF_CMDQM_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_WIDTH 1 - -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_WIDTH 32 -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_LENGTH 32 -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_Q_ELEMENT_SZ_MASK 0xf -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_Q_ELEMENT_SZ_SHIFT 0 -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_Q_ELEMENT_SZ_WIDTH 4 -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_Q_ELEMENT_SZ_MAX_VAL 0xf -#define HIF_CMDQM_Q_ELEMENT_SZ_REG_Q_ELEMENT_SZ_MIN_VAL 0x0 - -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_ADDR 0xa1020024 -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_WIDTH 32 -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_LENGTH 32 -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_MASK 0x7ff -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_SHIFT 0 -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_WIDTH 11 -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_MAX_VAL 0x7ff -#define HIF_CMDQM_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_MIN_VAL 0x0 - -#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 -#define HIF_CMDQM_HOST_Q_DEPTH_REG_WIDTH 32 -#define HIF_CMDQM_HOST_Q_DEPTH_REG_LENGTH 32 -#define HIF_CMDQM_HOST_Q_DEPTH_REG_HOST_Q_DEPTH_MASK 0x7ff -#define HIF_CMDQM_HOST_Q_DEPTH_REG_HOST_Q_DEPTH_SHIFT 0 -#define 
HIF_CMDQM_HOST_Q_DEPTH_REG_HOST_Q_DEPTH_WIDTH 11 -#define HIF_CMDQM_HOST_Q_DEPTH_REG_HOST_Q_DEPTH_MAX_VAL 0x7ff -#define HIF_CMDQM_HOST_Q_DEPTH_REG_HOST_Q_DEPTH_MIN_VAL 0x0 - -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_ADDR 0xa102002c -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_WIDTH 32 -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_LENGTH 32 -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_HOST_Q_BURST_NUM_MASK 0x7 -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_HOST_Q_BURST_NUM_SHIFT 0 -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_HOST_Q_BURST_NUM_WIDTH 3 -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_HOST_Q_BURST_NUM_MAX_VAL 0x7 -#define HIF_CMDQM_HOST_Q_BURST_NUM_REG_HOST_Q_BURST_NUM_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_Q_DEPTH_REG_ADDR 0xa1020030 -#define HIF_CMDQM_ARM_Q_DEPTH_REG_WIDTH 32 -#define HIF_CMDQM_ARM_Q_DEPTH_REG_LENGTH 32 -#define HIF_CMDQM_ARM_Q_DEPTH_REG_ARM_Q_DEPTH_MASK 0x7ff -#define HIF_CMDQM_ARM_Q_DEPTH_REG_ARM_Q_DEPTH_SHIFT 0 -#define HIF_CMDQM_ARM_Q_DEPTH_REG_ARM_Q_DEPTH_WIDTH 11 -#define HIF_CMDQM_ARM_Q_DEPTH_REG_ARM_Q_DEPTH_MAX_VAL 0x7ff -#define HIF_CMDQM_ARM_Q_DEPTH_REG_ARM_Q_DEPTH_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_ADDR 0xa1020034 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_WIDTH 32 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_LENGTH 32 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_ARM_Q_BURST_NUM_MASK 0x7 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_ARM_Q_BURST_NUM_SHIFT 0 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_ARM_Q_BURST_NUM_WIDTH 3 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_ARM_Q_BURST_NUM_MAX_VAL 0x7 -#define HIF_CMDQM_ARM_Q_BURST_NUM_REG_ARM_Q_BURST_NUM_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_ADDR 0xa1020038 -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_ARM_REQ_BASE_ADDR_MASK 0xffffffff -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_ARM_REQ_BASE_ADDR_SHIFT 0 -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_ARM_REQ_BASE_ADDR_WIDTH 32 -#define 
HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_ARM_REQ_BASE_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_ARM_REQ_BASE_ADDR_REG_ARM_REQ_BASE_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_REQ_CID_REG_ADDR 0xa102003c -#define HIF_CMDQM_ARM_REQ_CID_REG_WIDTH 32 -#define HIF_CMDQM_ARM_REQ_CID_REG_LENGTH 32 -#define HIF_CMDQM_ARM_REQ_CID_REG_ARM_REQ_CID_MASK 0x7ff -#define HIF_CMDQM_ARM_REQ_CID_REG_ARM_REQ_CID_SHIFT 0 -#define HIF_CMDQM_ARM_REQ_CID_REG_ARM_REQ_CID_WIDTH 11 -#define HIF_CMDQM_ARM_REQ_CID_REG_ARM_REQ_CID_MAX_VAL 0x7ff -#define HIF_CMDQM_ARM_REQ_CID_REG_ARM_REQ_CID_MIN_VAL 0x0 - -#define HIF_CMDQM_HW_ARM_REQ_CID_REG_ADDR 0xa1020040 -#define HIF_CMDQM_HW_ARM_REQ_CID_REG_WIDTH 32 -#define HIF_CMDQM_HW_ARM_REQ_CID_REG_LENGTH 32 -#define HIF_CMDQM_HW_ARM_REQ_CID_REG_HW_ARM_REQ_CID_MASK 0x7ff -#define HIF_CMDQM_HW_ARM_REQ_CID_REG_HW_ARM_REQ_CID_SHIFT 0 -#define HIF_CMDQM_HW_ARM_REQ_CID_REG_HW_ARM_REQ_CID_WIDTH 11 - -#define HIF_CMDQM_ARM_REQ_PID_REG_ADDR 0xa1020044 -#define HIF_CMDQM_ARM_REQ_PID_REG_WIDTH 32 -#define HIF_CMDQM_ARM_REQ_PID_REG_LENGTH 32 -#define HIF_CMDQM_ARM_REQ_PID_REG_ARM_REQ_PID_MASK 0x7ff -#define HIF_CMDQM_ARM_REQ_PID_REG_ARM_REQ_PID_SHIFT 0 -#define HIF_CMDQM_ARM_REQ_PID_REG_ARM_REQ_PID_WIDTH 11 - -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_REG_ADDR 0xa1020048 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_REG_WIDTH 32 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_REG_LENGTH 32 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_REG_HW2ARM_REQ_MASK 0x1 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_REG_HW2ARM_REQ_SHIFT 0 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_REG_HW2ARM_REQ_WIDTH 1 - -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_ADDR 0xa102004c -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_WIDTH 32 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_LENGTH 32 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_HW2ARM_REQ_INT_MASK_MASK 0x1 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_HW2ARM_REQ_INT_MASK_SHIFT 0 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_HW2ARM_REQ_INT_MASK_WIDTH 1 
-#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_HW2ARM_REQ_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_CMDQM2IRQ_CMDQ_REQ_INT_MASK_REG_HW2ARM_REQ_INT_MASK_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_ADDR 0xa1020050 -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_ARM_RSP_BASE_ADDR_MASK 0xffffffff -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_ARM_RSP_BASE_ADDR_SHIFT 0 -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_ARM_RSP_BASE_ADDR_WIDTH 32 -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_ARM_RSP_BASE_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_ARM_RSP_BASE_ADDR_REG_ARM_RSP_BASE_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_RSP_CID_REG_ADDR 0xa1020054 -#define HIF_CMDQM_ARM_RSP_CID_REG_WIDTH 32 -#define HIF_CMDQM_ARM_RSP_CID_REG_LENGTH 32 -#define HIF_CMDQM_ARM_RSP_CID_REG_ARM_RSP_CID_MASK 0x7ff -#define HIF_CMDQM_ARM_RSP_CID_REG_ARM_RSP_CID_SHIFT 0 -#define HIF_CMDQM_ARM_RSP_CID_REG_ARM_RSP_CID_WIDTH 11 - -#define HIF_CMDQM_ARM_RSP_PID_REG_ADDR 0xa1020058 -#define HIF_CMDQM_ARM_RSP_PID_REG_WIDTH 32 -#define HIF_CMDQM_ARM_RSP_PID_REG_LENGTH 32 -#define HIF_CMDQM_ARM_RSP_PID_REG_ARM_RSP_PID_MASK 0x7ff -#define HIF_CMDQM_ARM_RSP_PID_REG_ARM_RSP_PID_SHIFT 0 -#define HIF_CMDQM_ARM_RSP_PID_REG_ARM_RSP_PID_WIDTH 11 -#define HIF_CMDQM_ARM_RSP_PID_REG_ARM_RSP_PID_MAX_VAL 0x7ff -#define HIF_CMDQM_ARM_RSP_PID_REG_ARM_RSP_PID_MIN_VAL 0x0 - -#define HIF_CMDQM_HW_ARM_RSP_PID_REG_ADDR 0xa102005c -#define HIF_CMDQM_HW_ARM_RSP_PID_REG_WIDTH 32 -#define HIF_CMDQM_HW_ARM_RSP_PID_REG_LENGTH 32 -#define HIF_CMDQM_HW_ARM_RSP_PID_REG_HW_ARM_RSP_PID_MASK 0x7ff -#define HIF_CMDQM_HW_ARM_RSP_PID_REG_HW_ARM_RSP_PID_SHIFT 0 -#define HIF_CMDQM_HW_ARM_RSP_PID_REG_HW_ARM_RSP_PID_WIDTH 11 - -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_ADDR 0xa1020060 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_SIZE 2 -#define 
HIF_CMDQM_DMA_RX_PCIE_PORT_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_DMA_RX_PCIE_PORT_MASK 0x1 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_DMA_RX_PCIE_PORT_SHIFT 0 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_DMA_RX_PCIE_PORT_WIDTH 1 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_DMA_RX_PCIE_PORT_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_RX_PCIE_PORT_REG_DMA_RX_PCIE_PORT_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_ADDR 0xa1020068 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_DMA_RX_FUNCTION_ID_MASK 0x7ff -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_DMA_RX_FUNCTION_ID_SHIFT 0 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_DMA_RX_FUNCTION_ID_WIDTH 11 -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_DMA_RX_FUNCTION_ID_MAX_VAL 0x7ff -#define HIF_CMDQM_DMA_RX_FUNCTION_ID_REG_DMA_RX_FUNCTION_ID_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_ADDR 0xa1020070 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_DMA_RX_SRC_H_ADDR_MASK 0xffffffff -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_DMA_RX_SRC_H_ADDR_SHIFT 0 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_DMA_RX_SRC_H_ADDR_WIDTH 32 -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_DMA_RX_SRC_H_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_DMA_RX_SRC_H_ADDR_REG_DMA_RX_SRC_H_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_ADDR 0xa1020078 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_DMA_RX_SRC_L_ADDR_MASK 0xffffffff -#define 
HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_DMA_RX_SRC_L_ADDR_SHIFT 0 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_DMA_RX_SRC_L_ADDR_WIDTH 32 -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_DMA_RX_SRC_L_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_DMA_RX_SRC_L_ADDR_REG_DMA_RX_SRC_L_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_ADDR 0xa1020080 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_DMA_RX_DST_ADDR_MASK 0xffffffff -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_DMA_RX_DST_ADDR_SHIFT 0 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_DMA_RX_DST_ADDR_WIDTH 32 -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_DMA_RX_DST_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_DMA_RX_DST_ADDR_REG_DMA_RX_DST_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_ADDR 0xa1020088 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_DMA_RX_DATA_LEN_MASK 0x1ffff -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_DMA_RX_DATA_LEN_SHIFT 0 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_DMA_RX_DATA_LEN_WIDTH 17 -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_DMA_RX_DATA_LEN_MAX_VAL 0x1ffff -#define HIF_CMDQM_DMA_RX_DATA_LEN_REG_DMA_RX_DATA_LEN_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_RX_STRAT_REG_ADDR 0xa1020090 -#define HIF_CMDQM_DMA_RX_STRAT_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_STRAT_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_STRAT_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_STRAT_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_STRAT_REG_DMA_RX_STRAT_MASK 0x1 -#define HIF_CMDQM_DMA_RX_STRAT_REG_DMA_RX_STRAT_SHIFT 0 -#define HIF_CMDQM_DMA_RX_STRAT_REG_DMA_RX_STRAT_WIDTH 1 -#define HIF_CMDQM_DMA_RX_STRAT_REG_DMA_RX_STRAT_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_RX_STRAT_REG_DMA_RX_STRAT_MIN_VAL 0x0 - -#define 
HIF_CMDQM_DMA_RX_STATE_REG_ADDR 0xa1020098 -#define HIF_CMDQM_DMA_RX_STATE_REG_WIDTH 32 -#define HIF_CMDQM_DMA_RX_STATE_REG_LENGTH 32 -#define HIF_CMDQM_DMA_RX_STATE_REG_SIZE 2 -#define HIF_CMDQM_DMA_RX_STATE_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_RX_STATE_REG_DMA_RX_STATE_MASK 0x1 -#define HIF_CMDQM_DMA_RX_STATE_REG_DMA_RX_STATE_SHIFT 0 -#define HIF_CMDQM_DMA_RX_STATE_REG_DMA_RX_STATE_WIDTH 1 - -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_ADDR 0xa10200a0 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_WIDTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_LENGTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_SIZE 2 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_STRIDE 0x8 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_DMA_RX_MASK 0x1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_DMA_RX_SHIFT 0 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_REG_DMA_RX_WIDTH 1 - -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_ADDR 0xa10200a4 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_WIDTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_LENGTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_SIZE 2 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_STRIDE 0x8 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_DMA_RX_INT_MASK_MASK 0x1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_DMA_RX_INT_MASK_SHIFT 0 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_DMA_RX_INT_MASK_WIDTH 1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_DMA_RX_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_RX_INT_MASK_REG_DMA_RX_INT_MASK_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_ADDR 0xa10200b0 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_DMA_TX_PCIE_PORT_MASK 0x1 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_DMA_TX_PCIE_PORT_SHIFT 0 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_DMA_TX_PCIE_PORT_WIDTH 1 -#define 
HIF_CMDQM_DMA_TX_PCIE_PORT_REG_DMA_TX_PCIE_PORT_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_TX_PCIE_PORT_REG_DMA_TX_PCIE_PORT_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_ADDR 0xa10200b8 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_DMA_TX_FUNCTION_ID_MASK 0x7ff -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_DMA_TX_FUNCTION_ID_SHIFT 0 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_DMA_TX_FUNCTION_ID_WIDTH 11 -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_DMA_TX_FUNCTION_ID_MAX_VAL 0x7ff -#define HIF_CMDQM_DMA_TX_FUNCTION_ID_REG_DMA_TX_FUNCTION_ID_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_ADDR 0xa10200c0 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_DMA_TX_SRC_ADDR_MASK 0xffffffff -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_DMA_TX_SRC_ADDR_SHIFT 0 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_DMA_TX_SRC_ADDR_WIDTH 32 -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_DMA_TX_SRC_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_DMA_TX_SRC_ADDR_REG_DMA_TX_SRC_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_ADDR 0xa10200c8 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_DMA_TX_DST_H_ADDR_MASK 0xffffffff -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_DMA_TX_DST_H_ADDR_SHIFT 0 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_DMA_TX_DST_H_ADDR_WIDTH 32 -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_DMA_TX_DST_H_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_DMA_TX_DST_H_ADDR_REG_DMA_TX_DST_H_ADDR_MIN_VAL 0x0 - -#define 
HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_ADDR 0xa10200d0 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_DMA_TX_DST_L_ADDR_MASK 0xffffffff -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_DMA_TX_DST_L_ADDR_SHIFT 0 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_DMA_TX_DST_L_ADDR_WIDTH 32 -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_DMA_TX_DST_L_ADDR_MAX_VAL 0xffffffff -#define HIF_CMDQM_DMA_TX_DST_L_ADDR_REG_DMA_TX_DST_L_ADDR_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_ADDR 0xa10200d8 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_DMA_TX_DATA_LEN_MASK 0x1ffff -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_DMA_TX_DATA_LEN_SHIFT 0 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_DMA_TX_DATA_LEN_WIDTH 17 -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_DMA_TX_DATA_LEN_MAX_VAL 0x1ffff -#define HIF_CMDQM_DMA_TX_DATA_LEN_REG_DMA_TX_DATA_LEN_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_STRAT_REG_ADDR 0xa10200e0 -#define HIF_CMDQM_DMA_TX_STRAT_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_STRAT_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_STRAT_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_STRAT_REG_STRIDE 0x4 -#define HIF_CMDQM_DMA_TX_STRAT_REG_DMA_TX_STRAT_MASK 0x1 -#define HIF_CMDQM_DMA_TX_STRAT_REG_DMA_TX_STRAT_SHIFT 0 -#define HIF_CMDQM_DMA_TX_STRAT_REG_DMA_TX_STRAT_WIDTH 1 -#define HIF_CMDQM_DMA_TX_STRAT_REG_DMA_TX_STRAT_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_TX_STRAT_REG_DMA_TX_STRAT_MIN_VAL 0x0 - -#define HIF_CMDQM_DMA_TX_STATE_REG_ADDR 0xa10200e8 -#define HIF_CMDQM_DMA_TX_STATE_REG_WIDTH 32 -#define HIF_CMDQM_DMA_TX_STATE_REG_LENGTH 32 -#define HIF_CMDQM_DMA_TX_STATE_REG_SIZE 2 -#define HIF_CMDQM_DMA_TX_STATE_REG_STRIDE 0x4 -#define 
HIF_CMDQM_DMA_TX_STATE_REG_DMA_TX_STATE_MASK 0x1 -#define HIF_CMDQM_DMA_TX_STATE_REG_DMA_TX_STATE_SHIFT 0 -#define HIF_CMDQM_DMA_TX_STATE_REG_DMA_TX_STATE_WIDTH 1 - -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_ADDR 0xa10200f0 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_WIDTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_LENGTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_SIZE 2 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_STRIDE 0x8 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_DMA_TX_MASK 0x1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_DMA_TX_SHIFT 0 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_REG_DMA_TX_WIDTH 1 - -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_ADDR 0xa10200f4 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_WIDTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_LENGTH 32 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_SIZE 2 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_STRIDE 0x8 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_DMA_TX_INT_MASK_MASK 0x1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_DMA_TX_INT_MASK_SHIFT 0 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_DMA_TX_INT_MASK_WIDTH 1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_DMA_TX_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_CMDQM2IRQ_DMA_TX_INT_MASK_REG_DMA_TX_INT_MASK_MIN_VAL 0x0 - -#define HIF_CMDQM_FIFO_INT_REG_ADDR 0xa102010c -#define HIF_CMDQM_FIFO_INT_REG_WIDTH 32 -#define HIF_CMDQM_FIFO_INT_REG_LENGTH 32 -#define HIF_CMDQM_FIFO_INT_REG_REQ_DATA_FIFO_OVFL_MASK 0x1 -#define HIF_CMDQM_FIFO_INT_REG_REQ_DATA_FIFO_OVFL_SHIFT 0 -#define HIF_CMDQM_FIFO_INT_REG_REQ_DATA_FIFO_OVFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_REQ_DATA_FIFO_UNFL_MASK 0x2 -#define HIF_CMDQM_FIFO_INT_REG_REQ_DATA_FIFO_UNFL_SHIFT 1 -#define HIF_CMDQM_FIFO_INT_REG_REQ_DATA_FIFO_UNFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_RSP_DATA_FIFO_OVFL_MASK 0x4 -#define HIF_CMDQM_FIFO_INT_REG_RSP_DATA_FIFO_OVFL_SHIFT 2 -#define HIF_CMDQM_FIFO_INT_REG_RSP_DATA_FIFO_OVFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_RSP_DATA_FIFO_UNFL_MASK 0x8 -#define 
HIF_CMDQM_FIFO_INT_REG_RSP_DATA_FIFO_UNFL_SHIFT 3 -#define HIF_CMDQM_FIFO_INT_REG_RSP_DATA_FIFO_UNFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_HOST_RSP_INT_FIFO_OVFL_MASK 0x10 -#define HIF_CMDQM_FIFO_INT_REG_HOST_RSP_INT_FIFO_OVFL_SHIFT 4 -#define HIF_CMDQM_FIFO_INT_REG_HOST_RSP_INT_FIFO_OVFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_HOST_RSP_INT_FIFO_UNFL_MASK 0x20 -#define HIF_CMDQM_FIFO_INT_REG_HOST_RSP_INT_FIFO_UNFL_SHIFT 5 -#define HIF_CMDQM_FIFO_INT_REG_HOST_RSP_INT_FIFO_UNFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_HOST_ERR_INT_FIFO_OVFL_MASK 0x40 -#define HIF_CMDQM_FIFO_INT_REG_HOST_ERR_INT_FIFO_OVFL_SHIFT 6 -#define HIF_CMDQM_FIFO_INT_REG_HOST_ERR_INT_FIFO_OVFL_WIDTH 1 -#define HIF_CMDQM_FIFO_INT_REG_HOST_ERR_INT_FIFO_UNFL_MASK 0x80 -#define HIF_CMDQM_FIFO_INT_REG_HOST_ERR_INT_FIFO_UNFL_SHIFT 7 -#define HIF_CMDQM_FIFO_INT_REG_HOST_ERR_INT_FIFO_UNFL_WIDTH 1 - -#define HIF_CMDQM_CREDIT_INT_REG_ADDR 0xa102011c -#define HIF_CMDQM_CREDIT_INT_REG_WIDTH 32 -#define HIF_CMDQM_CREDIT_INT_REG_LENGTH 32 -#define HIF_CMDQM_CREDIT_INT_REG_CREDIT_OVFL_MASK 0x1 -#define HIF_CMDQM_CREDIT_INT_REG_CREDIT_OVFL_SHIFT 0 -#define HIF_CMDQM_CREDIT_INT_REG_CREDIT_OVFL_WIDTH 1 -#define HIF_CMDQM_CREDIT_INT_REG_CREDIT_UNFL_MASK 0x2 -#define HIF_CMDQM_CREDIT_INT_REG_CREDIT_UNFL_SHIFT 1 -#define HIF_CMDQM_CREDIT_INT_REG_CREDIT_UNFL_WIDTH 1 - -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_ADDR 0xa1020124 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_WIDTH 32 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_LENGTH 32 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_HOST_VF_RSP_INT_EN_MASK 0x1 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_HOST_VF_RSP_INT_EN_SHIFT 0 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_HOST_VF_RSP_INT_EN_WIDTH 1 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_HOST_VF_RSP_INT_EN_MAX_VAL 0x1 -#define HIF_CMDQM_HOST_VF_RSP_INT_EN_REG_HOST_VF_RSP_INT_EN_MIN_VAL 0x0 - -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_ADDR 0xa1020130 -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_WIDTH 32 -#define 
HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_LENGTH 32 -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_SIZE 4 -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_STRIDE 0x4 -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_HOST_VF_ERR_INFO_VF_MASK 0x7ff -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_HOST_VF_ERR_INFO_VF_SHIFT 0 -#define HIF_CMDQM_HOST_VF_ERR_INFO_0_REG_HOST_VF_ERR_INFO_VF_WIDTH 11 - -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_ADDR 0xa1020140 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_WIDTH 32 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_LENGTH 32 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_SIZE 4 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_STRIDE 0x4 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_HOST_VF_ERR_INFO_PID_MASK 0x7ff -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_HOST_VF_ERR_INFO_PID_SHIFT 0 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_HOST_VF_ERR_INFO_PID_WIDTH 11 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_HOST_VF_ERR_INFO_CID_MASK 0x3ff800 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_HOST_VF_ERR_INFO_CID_SHIFT 11 -#define HIF_CMDQM_HOST_VF_ERR_INFO_1_REG_HOST_VF_ERR_INFO_CID_WIDTH 11 - -#define HIF_CMDQM_ARM_ERR_INT_REG_ADDR 0xa1020150 -#define HIF_CMDQM_ARM_ERR_INT_REG_WIDTH 32 -#define HIF_CMDQM_ARM_ERR_INT_REG_LENGTH 32 -#define HIF_CMDQM_ARM_ERR_INT_REG_ARM_ERR_INT_MASK 0x1 -#define HIF_CMDQM_ARM_ERR_INT_REG_ARM_ERR_INT_SHIFT 0 -#define HIF_CMDQM_ARM_ERR_INT_REG_ARM_ERR_INT_WIDTH 1 - -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_ADDR 0xa1020154 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_ARM_ERR_INT_INT_MASK_MASK 0x1 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_ARM_ERR_INT_INT_MASK_SHIFT 0 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_ARM_ERR_INT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_ARM_ERR_INT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_ARM_ERR_INT_MASK_REG_ARM_ERR_INT_INT_MASK_MIN_VAL 0x0 - -#define HIF_CMDQM_ARM_ERR_STS_REG_ADDR 0xa1020158 -#define HIF_CMDQM_ARM_ERR_STS_REG_WIDTH 32 -#define 
HIF_CMDQM_ARM_ERR_STS_REG_LENGTH 32 -#define HIF_CMDQM_ARM_ERR_STS_REG_ARM_ERR_STS_MASK 0x3f -#define HIF_CMDQM_ARM_ERR_STS_REG_ARM_ERR_STS_SHIFT 0 -#define HIF_CMDQM_ARM_ERR_STS_REG_ARM_ERR_STS_WIDTH 6 - -#define HIF_CMDQM_ARM_ERR_INFO_REG_ADDR 0xa102015c -#define HIF_CMDQM_ARM_ERR_INFO_REG_WIDTH 32 -#define HIF_CMDQM_ARM_ERR_INFO_REG_LENGTH 32 -#define HIF_CMDQM_ARM_ERR_INFO_REG_ARM_ERR_INFO_PID_MASK 0x7ff -#define HIF_CMDQM_ARM_ERR_INFO_REG_ARM_ERR_INFO_PID_SHIFT 0 -#define HIF_CMDQM_ARM_ERR_INFO_REG_ARM_ERR_INFO_PID_WIDTH 11 -#define HIF_CMDQM_ARM_ERR_INFO_REG_ARM_ERR_INFO_CID_MASK 0x3ff800 -#define HIF_CMDQM_ARM_ERR_INFO_REG_ARM_ERR_INFO_CID_SHIFT 11 -#define HIF_CMDQM_ARM_ERR_INFO_REG_ARM_ERR_INFO_CID_WIDTH 11 - -#define HIF_CMDQM_ARM_ERR_INFO_RSP_BASE_ADDR_REG_ADDR 0xa1020160 -#define HIF_CMDQM_ARM_ERR_INFO_RSP_BASE_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_ARM_ERR_INFO_RSP_BASE_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_ARM_ERR_INFO_RSP_BASE_ADDR_REG_ARM_ERR_INFO_RSP_BASE_ADDR_MASK 0xffffffff -#define HIF_CMDQM_ARM_ERR_INFO_RSP_BASE_ADDR_REG_ARM_ERR_INFO_RSP_BASE_ADDR_SHIFT 0 -#define HIF_CMDQM_ARM_ERR_INFO_RSP_BASE_ADDR_REG_ARM_ERR_INFO_RSP_BASE_ADDR_WIDTH 32 - -#define HIF_CMDQM_ARM_ERR_INFO_REQ_BASE_ADDR_REG_ADDR 0xa1020164 -#define HIF_CMDQM_ARM_ERR_INFO_REQ_BASE_ADDR_REG_WIDTH 32 -#define HIF_CMDQM_ARM_ERR_INFO_REQ_BASE_ADDR_REG_LENGTH 32 -#define HIF_CMDQM_ARM_ERR_INFO_REQ_BASE_ADDR_REG_ARM_ERR_INFO_REQ_BASE_ADDR_MASK 0xffffffff -#define HIF_CMDQM_ARM_ERR_INFO_REQ_BASE_ADDR_REG_ARM_ERR_INFO_REQ_BASE_ADDR_SHIFT 0 -#define HIF_CMDQM_ARM_ERR_INFO_REQ_BASE_ADDR_REG_ARM_ERR_INFO_REQ_BASE_ADDR_WIDTH 32 - -#define HIF_CMDQM_DMA_ERR_INT_REG_ADDR 0xa1020170 -#define HIF_CMDQM_DMA_ERR_INT_REG_WIDTH 32 -#define HIF_CMDQM_DMA_ERR_INT_REG_LENGTH 32 -#define HIF_CMDQM_DMA_ERR_INT_REG_SIZE 2 -#define HIF_CMDQM_DMA_ERR_INT_REG_STRIDE 0x8 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_RD_MASK 0x1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_RD_SHIFT 0 -#define 
HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_RD_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_WR_MASK 0x2 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_WR_SHIFT 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_WR_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_RECFG_MASK 0x4 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_RECFG_SHIFT 2 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_RECFG_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_CFG_LEN_MASK 0x8 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_CFG_LEN_SHIFT 3 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_RX_CFG_LEN_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_RD_MASK 0x10 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_RD_SHIFT 4 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_RD_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_WR_MASK 0x20 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_WR_SHIFT 5 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_WR_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_RECFG_MASK 0x40 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_RECFG_SHIFT 6 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_RECFG_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_CFG_LEN_MASK 0x80 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_CFG_LEN_SHIFT 7 -#define HIF_CMDQM_DMA_ERR_INT_REG_DMA_ERR_TX_CFG_LEN_WIDTH 1 - -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_ADDR 0xa1020174 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_SIZE 2 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_STRIDE 0x8 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RD_INT_MASK_MASK 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RD_INT_MASK_SHIFT 0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RD_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RD_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RD_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_WR_INT_MASK_MASK 0x2 
-#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_WR_INT_MASK_SHIFT 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_WR_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_WR_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_WR_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RECFG_INT_MASK_MASK 0x4 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RECFG_INT_MASK_SHIFT 2 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RECFG_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RECFG_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_RECFG_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_CFG_LEN_INT_MASK_MASK 0x8 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_CFG_LEN_INT_MASK_SHIFT 3 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_CFG_LEN_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_CFG_LEN_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_RX_CFG_LEN_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RD_INT_MASK_MASK 0x10 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RD_INT_MASK_SHIFT 4 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RD_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RD_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RD_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_WR_INT_MASK_MASK 0x20 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_WR_INT_MASK_SHIFT 5 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_WR_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_WR_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_WR_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RECFG_INT_MASK_MASK 0x40 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RECFG_INT_MASK_SHIFT 6 -#define 
HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RECFG_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RECFG_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_RECFG_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_CFG_LEN_INT_MASK_MASK 0x80 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_CFG_LEN_INT_MASK_SHIFT 7 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_CFG_LEN_INT_MASK_WIDTH 1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_CFG_LEN_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_DMA_ERR_INT_MASK_REG_DMA_ERR_TX_CFG_LEN_INT_MASK_MIN_VAL 0x0 - -#define HIF_CMDQM_RD_ADPT_DEBUG_CNT_REG_ADDR 0xa1020180 -#define HIF_CMDQM_RD_ADPT_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_RD_ADPT_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_RD_ADPT_DEBUG_CNT_REG_RD_ADPT_CNT_MASK 0xffff -#define HIF_CMDQM_RD_ADPT_DEBUG_CNT_REG_RD_ADPT_CNT_SHIFT 0 -#define HIF_CMDQM_RD_ADPT_DEBUG_CNT_REG_RD_ADPT_CNT_WIDTH 16 - -#define HIF_CMDQM_RD_ADPT_ACK_DEBUG_CNT_REG_ADDR 0xa1020184 -#define HIF_CMDQM_RD_ADPT_ACK_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_RD_ADPT_ACK_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_RD_ADPT_ACK_DEBUG_CNT_REG_RD_ADPT_ACK_CNT_MASK 0xffff -#define HIF_CMDQM_RD_ADPT_ACK_DEBUG_CNT_REG_RD_ADPT_ACK_CNT_SHIFT 0 -#define HIF_CMDQM_RD_ADPT_ACK_DEBUG_CNT_REG_RD_ADPT_ACK_CNT_WIDTH 16 - -#define HIF_CMDQM_WR_ADPT_DEBUG_CNT_REG_ADDR 0xa1020188 -#define HIF_CMDQM_WR_ADPT_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_WR_ADPT_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_WR_ADPT_DEBUG_CNT_REG_WR_ADPT_CNT_MASK 0xffff -#define HIF_CMDQM_WR_ADPT_DEBUG_CNT_REG_WR_ADPT_CNT_SHIFT 0 -#define HIF_CMDQM_WR_ADPT_DEBUG_CNT_REG_WR_ADPT_CNT_WIDTH 16 - -#define HIF_CMDQM_WR_ADPT_ACK_DEBUG_CNT_REG_ADDR 0xa102018c -#define HIF_CMDQM_WR_ADPT_ACK_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_WR_ADPT_ACK_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_WR_ADPT_ACK_DEBUG_CNT_REG_WR_ADPT_ACK_CNT_MASK 0xffff -#define 
HIF_CMDQM_WR_ADPT_ACK_DEBUG_CNT_REG_WR_ADPT_ACK_CNT_SHIFT 0 -#define HIF_CMDQM_WR_ADPT_ACK_DEBUG_CNT_REG_WR_ADPT_ACK_CNT_WIDTH 16 - -#define HIF_CMDQM_RD_AXI_DEBUG_CNT_REG_ADDR 0xa1020190 -#define HIF_CMDQM_RD_AXI_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_RD_AXI_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_RD_AXI_DEBUG_CNT_REG_RD_AXI_CNT_MASK 0xffff -#define HIF_CMDQM_RD_AXI_DEBUG_CNT_REG_RD_AXI_CNT_SHIFT 0 -#define HIF_CMDQM_RD_AXI_DEBUG_CNT_REG_RD_AXI_CNT_WIDTH 16 - -#define HIF_CMDQM_RD_AXI_ACK_DEBUG_CNT_REG_ADDR 0xa1020194 -#define HIF_CMDQM_RD_AXI_ACK_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_RD_AXI_ACK_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_RD_AXI_ACK_DEBUG_CNT_REG_RD_AXI_ACK_CNT_MASK 0xffff -#define HIF_CMDQM_RD_AXI_ACK_DEBUG_CNT_REG_RD_AXI_ACK_CNT_SHIFT 0 -#define HIF_CMDQM_RD_AXI_ACK_DEBUG_CNT_REG_RD_AXI_ACK_CNT_WIDTH 16 - -#define HIF_CMDQM_WR_AXI_DEBUG_CNT_REG_ADDR 0xa1020198 -#define HIF_CMDQM_WR_AXI_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_WR_AXI_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_WR_AXI_DEBUG_CNT_REG_WR_AXI_CNT_MASK 0xffff -#define HIF_CMDQM_WR_AXI_DEBUG_CNT_REG_WR_AXI_CNT_SHIFT 0 -#define HIF_CMDQM_WR_AXI_DEBUG_CNT_REG_WR_AXI_CNT_WIDTH 16 - -#define HIF_CMDQM_WR_AXI_ACK_DEBUG_CNT_REG_ADDR 0xa102019c -#define HIF_CMDQM_WR_AXI_ACK_DEBUG_CNT_REG_WIDTH 32 -#define HIF_CMDQM_WR_AXI_ACK_DEBUG_CNT_REG_LENGTH 32 -#define HIF_CMDQM_WR_AXI_ACK_DEBUG_CNT_REG_WR_AXI_ACK_CNT_MASK 0xffff -#define HIF_CMDQM_WR_AXI_ACK_DEBUG_CNT_REG_WR_AXI_ACK_CNT_SHIFT 0 -#define HIF_CMDQM_WR_AXI_ACK_DEBUG_CNT_REG_WR_AXI_ACK_CNT_WIDTH 16 - -#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1022000 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_HOST_REQ_BUF_BASE_H_ADDR_MASK 0xffffffff -#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_HOST_REQ_BUF_BASE_H_ADDR_SHIFT 0 -#define 
HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_HOST_REQ_BUF_BASE_H_ADDR_WIDTH 32 - -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1024000 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_HOST_REQ_BUF_BASE_L_ADDR_MASK 0xffffffff -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_HOST_REQ_BUF_BASE_L_ADDR_SHIFT 0 -#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_HOST_REQ_BUF_BASE_L_ADDR_WIDTH 32 - -#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1026000 -#define HIF_CMDQM_HOST_REQ_PID_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_REQ_PID_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_REQ_PID_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_REQ_PID_MEM_HOST_REQ_PID_MASK 0x7ff -#define HIF_CMDQM_HOST_REQ_PID_MEM_HOST_REQ_PID_SHIFT 0 -#define HIF_CMDQM_HOST_REQ_PID_MEM_HOST_REQ_PID_WIDTH 11 - -#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1028000 -#define HIF_CMDQM_HOST_REQ_CID_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_REQ_CID_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_REQ_CID_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_REQ_CID_MEM_HOST_REQ_CID_MASK 0x7ff -#define HIF_CMDQM_HOST_REQ_CID_MEM_HOST_REQ_CID_SHIFT 0 -#define HIF_CMDQM_HOST_REQ_CID_MEM_HOST_REQ_CID_WIDTH 11 - -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa102a000 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_HOST_RSP_BUF_BASE_H_ADDR_MASK 0xffffffff -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_HOST_RSP_BUF_BASE_H_ADDR_SHIFT 0 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_HOST_RSP_BUF_BASE_H_ADDR_WIDTH 32 - -#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa102c000 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_LENGTH 32 -#define 
HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_HOST_RSP_BUF_BASE_L_ADDR_MASK 0xffffffff -#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_HOST_RSP_BUF_BASE_L_ADDR_SHIFT 0 -#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_HOST_RSP_BUF_BASE_L_ADDR_WIDTH 32 - -#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa102e000 -#define HIF_CMDQM_HOST_RSP_PID_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_RSP_PID_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_RSP_PID_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_RSP_PID_MEM_HOST_RSP_PID_MASK 0x7ff -#define HIF_CMDQM_HOST_RSP_PID_MEM_HOST_RSP_PID_SHIFT 0 -#define HIF_CMDQM_HOST_RSP_PID_MEM_HOST_RSP_PID_WIDTH 11 - -#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1030000 -#define HIF_CMDQM_HOST_RSP_CID_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_RSP_CID_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_RSP_CID_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_RSP_CID_MEM_HOST_RSP_CID_MASK 0x7ff -#define HIF_CMDQM_HOST_RSP_CID_MEM_HOST_RSP_CID_SHIFT 0 -#define HIF_CMDQM_HOST_RSP_CID_MEM_HOST_RSP_CID_WIDTH 11 - -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1032000 -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_WIDTH 32 -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_LENGTH 32 -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_DEPTH 1056 -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_HOST_VF_ERR_STS_MASK 0xf -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_HOST_VF_ERR_STS_SHIFT 0 -#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_HOST_VF_ERR_STS_WIDTH 4 - -#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1034000 -#define HIF_CMDQM_VECTOR_ID_MEM_WIDTH 32 -#define HIF_CMDQM_VECTOR_ID_MEM_LENGTH 32 -#define HIF_CMDQM_VECTOR_ID_MEM_DEPTH 1056 -#define HIF_CMDQM_VECTOR_ID_MEM_VECTOR_ID_MASK 0xfff -#define HIF_CMDQM_VECTOR_ID_MEM_VECTOR_ID_SHIFT 0 -#define HIF_CMDQM_VECTOR_ID_MEM_VECTOR_ID_WIDTH 12 - -#define HIF_CMDQM_MEM_ERROR_INT_ADDR 0xa1036000 -#define HIF_CMDQM_MEM_ERROR_INT_WIDTH 32 -#define HIF_CMDQM_MEM_ERROR_INT_LENGTH 32 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_H_ADDR_MEM_SB_ERR_MASK 0x1 -#define 
HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_H_ADDR_MEM_SB_ERR_SHIFT 0 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_H_ADDR_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_H_ADDR_MEM_DB_ERR_MASK 0x2 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_H_ADDR_MEM_DB_ERR_SHIFT 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_H_ADDR_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_L_ADDR_MEM_SB_ERR_MASK 0x4 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_L_ADDR_MEM_SB_ERR_SHIFT 2 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_L_ADDR_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_L_ADDR_MEM_DB_ERR_MASK 0x8 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_L_ADDR_MEM_DB_ERR_SHIFT 3 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_BUF_BASE_L_ADDR_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_PID_MEM_SB_ERR_MASK 0x10 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_PID_MEM_SB_ERR_SHIFT 4 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_PID_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_PID_MEM_DB_ERR_MASK 0x20 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_PID_MEM_DB_ERR_SHIFT 5 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_PID_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_CID_MEM_SB_ERR_MASK 0x40 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_CID_MEM_SB_ERR_SHIFT 6 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_CID_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_CID_MEM_DB_ERR_MASK 0x80 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_CID_MEM_DB_ERR_SHIFT 7 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_REQ_CID_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_H_ADDR_MEM_SB_ERR_MASK 0x100 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_H_ADDR_MEM_SB_ERR_SHIFT 8 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_H_ADDR_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_H_ADDR_MEM_DB_ERR_MASK 0x200 -#define 
HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_H_ADDR_MEM_DB_ERR_SHIFT 9 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_H_ADDR_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_L_ADDR_MEM_SB_ERR_MASK 0x400 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_L_ADDR_MEM_SB_ERR_SHIFT 10 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_L_ADDR_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_L_ADDR_MEM_DB_ERR_MASK 0x800 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_L_ADDR_MEM_DB_ERR_SHIFT 11 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_BUF_BASE_L_ADDR_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_PID_MEM_SB_ERR_MASK 0x1000 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_PID_MEM_SB_ERR_SHIFT 12 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_PID_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_PID_MEM_DB_ERR_MASK 0x2000 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_PID_MEM_DB_ERR_SHIFT 13 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_PID_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_CID_MEM_SB_ERR_MASK 0x4000 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_CID_MEM_SB_ERR_SHIFT 14 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_CID_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_CID_MEM_DB_ERR_MASK 0x8000 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_CID_MEM_DB_ERR_SHIFT 15 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_RSP_CID_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_VF_ERR_STS_MEM_SB_ERR_MASK 0x10000 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_VF_ERR_STS_MEM_SB_ERR_SHIFT 16 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_VF_ERR_STS_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_VF_ERR_STS_MEM_DB_ERR_MASK 0x20000 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_VF_ERR_STS_MEM_DB_ERR_SHIFT 17 -#define HIF_CMDQM_MEM_ERROR_INT_HOST_VF_ERR_STS_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_VECTOR_ID_MEM_SB_ERR_MASK 0x40000 -#define HIF_CMDQM_MEM_ERROR_INT_VECTOR_ID_MEM_SB_ERR_SHIFT 18 -#define 
HIF_CMDQM_MEM_ERROR_INT_VECTOR_ID_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_VECTOR_ID_MEM_DB_ERR_MASK 0x80000 -#define HIF_CMDQM_MEM_ERROR_INT_VECTOR_ID_MEM_DB_ERR_SHIFT 19 -#define HIF_CMDQM_MEM_ERROR_INT_VECTOR_ID_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_REQ_DATA_FIFO_MEM_SB_ERR_MASK 0x100000 -#define HIF_CMDQM_MEM_ERROR_INT_REQ_DATA_FIFO_MEM_SB_ERR_SHIFT 20 -#define HIF_CMDQM_MEM_ERROR_INT_REQ_DATA_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_REQ_DATA_FIFO_MEM_DB_ERR_MASK 0x200000 -#define HIF_CMDQM_MEM_ERROR_INT_REQ_DATA_FIFO_MEM_DB_ERR_SHIFT 21 -#define HIF_CMDQM_MEM_ERROR_INT_REQ_DATA_FIFO_MEM_DB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_RSP_DATA_FIFO_MEM_SB_ERR_MASK 0x400000 -#define HIF_CMDQM_MEM_ERROR_INT_RSP_DATA_FIFO_MEM_SB_ERR_SHIFT 22 -#define HIF_CMDQM_MEM_ERROR_INT_RSP_DATA_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_CMDQM_MEM_ERROR_INT_RSP_DATA_FIFO_MEM_DB_ERR_MASK 0x800000 -#define HIF_CMDQM_MEM_ERROR_INT_RSP_DATA_FIFO_MEM_DB_ERR_SHIFT 23 -#define HIF_CMDQM_MEM_ERROR_INT_RSP_DATA_FIFO_MEM_DB_ERR_WIDTH 1 - -#define HIF_CMDQM_MEM_INIT_CTRL_ADDR 0xa1036008 -#define HIF_CMDQM_MEM_INIT_CTRL_WIDTH 32 -#define HIF_CMDQM_MEM_INIT_CTRL_LENGTH 32 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_H_ADDR_MEM_INIT_RST_N_MASK 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_H_ADDR_MEM_INIT_RST_N_SHIFT 0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_H_ADDR_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_H_ADDR_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_H_ADDR_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_L_ADDR_MEM_INIT_RST_N_MASK 0x2 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_L_ADDR_MEM_INIT_RST_N_SHIFT 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_L_ADDR_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_L_ADDR_MEM_INIT_RST_N_MAX_VAL 0x1 -#define 
HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_BUF_BASE_L_ADDR_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_PID_MEM_INIT_RST_N_MASK 0x4 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_PID_MEM_INIT_RST_N_SHIFT 2 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_PID_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_PID_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_PID_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_CID_MEM_INIT_RST_N_MASK 0x8 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_CID_MEM_INIT_RST_N_SHIFT 3 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_CID_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_CID_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_REQ_CID_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_H_ADDR_MEM_INIT_RST_N_MASK 0x10 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_H_ADDR_MEM_INIT_RST_N_SHIFT 4 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_H_ADDR_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_H_ADDR_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_H_ADDR_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_L_ADDR_MEM_INIT_RST_N_MASK 0x20 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_L_ADDR_MEM_INIT_RST_N_SHIFT 5 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_L_ADDR_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_L_ADDR_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_BUF_BASE_L_ADDR_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_PID_MEM_INIT_RST_N_MASK 0x40 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_PID_MEM_INIT_RST_N_SHIFT 6 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_PID_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_PID_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_PID_MEM_INIT_RST_N_MIN_VAL 0x0 -#define 
HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_CID_MEM_INIT_RST_N_MASK 0x80 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_CID_MEM_INIT_RST_N_SHIFT 7 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_CID_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_CID_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_RSP_CID_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_VF_ERR_STS_MEM_INIT_RST_N_MASK 0x100 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_VF_ERR_STS_MEM_INIT_RST_N_SHIFT 8 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_VF_ERR_STS_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_VF_ERR_STS_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_HOST_VF_ERR_STS_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_CMDQM_MEM_INIT_CTRL_VECTOR_ID_MEM_INIT_RST_N_MASK 0x200 -#define HIF_CMDQM_MEM_INIT_CTRL_VECTOR_ID_MEM_INIT_RST_N_SHIFT 9 -#define HIF_CMDQM_MEM_INIT_CTRL_VECTOR_ID_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CMDQM_MEM_INIT_CTRL_VECTOR_ID_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CMDQM_MEM_INIT_CTRL_VECTOR_ID_MEM_INIT_RST_N_MIN_VAL 0x0 - -#define HIF_CMDQM_TIMEOUT_INT_REG_ADDR 0xa1036010 -#define HIF_CMDQM_TIMEOUT_INT_REG_WIDTH 32 -#define HIF_CMDQM_TIMEOUT_INT_REG_LENGTH 32 -#define HIF_CMDQM_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_MASK 0x1 -#define HIF_CMDQM_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_SHIFT 0 -#define HIF_CMDQM_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_ARM_ERR_STS_REG_TIMEOUT_MASK 0x2 -#define HIF_CMDQM_TIMEOUT_INT_REG_ARM_ERR_STS_REG_TIMEOUT_SHIFT 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_ARM_ERR_STS_REG_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_MASK 0x4 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_SHIFT 2 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_MASK 0x8 -#define 
HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_SHIFT 3 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_PID_MEM_TIMEOUT_MASK 0x10 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_PID_MEM_TIMEOUT_SHIFT 4 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_PID_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_CID_MEM_TIMEOUT_MASK 0x20 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_CID_MEM_TIMEOUT_SHIFT 5 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_REQ_CID_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_MASK 0x40 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_SHIFT 6 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_MASK 0x80 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_SHIFT 7 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_PID_MEM_TIMEOUT_MASK 0x100 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_PID_MEM_TIMEOUT_SHIFT 8 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_PID_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_CID_MEM_TIMEOUT_MASK 0x200 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_CID_MEM_TIMEOUT_SHIFT 9 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_RSP_CID_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_MASK 0x400 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_SHIFT 10 -#define HIF_CMDQM_TIMEOUT_INT_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_REG_VECTOR_ID_MEM_TIMEOUT_MASK 0x800 -#define HIF_CMDQM_TIMEOUT_INT_REG_VECTOR_ID_MEM_TIMEOUT_SHIFT 11 -#define HIF_CMDQM_TIMEOUT_INT_REG_VECTOR_ID_MEM_TIMEOUT_WIDTH 1 - -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_ADDR 0xa1036014 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define 
HIF_CMDQM_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MASK 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_SHIFT 0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_ARM_ERR_STS_REG_TIMEOUT_INT_MASK_MASK 0x2 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_ARM_ERR_STS_REG_TIMEOUT_INT_MASK_SHIFT 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_ARM_ERR_STS_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_ARM_ERR_STS_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_ARM_ERR_STS_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_MASK 0x4 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_SHIFT 2 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_MASK 0x8 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_SHIFT 3 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_PID_MEM_TIMEOUT_INT_MASK_MASK 0x10 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_PID_MEM_TIMEOUT_INT_MASK_SHIFT 4 
-#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_PID_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_PID_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_PID_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_CID_MEM_TIMEOUT_INT_MASK_MASK 0x20 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_CID_MEM_TIMEOUT_INT_MASK_SHIFT 5 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_CID_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_CID_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_REQ_CID_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_MASK 0x40 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_SHIFT 6 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_H_ADDR_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_MASK 0x80 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_SHIFT 7 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_BUF_BASE_L_ADDR_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_PID_MEM_TIMEOUT_INT_MASK_MASK 0x100 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_PID_MEM_TIMEOUT_INT_MASK_SHIFT 8 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_PID_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_PID_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define 
HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_PID_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_CID_MEM_TIMEOUT_INT_MASK_MASK 0x200 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_CID_MEM_TIMEOUT_INT_MASK_SHIFT 9 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_CID_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_CID_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_RSP_CID_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_INT_MASK_MASK 0x400 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_INT_MASK_SHIFT 10 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_HOST_VF_ERR_STS_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_VECTOR_ID_MEM_TIMEOUT_INT_MASK_MASK 0x800 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_VECTOR_ID_MEM_TIMEOUT_INT_MASK_SHIFT 11 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_VECTOR_ID_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_VECTOR_ID_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CMDQM_TIMEOUT_INT_MASK_REG_VECTOR_ID_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define HIF_CMDQM_TIMEOUT_CFG_REG_ADDR 0xa1036018 -#define HIF_CMDQM_TIMEOUT_CFG_REG_WIDTH 32 -#define HIF_CMDQM_TIMEOUT_CFG_REG_LENGTH 32 -#define HIF_CMDQM_TIMEOUT_CFG_REG_TIMEOUT_PARA_MASK 0xffff -#define HIF_CMDQM_TIMEOUT_CFG_REG_TIMEOUT_PARA_SHIFT 0 -#define HIF_CMDQM_TIMEOUT_CFG_REG_TIMEOUT_PARA_WIDTH 16 -#define HIF_CMDQM_TIMEOUT_CFG_REG_TIMEOUT_PARA_MAX_VAL 0xffff -#define HIF_CMDQM_TIMEOUT_CFG_REG_TIMEOUT_PARA_MIN_VAL 0x0 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_cpm_csr_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_cpm_csr_defines.h deleted file mode 100644 index 
36a7399fb9ac191a5060a20289707698d36d0cf1..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_cpm_csr_defines.h +++ /dev/null @@ -1,598 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef _HIF_CPM_CSR_DEFINES_H_ -#define _HIF_CPM_CSR_DEFINES_H_ - -#define HIF_CPM_SOFT_RESET_REG_ADDR 0xa0000000 -#define HIF_CPM_SOFT_RESET_REG_WIDTH 32 -#define HIF_CPM_SOFT_RESET_REG_LENGTH 32 -#define HIF_CPM_SOFT_RESET_REG_SOFT_RESET_MASK 0x1 -#define HIF_CPM_SOFT_RESET_REG_SOFT_RESET_SHIFT 0 -#define HIF_CPM_SOFT_RESET_REG_SOFT_RESET_WIDTH 1 -#define HIF_CPM_SOFT_RESET_REG_SOFT_RESET_MAX_VAL 0x1 -#define HIF_CPM_SOFT_RESET_REG_SOFT_RESET_MIN_VAL 0x0 - -#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 -#define HIF_CPM_CHIP_VERSION_H_REG_WIDTH 32 -#define HIF_CPM_CHIP_VERSION_H_REG_LENGTH 32 -#define HIF_CPM_CHIP_VERSION_H_REG_CHIP_VERSION_H_MASK 0xffffffff -#define HIF_CPM_CHIP_VERSION_H_REG_CHIP_VERSION_H_SHIFT 0 -#define HIF_CPM_CHIP_VERSION_H_REG_CHIP_VERSION_H_WIDTH 32 - -#define HIF_CPM_CHIP_VERSION_M_REG_ADDR 0xa0000014 -#define HIF_CPM_CHIP_VERSION_M_REG_WIDTH 32 -#define HIF_CPM_CHIP_VERSION_M_REG_LENGTH 32 -#define HIF_CPM_CHIP_VERSION_M_REG_CHIP_VERSION_M_MASK 0xffffffff -#define HIF_CPM_CHIP_VERSION_M_REG_CHIP_VERSION_M_SHIFT 0 -#define HIF_CPM_CHIP_VERSION_M_REG_CHIP_VERSION_M_WIDTH 32 - -#define HIF_CPM_CHIP_VERSION_L_REG_ADDR 0xa0000018 -#define HIF_CPM_CHIP_VERSION_L_REG_WIDTH 32 -#define HIF_CPM_CHIP_VERSION_L_REG_LENGTH 32 -#define HIF_CPM_CHIP_VERSION_L_REG_CHIP_VERSION_L_MASK 0xffffffff -#define HIF_CPM_CHIP_VERSION_L_REG_CHIP_VERSION_L_SHIFT 0 -#define HIF_CPM_CHIP_VERSION_L_REG_CHIP_VERSION_L_WIDTH 32 - -#define HIF_CPM_SIMULATION_MODE_REG_ADDR 0xa000001c -#define HIF_CPM_SIMULATION_MODE_REG_WIDTH 32 -#define HIF_CPM_SIMULATION_MODE_REG_LENGTH 32 -#define 
HIF_CPM_SIMULATION_MODE_REG_SIMULATION_MODE_MASK 0x1 -#define HIF_CPM_SIMULATION_MODE_REG_SIMULATION_MODE_SHIFT 0 -#define HIF_CPM_SIMULATION_MODE_REG_SIMULATION_MODE_WIDTH 1 - -#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 -#define HIF_CPM_IDA_CMD_REG_WIDTH 32 -#define HIF_CPM_IDA_CMD_REG_LENGTH 32 -#define HIF_CPM_IDA_CMD_REG_IDA_IDX_MASK 0x1f -#define HIF_CPM_IDA_CMD_REG_IDA_IDX_SHIFT 0 -#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 -#define HIF_CPM_IDA_CMD_REG_IDA_IDX_MAX_VAL 0x1f -#define HIF_CPM_IDA_CMD_REG_IDA_IDX_MIN_VAL 0x0 -#define HIF_CPM_IDA_CMD_REG_IDA_LEN_MASK 0x1e0 -#define HIF_CPM_IDA_CMD_REG_IDA_LEN_SHIFT 5 -#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 -#define HIF_CPM_IDA_CMD_REG_IDA_LEN_MAX_VAL 0xf -#define HIF_CPM_IDA_CMD_REG_IDA_LEN_MIN_VAL 0x0 -#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_MASK 0x200 -#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_SHIFT 9 -#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 -#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_MAX_VAL 0x1 -#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_MIN_VAL 0x0 - -#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 -#define HIF_CPM_IDA_ADDR_REG_WIDTH 32 -#define HIF_CPM_IDA_ADDR_REG_LENGTH 32 -#define HIF_CPM_IDA_ADDR_REG_SIZE 32 -#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 -#define HIF_CPM_IDA_ADDR_REG_IDA_ADDR_MASK 0xffffffff -#define HIF_CPM_IDA_ADDR_REG_IDA_ADDR_SHIFT 0 -#define HIF_CPM_IDA_ADDR_REG_IDA_ADDR_WIDTH 32 -#define HIF_CPM_IDA_ADDR_REG_IDA_ADDR_MAX_VAL 0xffffffff -#define HIF_CPM_IDA_ADDR_REG_IDA_ADDR_MIN_VAL 0x0 - -#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 -#define HIF_CPM_IDA_BUSY_REG_WIDTH 32 -#define HIF_CPM_IDA_BUSY_REG_LENGTH 32 -#define HIF_CPM_IDA_BUSY_REG_IDA_BUSY_MASK 0xffffffff -#define HIF_CPM_IDA_BUSY_REG_IDA_BUSY_SHIFT 0 -#define HIF_CPM_IDA_BUSY_REG_IDA_BUSY_WIDTH 32 - -#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 -#define HIF_CPM_LOCK_GET_REG_WIDTH 32 -#define HIF_CPM_LOCK_GET_REG_LENGTH 32 -#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f -#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_SHIFT 0 -#define 
HIF_CPM_LOCK_GET_REG_LOCK_IDX_WIDTH 5 -#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_MASK 0x20 -#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 -#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_WIDTH 1 - -#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 -#define HIF_CPM_LOCK_PUT_REG_WIDTH 32 -#define HIF_CPM_LOCK_PUT_REG_LENGTH 32 -#define HIF_CPM_LOCK_PUT_REG_LOCK_IDX_MASK 0x1f -#define HIF_CPM_LOCK_PUT_REG_LOCK_IDX_SHIFT 0 -#define HIF_CPM_LOCK_PUT_REG_LOCK_IDX_WIDTH 5 -#define HIF_CPM_LOCK_PUT_REG_LOCK_IDX_MAX_VAL 0x1f -#define HIF_CPM_LOCK_PUT_REG_LOCK_IDX_MIN_VAL 0x0 - -#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c -#define HIF_CPM_LOCK_AVAIL_REG_WIDTH 32 -#define HIF_CPM_LOCK_AVAIL_REG_LENGTH 32 -#define HIF_CPM_LOCK_AVAIL_REG_LOCK_AVAIL_MASK 0xffffffff -#define HIF_CPM_LOCK_AVAIL_REG_LOCK_AVAIL_SHIFT 0 -#define HIF_CPM_LOCK_AVAIL_REG_LOCK_AVAIL_WIDTH 32 - -#define HIF_CPM_BUSY_SWITCH_REG_ADDR 0xa0000110 -#define HIF_CPM_BUSY_SWITCH_REG_WIDTH 32 -#define HIF_CPM_BUSY_SWITCH_REG_LENGTH 32 -#define HIF_CPM_BUSY_SWITCH_REG_BUSY_SWITCH_MASK 0x1 -#define HIF_CPM_BUSY_SWITCH_REG_BUSY_SWITCH_SHIFT 0 -#define HIF_CPM_BUSY_SWITCH_REG_BUSY_SWITCH_WIDTH 1 -#define HIF_CPM_BUSY_SWITCH_REG_BUSY_SWITCH_MAX_VAL 0x1 -#define HIF_CPM_BUSY_SWITCH_REG_BUSY_SWITCH_MIN_VAL 0x0 - -#define HIF_CPM_AXIS_IDA_WR_REQ_CNT_REG_ADDR 0xa0000114 -#define HIF_CPM_AXIS_IDA_WR_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_WR_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_WR_REQ_CNT_REG_AXIS_IDA_WR_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_WR_REQ_CNT_REG_AXIS_IDA_WR_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIS_IDA_WR_REQ_CNT_REG_AXIS_IDA_WR_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_DA_WR_REQ_CNT_REG_ADDR 0xa0000118 -#define HIF_CPM_AXIS_DA_WR_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_WR_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_WR_REQ_CNT_REG_AXIS_DA_WR_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_WR_REQ_CNT_REG_AXIS_DA_WR_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIS_DA_WR_REQ_CNT_REG_AXIS_DA_WR_REQ_CNT_WIDTH 
32 - -#define HIF_CPM_AXIS_IDA_WR_RSP_CNT_REG_ADDR 0xa000011c -#define HIF_CPM_AXIS_IDA_WR_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_CNT_REG_AXIS_IDA_WR_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_WR_RSP_CNT_REG_AXIS_IDA_WR_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIS_IDA_WR_RSP_CNT_REG_AXIS_IDA_WR_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_DA_WR_RSP_CNT_REG_ADDR 0xa0000120 -#define HIF_CPM_AXIS_DA_WR_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_CNT_REG_AXIS_DA_WR_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_WR_RSP_CNT_REG_AXIS_DA_WR_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIS_DA_WR_RSP_CNT_REG_AXIS_DA_WR_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_CNT_REG_ADDR 0xa0000124 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_CNT_REG_AXIS_IDA_WR_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_CNT_REG_AXIS_IDA_WR_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_CNT_REG_AXIS_IDA_WR_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_CNT_REG_ADDR 0xa0000128 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_CNT_REG_AXIS_DA_WR_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_CNT_REG_AXIS_DA_WR_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_CNT_REG_AXIS_DA_WR_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_WR_ERROR_ADDR_REG_ADDR 0xa000012c -#define HIF_CPM_AXIS_IDA_WR_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_WR_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_WR_ERROR_ADDR_REG_AXIS_IDA_WR_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_WR_ERROR_ADDR_REG_AXIS_IDA_WR_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIS_IDA_WR_ERROR_ADDR_REG_AXIS_IDA_WR_ERROR_ADDR_WIDTH 32 - -#define 
HIF_CPM_AXIS_DA_WR_ERROR_ADDR_REG_ADDR 0xa0000130 -#define HIF_CPM_AXIS_DA_WR_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_WR_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_WR_ERROR_ADDR_REG_AXIS_DA_WR_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_WR_ERROR_ADDR_REG_AXIS_DA_WR_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIS_DA_WR_ERROR_ADDR_REG_AXIS_DA_WR_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_REG_ADDR 0xa0000134 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_REG_AXIS_IDA_WR_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_REG_AXIS_IDA_WR_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_REG_AXIS_IDA_WR_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_ADDR 0xa0000138 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_AXIS_IDA_WR_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_AXIS_IDA_WR_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_AXIS_IDA_WR_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_AXIS_IDA_WR_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIS_IDA_WR_RSP_ERR_INT_MASK_REG_AXIS_IDA_WR_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_REG_ADDR 0xa000013c -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_REG_AXIS_DA_WR_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_REG_AXIS_DA_WR_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_REG_AXIS_DA_WR_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_ADDR 0xa0000140 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define 
HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_AXIS_DA_WR_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_AXIS_DA_WR_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_AXIS_DA_WR_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_AXIS_DA_WR_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIS_DA_WR_RSP_ERR_INT_MASK_REG_AXIS_DA_WR_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIS_IDA_RD_REQ_CNT_REG_ADDR 0xa0000144 -#define HIF_CPM_AXIS_IDA_RD_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_RD_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_RD_REQ_CNT_REG_AXIS_IDA_RD_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_RD_REQ_CNT_REG_AXIS_IDA_RD_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIS_IDA_RD_REQ_CNT_REG_AXIS_IDA_RD_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_DA_RD_REQ_CNT_REG_ADDR 0xa0000148 -#define HIF_CPM_AXIS_DA_RD_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_RD_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_RD_REQ_CNT_REG_AXIS_DA_RD_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_RD_REQ_CNT_REG_AXIS_DA_RD_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIS_DA_RD_REQ_CNT_REG_AXIS_DA_RD_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_RD_RSP_CNT_REG_ADDR 0xa000014c -#define HIF_CPM_AXIS_IDA_RD_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_CNT_REG_AXIS_IDA_RD_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_RD_RSP_CNT_REG_AXIS_IDA_RD_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIS_IDA_RD_RSP_CNT_REG_AXIS_IDA_RD_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_DA_RD_RSP_CNT_REG_ADDR 0xa0000150 -#define HIF_CPM_AXIS_DA_RD_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_CNT_REG_AXIS_DA_RD_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_RD_RSP_CNT_REG_AXIS_DA_RD_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIS_DA_RD_RSP_CNT_REG_AXIS_DA_RD_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_CNT_REG_ADDR 0xa0000154 -#define 
HIF_CPM_AXIS_IDA_RD_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_CNT_REG_AXIS_IDA_RD_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_CNT_REG_AXIS_IDA_RD_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_CNT_REG_AXIS_IDA_RD_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_CNT_REG_ADDR 0xa0000158 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_CNT_REG_AXIS_DA_RD_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_CNT_REG_AXIS_DA_RD_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_CNT_REG_AXIS_DA_RD_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_RD_ERROR_ADDR_REG_ADDR 0xa000015c -#define HIF_CPM_AXIS_IDA_RD_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_RD_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_RD_ERROR_ADDR_REG_AXIS_IDA_RD_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIS_IDA_RD_ERROR_ADDR_REG_AXIS_IDA_RD_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIS_IDA_RD_ERROR_ADDR_REG_AXIS_IDA_RD_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIS_DA_RD_ERROR_ADDR_REG_ADDR 0xa0000160 -#define HIF_CPM_AXIS_DA_RD_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_RD_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_RD_ERROR_ADDR_REG_AXIS_DA_RD_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIS_DA_RD_ERROR_ADDR_REG_AXIS_DA_RD_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIS_DA_RD_ERROR_ADDR_REG_AXIS_DA_RD_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_REG_ADDR 0xa0000164 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_REG_AXIS_IDA_RD_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_REG_AXIS_IDA_RD_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_REG_AXIS_IDA_RD_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_ADDR 0xa0000168 -#define 
HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_AXIS_IDA_RD_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_AXIS_IDA_RD_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_AXIS_IDA_RD_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_AXIS_IDA_RD_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIS_IDA_RD_RSP_ERR_INT_MASK_REG_AXIS_IDA_RD_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_REG_ADDR 0xa000016c -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_REG_AXIS_DA_RD_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_REG_AXIS_DA_RD_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_REG_AXIS_DA_RD_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_ADDR 0xa0000170 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_AXIS_DA_RD_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_AXIS_DA_RD_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_AXIS_DA_RD_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_AXIS_DA_RD_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIS_DA_RD_RSP_ERR_INT_MASK_REG_AXIS_DA_RD_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIM_IDA_WR_REQ_CNT_REG_ADDR 0xa0000174 -#define HIF_CPM_AXIM_IDA_WR_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_WR_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_WR_REQ_CNT_REG_AXIM_IDA_WR_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_WR_REQ_CNT_REG_AXIM_IDA_WR_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIM_IDA_WR_REQ_CNT_REG_AXIM_IDA_WR_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_DA_WR_REQ_CNT_REG_ADDR 0xa0000178 -#define 
HIF_CPM_AXIM_DA_WR_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_WR_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_WR_REQ_CNT_REG_AXIM_DA_WR_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_WR_REQ_CNT_REG_AXIM_DA_WR_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIM_DA_WR_REQ_CNT_REG_AXIM_DA_WR_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_WR_RSP_CNT_REG_ADDR 0xa000017c -#define HIF_CPM_AXIM_IDA_WR_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_CNT_REG_AXIM_IDA_WR_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_WR_RSP_CNT_REG_AXIM_IDA_WR_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIM_IDA_WR_RSP_CNT_REG_AXIM_IDA_WR_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_DA_WR_RSP_CNT_REG_ADDR 0xa0000180 -#define HIF_CPM_AXIM_DA_WR_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_CNT_REG_AXIM_DA_WR_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_WR_RSP_CNT_REG_AXIM_DA_WR_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIM_DA_WR_RSP_CNT_REG_AXIM_DA_WR_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_CNT_REG_ADDR 0xa0000184 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_CNT_REG_AXIM_IDA_WR_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_CNT_REG_AXIM_IDA_WR_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_CNT_REG_AXIM_IDA_WR_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_CNT_REG_ADDR 0xa0000188 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_CNT_REG_AXIM_DA_WR_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_CNT_REG_AXIM_DA_WR_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_CNT_REG_AXIM_DA_WR_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_WR_ERROR_ADDR_REG_ADDR 0xa000018c -#define HIF_CPM_AXIM_IDA_WR_ERROR_ADDR_REG_WIDTH 32 -#define 
HIF_CPM_AXIM_IDA_WR_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_WR_ERROR_ADDR_REG_AXIM_IDA_WR_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_WR_ERROR_ADDR_REG_AXIM_IDA_WR_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIM_IDA_WR_ERROR_ADDR_REG_AXIM_IDA_WR_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIM_DA_WR_ERROR_ADDR_REG_ADDR 0xa0000190 -#define HIF_CPM_AXIM_DA_WR_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_WR_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_WR_ERROR_ADDR_REG_AXIM_DA_WR_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_WR_ERROR_ADDR_REG_AXIM_DA_WR_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIM_DA_WR_ERROR_ADDR_REG_AXIM_DA_WR_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_REG_ADDR 0xa0000194 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_REG_AXIM_IDA_WR_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_REG_AXIM_IDA_WR_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_REG_AXIM_IDA_WR_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_ADDR 0xa0000198 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_AXIM_IDA_WR_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_AXIM_IDA_WR_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_AXIM_IDA_WR_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_AXIM_IDA_WR_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIM_IDA_WR_RSP_ERR_INT_MASK_REG_AXIM_IDA_WR_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_REG_ADDR 0xa000019c -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_REG_AXIM_DA_WR_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_REG_AXIM_DA_WR_RSP_ERR_SHIFT 0 -#define 
HIF_CPM_AXIM_DA_WR_RSP_ERR_REG_AXIM_DA_WR_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_ADDR 0xa00001a0 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_AXIM_DA_WR_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_AXIM_DA_WR_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_AXIM_DA_WR_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_AXIM_DA_WR_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIM_DA_WR_RSP_ERR_INT_MASK_REG_AXIM_DA_WR_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIM_IDA_RD_REQ_CNT_REG_ADDR 0xa00001a4 -#define HIF_CPM_AXIM_IDA_RD_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_RD_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_RD_REQ_CNT_REG_AXIM_IDA_RD_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_RD_REQ_CNT_REG_AXIM_IDA_RD_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIM_IDA_RD_REQ_CNT_REG_AXIM_IDA_RD_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_DA_RD_REQ_CNT_REG_ADDR 0xa00001a8 -#define HIF_CPM_AXIM_DA_RD_REQ_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_RD_REQ_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_RD_REQ_CNT_REG_AXIM_DA_RD_REQ_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_RD_REQ_CNT_REG_AXIM_DA_RD_REQ_CNT_SHIFT 0 -#define HIF_CPM_AXIM_DA_RD_REQ_CNT_REG_AXIM_DA_RD_REQ_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_RD_RSP_CNT_REG_ADDR 0xa00001ac -#define HIF_CPM_AXIM_IDA_RD_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_CNT_REG_AXIM_IDA_RD_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_RD_RSP_CNT_REG_AXIM_IDA_RD_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIM_IDA_RD_RSP_CNT_REG_AXIM_IDA_RD_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_DA_RD_RSP_CNT_REG_ADDR 0xa00001b0 -#define HIF_CPM_AXIM_DA_RD_RSP_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_CNT_REG_LENGTH 32 -#define 
HIF_CPM_AXIM_DA_RD_RSP_CNT_REG_AXIM_DA_RD_RSP_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_RD_RSP_CNT_REG_AXIM_DA_RD_RSP_CNT_SHIFT 0 -#define HIF_CPM_AXIM_DA_RD_RSP_CNT_REG_AXIM_DA_RD_RSP_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_CNT_REG_ADDR 0xa00001b4 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_CNT_REG_AXIM_IDA_RD_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_CNT_REG_AXIM_IDA_RD_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_CNT_REG_AXIM_IDA_RD_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_CNT_REG_ADDR 0xa00001b8 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_CNT_REG_AXIM_DA_RD_RSP_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_CNT_REG_AXIM_DA_RD_RSP_ERR_CNT_SHIFT 0 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_CNT_REG_AXIM_DA_RD_RSP_ERR_CNT_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_RD_ERROR_ADDR_REG_ADDR 0xa00001bc -#define HIF_CPM_AXIM_IDA_RD_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_RD_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_RD_ERROR_ADDR_REG_AXIM_IDA_RD_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIM_IDA_RD_ERROR_ADDR_REG_AXIM_IDA_RD_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIM_IDA_RD_ERROR_ADDR_REG_AXIM_IDA_RD_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIM_DA_RD_ERROR_ADDR_REG_ADDR 0xa00001c0 -#define HIF_CPM_AXIM_DA_RD_ERROR_ADDR_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_RD_ERROR_ADDR_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_RD_ERROR_ADDR_REG_AXIM_DA_RD_ERROR_ADDR_MASK 0xffffffff -#define HIF_CPM_AXIM_DA_RD_ERROR_ADDR_REG_AXIM_DA_RD_ERROR_ADDR_SHIFT 0 -#define HIF_CPM_AXIM_DA_RD_ERROR_ADDR_REG_AXIM_DA_RD_ERROR_ADDR_WIDTH 32 - -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_REG_ADDR 0xa00001c4 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_REG_LENGTH 32 -#define 
HIF_CPM_AXIM_IDA_RD_RSP_ERR_REG_AXIM_IDA_RD_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_REG_AXIM_IDA_RD_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_REG_AXIM_IDA_RD_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_ADDR 0xa00001c8 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_AXIM_IDA_RD_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_AXIM_IDA_RD_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_AXIM_IDA_RD_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_AXIM_IDA_RD_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIM_IDA_RD_RSP_ERR_INT_MASK_REG_AXIM_IDA_RD_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_REG_ADDR 0xa00001cc -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_REG_AXIM_DA_RD_RSP_ERR_MASK 0x1 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_REG_AXIM_DA_RD_RSP_ERR_SHIFT 0 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_REG_AXIM_DA_RD_RSP_ERR_WIDTH 1 - -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_ADDR 0xa00001d0 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_AXIM_DA_RD_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_AXIM_DA_RD_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_AXIM_DA_RD_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_AXIM_DA_RD_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_AXIM_DA_RD_RSP_ERR_INT_MASK_REG_AXIM_DA_RD_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_STRB_ERR_REG_ADDR 0xa00001d4 -#define HIF_CPM_STRB_ERR_REG_WIDTH 32 -#define HIF_CPM_STRB_ERR_REG_LENGTH 32 -#define HIF_CPM_STRB_ERR_REG_STRB_ERR_MASK 0x1 -#define 
HIF_CPM_STRB_ERR_REG_STRB_ERR_SHIFT 0 -#define HIF_CPM_STRB_ERR_REG_STRB_ERR_WIDTH 1 - -#define HIF_CPM_STRB_ERR_INT_MASK_REG_ADDR 0xa00001d8 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_STRB_ERR_INT_MASK_MASK 0x1 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_STRB_ERR_INT_MASK_SHIFT 0 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_STRB_ERR_INT_MASK_WIDTH 1 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_STRB_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_STRB_ERR_INT_MASK_REG_STRB_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_IDA_OPCMD_BUSY_IDX_ERR_CNT_REG_ADDR 0xa00001dc -#define HIF_CPM_IDA_OPCMD_BUSY_IDX_ERR_CNT_REG_WIDTH 32 -#define HIF_CPM_IDA_OPCMD_BUSY_IDX_ERR_CNT_REG_LENGTH 32 -#define HIF_CPM_IDA_OPCMD_BUSY_IDX_ERR_CNT_REG_IDA_OPCMD_BUSY_IDX_ERR_CNT_MASK 0xffffffff -#define HIF_CPM_IDA_OPCMD_BUSY_IDX_ERR_CNT_REG_IDA_OPCMD_BUSY_IDX_ERR_CNT_SHIFT 0 -#define HIF_CPM_IDA_OPCMD_BUSY_IDX_ERR_CNT_REG_IDA_OPCMD_BUSY_IDX_ERR_CNT_WIDTH 32 - -#define HIF_CPM_CSR_MON_ADDR_REG_ADDR 0xa00001e0 -#define HIF_CPM_CSR_MON_ADDR_REG_WIDTH 32 -#define HIF_CPM_CSR_MON_ADDR_REG_LENGTH 32 -#define HIF_CPM_CSR_MON_ADDR_REG_CSR_MON_ADDR_MASK 0xffffffff -#define HIF_CPM_CSR_MON_ADDR_REG_CSR_MON_ADDR_SHIFT 0 -#define HIF_CPM_CSR_MON_ADDR_REG_CSR_MON_ADDR_WIDTH 32 -#define HIF_CPM_CSR_MON_ADDR_REG_CSR_MON_ADDR_MAX_VAL 0xffffffff -#define HIF_CPM_CSR_MON_ADDR_REG_CSR_MON_ADDR_MIN_VAL 0x0 - -#define HIF_CPM_CSR_MON_WR_CNT_REG_ADDR 0xa00001e4 -#define HIF_CPM_CSR_MON_WR_CNT_REG_WIDTH 32 -#define HIF_CPM_CSR_MON_WR_CNT_REG_LENGTH 32 -#define HIF_CPM_CSR_MON_WR_CNT_REG_CSR_MON_WR_CNT_MASK 0xff -#define HIF_CPM_CSR_MON_WR_CNT_REG_CSR_MON_WR_CNT_SHIFT 0 -#define HIF_CPM_CSR_MON_WR_CNT_REG_CSR_MON_WR_CNT_WIDTH 8 - -#define HIF_CPM_CSR_MON_WR_DATA_REG_ADDR 0xa00001e8 -#define HIF_CPM_CSR_MON_WR_DATA_REG_WIDTH 32 -#define HIF_CPM_CSR_MON_WR_DATA_REG_LENGTH 32 -#define HIF_CPM_CSR_MON_WR_DATA_REG_CSR_MON_WR_DATA_MASK 0xffffffff -#define 
HIF_CPM_CSR_MON_WR_DATA_REG_CSR_MON_WR_DATA_SHIFT 0 -#define HIF_CPM_CSR_MON_WR_DATA_REG_CSR_MON_WR_DATA_WIDTH 32 - -#define HIF_CPM_CSR_MON_RD_CNT_REG_ADDR 0xa00001ec -#define HIF_CPM_CSR_MON_RD_CNT_REG_WIDTH 32 -#define HIF_CPM_CSR_MON_RD_CNT_REG_LENGTH 32 -#define HIF_CPM_CSR_MON_RD_CNT_REG_CSR_MON_RD_CNT_MASK 0xff -#define HIF_CPM_CSR_MON_RD_CNT_REG_CSR_MON_RD_CNT_SHIFT 0 -#define HIF_CPM_CSR_MON_RD_CNT_REG_CSR_MON_RD_CNT_WIDTH 8 - -#define HIF_CPM_CHIP_HOTFIX_NUM_REG_ADDR 0xa00001f0 -#define HIF_CPM_CHIP_HOTFIX_NUM_REG_WIDTH 32 -#define HIF_CPM_CHIP_HOTFIX_NUM_REG_LENGTH 32 -#define HIF_CPM_CHIP_HOTFIX_NUM_REG_CHIP_HOTFIX_NUM_MASK 0xffffffff -#define HIF_CPM_CHIP_HOTFIX_NUM_REG_CHIP_HOTFIX_NUM_SHIFT 0 -#define HIF_CPM_CHIP_HOTFIX_NUM_REG_CHIP_HOTFIX_NUM_WIDTH 32 - -#define HIF_CPM_CHIP_FEATURE_FLAG_REG_ADDR 0xa00001f4 -#define HIF_CPM_CHIP_FEATURE_FLAG_REG_WIDTH 32 -#define HIF_CPM_CHIP_FEATURE_FLAG_REG_LENGTH 32 -#define HIF_CPM_CHIP_FEATURE_FLAG_REG_CHIP_FEATURE_FLAG_MASK 0xffffffff -#define HIF_CPM_CHIP_FEATURE_FLAG_REG_CHIP_FEATURE_FLAG_SHIFT 0 -#define HIF_CPM_CHIP_FEATURE_FLAG_REG_CHIP_FEATURE_FLAG_WIDTH 32 - -#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 -#define HIF_CPM_IDA_DATA_MEM_WIDTH 32 -#define HIF_CPM_IDA_DATA_MEM_LENGTH 32 -#define HIF_CPM_IDA_DATA_MEM_DEPTH 512 -#define HIF_CPM_IDA_DATA_MEM_DATA_MASK 0xffffffff -#define HIF_CPM_IDA_DATA_MEM_DATA_SHIFT 0 -#define HIF_CPM_IDA_DATA_MEM_DATA_WIDTH 32 - -#define HIF_CPM_MEM_ERROR_INT_ADDR 0xa0001000 -#define HIF_CPM_MEM_ERROR_INT_WIDTH 32 -#define HIF_CPM_MEM_ERROR_INT_LENGTH 32 -#define HIF_CPM_MEM_ERROR_INT_IDA_DATA_MEM_SB_ERR_MASK 0x1 -#define HIF_CPM_MEM_ERROR_INT_IDA_DATA_MEM_SB_ERR_SHIFT 0 -#define HIF_CPM_MEM_ERROR_INT_IDA_DATA_MEM_SB_ERR_WIDTH 1 -#define HIF_CPM_MEM_ERROR_INT_IDA_DATA_MEM_DB_ERR_MASK 0x2 -#define HIF_CPM_MEM_ERROR_INT_IDA_DATA_MEM_DB_ERR_SHIFT 1 -#define HIF_CPM_MEM_ERROR_INT_IDA_DATA_MEM_DB_ERR_WIDTH 1 - -#define HIF_CPM_MEM_INIT_CTRL_ADDR 0xa0001008 -#define 
HIF_CPM_MEM_INIT_CTRL_WIDTH 32 -#define HIF_CPM_MEM_INIT_CTRL_LENGTH 32 -#define HIF_CPM_MEM_INIT_CTRL_IDA_DATA_MEM_INIT_RST_N_MASK 0x1 -#define HIF_CPM_MEM_INIT_CTRL_IDA_DATA_MEM_INIT_RST_N_SHIFT 0 -#define HIF_CPM_MEM_INIT_CTRL_IDA_DATA_MEM_INIT_RST_N_WIDTH 1 -#define HIF_CPM_MEM_INIT_CTRL_IDA_DATA_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_CPM_MEM_INIT_CTRL_IDA_DATA_MEM_INIT_RST_N_MIN_VAL 0x0 - -#define HIF_CPM_TIMEOUT_INT_REG_ADDR 0xa0001010 -#define HIF_CPM_TIMEOUT_INT_REG_WIDTH 32 -#define HIF_CPM_TIMEOUT_INT_REG_LENGTH 32 -#define HIF_CPM_TIMEOUT_INT_REG_LOCK_GET_REG_TIMEOUT_MASK 0x1 -#define HIF_CPM_TIMEOUT_INT_REG_LOCK_GET_REG_TIMEOUT_SHIFT 0 -#define HIF_CPM_TIMEOUT_INT_REG_LOCK_GET_REG_TIMEOUT_WIDTH 1 -#define HIF_CPM_TIMEOUT_INT_REG_IDA_DATA_MEM_TIMEOUT_MASK 0x2 -#define HIF_CPM_TIMEOUT_INT_REG_IDA_DATA_MEM_TIMEOUT_SHIFT 1 -#define HIF_CPM_TIMEOUT_INT_REG_IDA_DATA_MEM_TIMEOUT_WIDTH 1 - -#define HIF_CPM_TIMEOUT_INT_MASK_REG_ADDR 0xa0001014 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_LOCK_GET_REG_TIMEOUT_INT_MASK_MASK 0x1 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_LOCK_GET_REG_TIMEOUT_INT_MASK_SHIFT 0 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_LOCK_GET_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_LOCK_GET_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_LOCK_GET_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_IDA_DATA_MEM_TIMEOUT_INT_MASK_MASK 0x2 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_IDA_DATA_MEM_TIMEOUT_INT_MASK_SHIFT 1 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_IDA_DATA_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_IDA_DATA_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_CPM_TIMEOUT_INT_MASK_REG_IDA_DATA_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define HIF_CPM_TIMEOUT_CFG_REG_ADDR 0xa0001018 -#define HIF_CPM_TIMEOUT_CFG_REG_WIDTH 32 -#define HIF_CPM_TIMEOUT_CFG_REG_LENGTH 32 -#define 
HIF_CPM_TIMEOUT_CFG_REG_TIMEOUT_PARA_MASK 0xffff -#define HIF_CPM_TIMEOUT_CFG_REG_TIMEOUT_PARA_SHIFT 0 -#define HIF_CPM_TIMEOUT_CFG_REG_TIMEOUT_PARA_WIDTH 16 -#define HIF_CPM_TIMEOUT_CFG_REG_TIMEOUT_PARA_MAX_VAL 0xffff -#define HIF_CPM_TIMEOUT_CFG_REG_TIMEOUT_PARA_MIN_VAL 0x0 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_irq_csr_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_irq_csr_defines.h deleted file mode 100644 index efdea561a28605c3645b9a9b6e98dcfcf272be90..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_irq_csr_defines.h +++ /dev/null @@ -1,1778 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef _HIF_IRQ_CSR_DEFINES_H_ -#define _HIF_IRQ_CSR_DEFINES_H_ - -#define HIF_IRQ_SOFT_RESET_REG_ADDR 0xa1100000 -#define HIF_IRQ_SOFT_RESET_REG_WIDTH 32 -#define HIF_IRQ_SOFT_RESET_REG_LENGTH 32 -#define HIF_IRQ_SOFT_RESET_REG_SOFT_RESET_MASK 0x1 -#define HIF_IRQ_SOFT_RESET_REG_SOFT_RESET_SHIFT 0 -#define HIF_IRQ_SOFT_RESET_REG_SOFT_RESET_WIDTH 1 -#define HIF_IRQ_SOFT_RESET_REG_SOFT_RESET_MAX_VAL 0x1 -#define HIF_IRQ_SOFT_RESET_REG_SOFT_RESET_MIN_VAL 0x0 - -#define HIF_IRQ_SCRATCH_PAD_REG_ADDR 0xa110000c -#define HIF_IRQ_SCRATCH_PAD_REG_WIDTH 32 -#define HIF_IRQ_SCRATCH_PAD_REG_LENGTH 32 -#define HIF_IRQ_SCRATCH_PAD_REG_SCRATCH_PAD_MASK 0xffffffff -#define HIF_IRQ_SCRATCH_PAD_REG_SCRATCH_PAD_SHIFT 0 -#define HIF_IRQ_SCRATCH_PAD_REG_SCRATCH_PAD_WIDTH 32 -#define HIF_IRQ_SCRATCH_PAD_REG_SCRATCH_PAD_MAX_VAL 0xffffffff -#define HIF_IRQ_SCRATCH_PAD_REG_SCRATCH_PAD_MIN_VAL 0x0 - -#define HIF_IRQ_CSR_ERR_FLAG_REG_ADDR 0xa1100010 -#define HIF_IRQ_CSR_ERR_FLAG_REG_WIDTH 32 -#define HIF_IRQ_CSR_ERR_FLAG_REG_LENGTH 32 -#define HIF_IRQ_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_MASK 0x1 -#define HIF_IRQ_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_SHIFT 0 -#define 
HIF_IRQ_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_WIDTH 1 - -#define HIF_IRQ_CSR_ERR_ADDR_REG_ADDR 0xa1100014 -#define HIF_IRQ_CSR_ERR_ADDR_REG_WIDTH 32 -#define HIF_IRQ_CSR_ERR_ADDR_REG_LENGTH 32 -#define HIF_IRQ_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_MASK 0xffffffff -#define HIF_IRQ_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_SHIFT 0 -#define HIF_IRQ_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_WIDTH 32 - -#define HIF_IRQ_CSR_ERR_LEN_REG_ADDR 0xa1100018 -#define HIF_IRQ_CSR_ERR_LEN_REG_WIDTH 32 -#define HIF_IRQ_CSR_ERR_LEN_REG_LENGTH 32 -#define HIF_IRQ_CSR_ERR_LEN_REG_CSR_ERR_LEN_MASK 0x3ff -#define HIF_IRQ_CSR_ERR_LEN_REG_CSR_ERR_LEN_SHIFT 0 -#define HIF_IRQ_CSR_ERR_LEN_REG_CSR_ERR_LEN_WIDTH 10 - -#define HIF_IRQ_CSR_ERR_TYPE_REG_ADDR 0xa110001c -#define HIF_IRQ_CSR_ERR_TYPE_REG_WIDTH 32 -#define HIF_IRQ_CSR_ERR_TYPE_REG_LENGTH 32 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_MASK 0x1 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_SHIFT 0 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_WIDTH 1 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_MASK 0x6 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_SHIFT 1 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_WIDTH 2 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_MASK 0x8 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_SHIFT 3 -#define HIF_IRQ_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_WIDTH 1 - -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_ADDR 0xa1100020 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_WIDTH 32 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_LENGTH 32 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_EN_SHIFT 
12 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_EN_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MSIX_REG_TOP_INT_SUM_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_ADDR 0xa1100024 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_PCIE2HIF_PCIE0_MSG_INT_MSIX_REG_PCIE2HIF_PCIE0_MSG_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_ADDR 0xa1100028 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define 
HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_PCIE2HIF_PCIE1_MSG_INT_MSIX_REG_PCIE2HIF_PCIE1_MSG_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_ADDR 0xa1100030 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_SIZE 2 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_STRIDE 0x4 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_MSIX_REG_PIO2IRQ_PIO_REQ_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_ADDR 0xa1100038 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_VECTOR_ID_MASK 0xfff 
-#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_REG_CMDQM2IRQ_CMDQ_REQ_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_ADDR 0xa1100040 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_SIZE 2 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_STRIDE 0x4 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_EN_MAX_VAL 0x1 -#define 
HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_MSIX_REG_CMDQM2IRQ_DMA_RX_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_ADDR 0xa1100048 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_SIZE 2 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_STRIDE 0x4 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_MSIX_REG_CMDQM2IRQ_DMA_TX_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100050 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_WIDTH 32 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_LENGTH 32 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_VECTOR_ID_MAX_VAL \ - 0xfff -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_VECTOR_ID_MIN_VAL 0x0 -#define 
HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_EN_MASK 0x1000 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_EN_SHIFT 12 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_EN_WIDTH 1 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_EN_MAX_VAL 0x1 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_TBL2IRQ_TBL_RD_DONE_INT_MSIX_EN_MIN_VAL 0x0 - -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_ADDR 0xa1100054 -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_WIDTH 32 -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_LENGTH 32 -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_BUBBLE_INSERT_TH_MASK 0xffff -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_BUBBLE_INSERT_TH_SHIFT 0 -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_BUBBLE_INSERT_TH_WIDTH 16 -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_BUBBLE_INSERT_TH_MAX_VAL 0xffff -#define HIF_IRQ_BUBBLE_INSERT_TH_REG_BUBBLE_INSERT_TH_MIN_VAL 0x0 - -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_ADDR 0xa1100058 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_PCIE0_INT_SUM_STS_MASK 0x1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_PCIE0_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_PCIE0_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_PCIE1_INT_SUM_STS_MASK 0x2 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_PCIE1_INT_SUM_STS_SHIFT 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_PCIE1_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_NIF_INT_SUM_STS_MASK 0x4 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_NIF_INT_SUM_STS_SHIFT 2 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_NIF_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_EPP_INT_SUM_STS_MASK 0x8 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_EPP_INT_SUM_STS_SHIFT 3 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_EPP_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_IPP_INT_SUM_STS_MASK 0x10 
-#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_IPP_INT_SUM_STS_SHIFT 4 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_IPP_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_TPE_INT_SUM_STS_MASK 0x20 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_TPE_INT_SUM_STS_SHIFT 5 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_TPE_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_TM_INT_SUM_STS_MASK 0x40 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_TM_INT_SUM_STS_SHIFT 6 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_TM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_HIF_INT_SUM_STS_MASK 0x80 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_HIF_INT_SUM_STS_SHIFT 7 -#define HIF_IRQ_TOP_INT_SUM_STATUS_REG_TOP_HIF_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_ADDR 0xa110005c -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE0_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE0_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE0_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE0_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE0_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE1_INT_SUM_MASK_MASK 0x2 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE1_INT_SUM_MASK_SHIFT 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE1_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE1_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_PCIE1_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_NIF_INT_SUM_MASK_MASK 0x4 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_NIF_INT_SUM_MASK_SHIFT 2 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_NIF_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_NIF_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_NIF_INT_SUM_MASK_MIN_VAL 0x0 -#define 
HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_EPP_INT_SUM_MASK_MASK 0x8 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_EPP_INT_SUM_MASK_SHIFT 3 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_EPP_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_EPP_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_EPP_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_IPP_INT_SUM_MASK_MASK 0x10 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_IPP_INT_SUM_MASK_SHIFT 4 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_IPP_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_IPP_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_IPP_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TPE_INT_SUM_MASK_MASK 0x20 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TPE_INT_SUM_MASK_SHIFT 5 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TPE_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TPE_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TPE_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TM_INT_SUM_MASK_MASK 0x40 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TM_INT_SUM_MASK_SHIFT 6 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_TM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_HIF_INT_SUM_MASK_MASK 0x80 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_HIF_INT_SUM_MASK_SHIFT 7 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_HIF_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_HIF_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TOP_INT_SUM_MASK_REG_TOP_HIF_INT_SUM_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_ADDR 0xa1100060 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_CPM_INT_SUM_STS_MASK 0x1 -#define 
HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_CPM_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_CPM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_PIO_INT_SUM_STS_MASK 0x2 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_PIO_INT_SUM_STS_SHIFT 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_PIO_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_ADPT_INT_SUM_STS_MASK 0x4 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_ADPT_INT_SUM_STS_SHIFT 2 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_ADPT_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_TBL_INT_SUM_STS_MASK 0x8 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_TBL_INT_SUM_STS_SHIFT 3 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_TBL_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_CMDQM_INT_SUM_STS_MASK 0x10 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_CMDQM_INT_SUM_STS_SHIFT 4 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_CMDQM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_IRQ_INT_SUM_STS_MASK 0x20 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_IRQ_INT_SUM_STS_SHIFT 5 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_IRQ_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_NOC_INT_SUM_STS_MASK 0x40 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_NOC_INT_SUM_STS_SHIFT 6 -#define HIF_IRQ_HIF_INT_SUM_STATUS_REG_HIF_NOC_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_ADDR 0xa1100064 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CPM_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CPM_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CPM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CPM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CPM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_PIO_INT_SUM_MASK_MASK 0x2 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_PIO_INT_SUM_MASK_SHIFT 1 
-#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_PIO_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_PIO_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_PIO_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_ADPT_INT_SUM_MASK_MASK 0x4 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_ADPT_INT_SUM_MASK_SHIFT 2 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_ADPT_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_ADPT_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_ADPT_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_TBL_INT_SUM_MASK_MASK 0x8 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_TBL_INT_SUM_MASK_SHIFT 3 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_TBL_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_TBL_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_TBL_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CMDQM_INT_SUM_MASK_MASK 0x10 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CMDQM_INT_SUM_MASK_SHIFT 4 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CMDQM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CMDQM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_CMDQM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_IRQ_INT_SUM_MASK_MASK 0x20 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_IRQ_INT_SUM_MASK_SHIFT 5 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_IRQ_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_IRQ_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_IRQ_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_NOC_INT_SUM_MASK_MASK 0x40 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_NOC_INT_SUM_MASK_SHIFT 6 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_NOC_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_NOC_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_HIF_INT_SUM_MASK_REG_HIF_NOC_INT_SUM_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_ADDR 
0xa1100068 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_MTR_INT_SUM_STS_MASK 0x1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_MTR_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_MTR_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_QMU_INT_SUM_STS_MASK 0x2 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_QMU_INT_SUM_STS_SHIFT 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_QMU_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_FRG_INT_SUM_STS_MASK 0x4 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_FRG_INT_SUM_STS_SHIFT 2 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_FRG_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_PDB_INT_SUM_STS_MASK 0x8 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_PDB_INT_SUM_STS_SHIFT 3 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_PDB_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_PRA_INT_SUM_STS_MASK 0x10 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_PRA_INT_SUM_STS_SHIFT 4 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_PRA_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_SCA_INT_SUM_STS_MASK 0x20 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_SCA_INT_SUM_STS_SHIFT 5 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_SCA_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_OCA_INT_SUM_STS_MASK 0x40 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_OCA_INT_SUM_STS_SHIFT 6 -#define HIF_IRQ_TM_INT_SUM_STATUS_REG_TM_OCA_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_TM_INT_SUM_MASK_REG_ADDR 0xa110006c -#define HIF_IRQ_TM_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_MTR_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_MTR_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_MTR_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_MTR_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_MTR_INT_SUM_MASK_MIN_VAL 0x0 -#define 
HIF_IRQ_TM_INT_SUM_MASK_REG_TM_QMU_INT_SUM_MASK_MASK 0x2 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_QMU_INT_SUM_MASK_SHIFT 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_QMU_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_QMU_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_QMU_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_FRG_INT_SUM_MASK_MASK 0x4 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_FRG_INT_SUM_MASK_SHIFT 2 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_FRG_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_FRG_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_FRG_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PDB_INT_SUM_MASK_MASK 0x8 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PDB_INT_SUM_MASK_SHIFT 3 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PDB_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PDB_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PDB_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PRA_INT_SUM_MASK_MASK 0x10 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PRA_INT_SUM_MASK_SHIFT 4 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PRA_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PRA_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_PRA_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_SCA_INT_SUM_MASK_MASK 0x20 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_SCA_INT_SUM_MASK_SHIFT 5 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_SCA_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_SCA_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_SCA_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_OCA_INT_SUM_MASK_MASK 0x40 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_OCA_INT_SUM_MASK_SHIFT 6 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_OCA_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_OCA_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TM_INT_SUM_MASK_REG_TM_OCA_INT_SUM_MASK_MIN_VAL 0x0 - 
-#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_ADDR 0xa1100070 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MMC_INT_SUM_STS_MASK 0x1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MMC_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MMC_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_QPM_INT_SUM_STS_MASK 0x2 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_QPM_INT_SUM_STS_SHIFT 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_QPM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_SV_INT_SUM_STS_MASK 0x4 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_SV_INT_SUM_STS_SHIFT 2 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_SV_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MER_INT_SUM_STS_MASK 0x8 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MER_INT_SUM_STS_SHIFT 3 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MER_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_TOP_INT_SUM_STS_MASK 0x10 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_TOP_INT_SUM_STS_SHIFT 4 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_TOP_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_CRDT_INT_SUM_STS_MASK 0x20 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_CRDT_INT_SUM_STS_SHIFT 5 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_CRDT_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MET_INT_SUM_STS_MASK 0x40 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MET_INT_SUM_STS_SHIFT 6 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MET_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_SHAP_INT_SUM_STS_MASK 0x80 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_SHAP_INT_SUM_STS_SHIFT 7 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_SHAP_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MA_INT_SUM_STS_MASK 0x100 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MA_INT_SUM_STS_SHIFT 8 -#define 
HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_MA_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_CEM_INT_SUM_STS_MASK 0x200 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_CEM_INT_SUM_STS_SHIFT 9 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_CEM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_PET_INT_SUM_STS_MASK 0x400 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_PET_INT_SUM_STS_SHIFT 10 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_PET_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_PG_INT_SUM_STS_MASK 0x800 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_PG_INT_SUM_STS_SHIFT 11 -#define HIF_IRQ_TPE_INT_SUM_STATUS_REG_TPE_PG_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_ADDR 0xa1100074 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MMC_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MMC_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MMC_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MMC_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MMC_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_QPM_INT_SUM_MASK_MASK 0x2 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_QPM_INT_SUM_MASK_SHIFT 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_QPM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_QPM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_QPM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SV_INT_SUM_MASK_MASK 0x4 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SV_INT_SUM_MASK_SHIFT 2 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SV_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SV_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SV_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MER_INT_SUM_MASK_MASK 0x8 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MER_INT_SUM_MASK_SHIFT 3 -#define 
HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MER_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MER_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MER_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_TOP_INT_SUM_MASK_MASK 0x10 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_TOP_INT_SUM_MASK_SHIFT 4 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_TOP_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_TOP_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_TOP_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CRDT_INT_SUM_MASK_MASK 0x20 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CRDT_INT_SUM_MASK_SHIFT 5 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CRDT_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CRDT_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CRDT_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MET_INT_SUM_MASK_MASK 0x40 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MET_INT_SUM_MASK_SHIFT 6 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MET_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MET_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MET_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SHAP_INT_SUM_MASK_MASK 0x80 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SHAP_INT_SUM_MASK_SHIFT 7 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SHAP_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SHAP_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_SHAP_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MA_INT_SUM_MASK_MASK 0x100 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MA_INT_SUM_MASK_SHIFT 8 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MA_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MA_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_MA_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CEM_INT_SUM_MASK_MASK 
0x200 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CEM_INT_SUM_MASK_SHIFT 9 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CEM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CEM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_CEM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PET_INT_SUM_MASK_MASK 0x400 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PET_INT_SUM_MASK_SHIFT 10 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PET_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PET_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PET_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PG_INT_SUM_MASK_MASK 0x800 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PG_INT_SUM_MASK_SHIFT 11 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PG_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PG_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TPE_INT_SUM_MASK_REG_TPE_PG_INT_SUM_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_ADDR 0xa1100078 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_EM_INT_SUM_STS_MASK 0x1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_EM_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_EM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_STAT_INT_SUM_STS_MASK 0x2 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_STAT_INT_SUM_STS_SHIFT 1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_STAT_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_CLSF_DMA_INT_SUM_STS_MASK 0x4 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_CLSF_DMA_INT_SUM_STS_SHIFT 2 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_CLSF_DMA_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_DIR_INT_SUM_STS_MASK 0x8 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_DIR_INT_SUM_STS_SHIFT 3 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_DIR_INT_SUM_STS_WIDTH 1 -#define 
HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_TCAM_INT_SUM_STS_MASK 0x10 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_TCAM_INT_SUM_STS_SHIFT 4 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_TCAM_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_CLSF_CTRL_INT_SUM_STS_MASK 0x20 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_CLSF_CTRL_INT_SUM_STS_SHIFT 5 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_CLSF_CTRL_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_PRS_INT_SUM_STS_MASK 0x40 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_PRS_INT_SUM_STS_SHIFT 6 -#define HIF_IRQ_IPP_INT_SUM_STATUS_REG_IPP_PRS_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_ADDR 0xa110007c -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_EM_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_EM_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_EM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_EM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_EM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_STAT_INT_SUM_MASK_MASK 0x2 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_STAT_INT_SUM_MASK_SHIFT 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_STAT_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_STAT_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_STAT_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_DMA_INT_SUM_MASK_MASK 0x4 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_DMA_INT_SUM_MASK_SHIFT 2 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_DMA_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_DMA_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_DMA_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_DIR_INT_SUM_MASK_MASK 0x8 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_DIR_INT_SUM_MASK_SHIFT 3 -#define 
HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_DIR_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_DIR_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_DIR_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_TCAM_INT_SUM_MASK_MASK 0x10 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_TCAM_INT_SUM_MASK_SHIFT 4 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_TCAM_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_TCAM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_TCAM_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_CTRL_INT_SUM_MASK_MASK 0x20 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_CTRL_INT_SUM_MASK_SHIFT 5 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_CTRL_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_CTRL_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_CLSF_CTRL_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_PRS_INT_SUM_MASK_MASK 0x40 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_PRS_INT_SUM_MASK_SHIFT 6 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_PRS_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_PRS_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_IPP_INT_SUM_MASK_REG_IPP_PRS_INT_SUM_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_EPP_INT_SUM_STATUS_REG_ADDR 0xa1100080 -#define HIF_IRQ_EPP_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_EPP_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_EPP_INT_SUM_STATUS_REG_EPP_RWE_INT_SUM_STS_MASK 0x1 -#define HIF_IRQ_EPP_INT_SUM_STATUS_REG_EPP_RWE_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_EPP_INT_SUM_STATUS_REG_EPP_RWE_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_ADDR 0xa1100084 -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_EPP_RWE_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_EPP_RWE_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_EPP_RWE_INT_SUM_MASK_WIDTH 1 -#define 
HIF_IRQ_EPP_INT_SUM_MASK_REG_EPP_RWE_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_EPP_INT_SUM_MASK_REG_EPP_RWE_INT_SUM_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_ADDR 0xa1100088 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_WIDTH 32 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_LENGTH 32 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NDP_INT_SUM_STS_MASK 0x1 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NDP_INT_SUM_STS_SHIFT 0 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NDP_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NEA_INT_SUM_STS_MASK 0x2 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NEA_INT_SUM_STS_SHIFT 1 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NEA_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NEM_INT_SUM_STS_MASK 0x4 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NEM_INT_SUM_STS_SHIFT 2 -#define HIF_IRQ_NIF_INT_SUM_STATUS_REG_NIF_NEM_INT_SUM_STS_WIDTH 1 - -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_ADDR 0xa110008c -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_WIDTH 32 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_LENGTH 32 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NDP_INT_SUM_MASK_MASK 0x1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NDP_INT_SUM_MASK_SHIFT 0 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NDP_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NDP_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NDP_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEA_INT_SUM_MASK_MASK 0x2 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEA_INT_SUM_MASK_SHIFT 1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEA_INT_SUM_MASK_WIDTH 1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEA_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEA_INT_SUM_MASK_MIN_VAL 0x0 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEM_INT_SUM_MASK_MASK 0x4 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEM_INT_SUM_MASK_SHIFT 2 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEM_INT_SUM_MASK_WIDTH 1 -#define 
HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEM_INT_SUM_MASK_MAX_VAL 0x1 -#define HIF_IRQ_NIF_INT_SUM_MASK_REG_NIF_NEM_INT_SUM_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_ADDR 0xa1100090 -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_WIDTH 32 -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_LENGTH 32 -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_MASK 0x7ff -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_SHIFT 0 -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_WIDTH 11 -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_MAX_VAL 0x7ff -#define HIF_IRQ_PCIE1_GLB_FUNC_ID_BASE_REG_PCIE1_GLB_FUNC_ID_BASE_MIN_VAL 0x0 - -#define HIF_IRQ_INT_DB_REG_ADDR 0xa1100094 -#define HIF_IRQ_INT_DB_REG_WIDTH 32 -#define HIF_IRQ_INT_DB_REG_LENGTH 32 -#define HIF_IRQ_INT_DB_REG_INT_DB_VECTOR_ID_MASK 0xfff -#define HIF_IRQ_INT_DB_REG_INT_DB_VECTOR_ID_SHIFT 0 -#define HIF_IRQ_INT_DB_REG_INT_DB_VECTOR_ID_WIDTH 12 -#define HIF_IRQ_INT_DB_REG_INT_DB_VECTOR_ID_MAX_VAL 0xfff -#define HIF_IRQ_INT_DB_REG_INT_DB_VECTOR_ID_MIN_VAL 0x0 -#define HIF_IRQ_INT_DB_REG_INT_DB_OP_MASK 0x1f000 -#define HIF_IRQ_INT_DB_REG_INT_DB_OP_SHIFT 12 -#define HIF_IRQ_INT_DB_REG_INT_DB_OP_WIDTH 5 -#define HIF_IRQ_INT_DB_REG_INT_DB_OP_MAX_VAL 0x1f -#define HIF_IRQ_INT_DB_REG_INT_DB_OP_MIN_VAL 0x0 - -#define HIF_IRQ_BRS_SCAN_TH_REG_ADDR 0xa11000b4 -#define HIF_IRQ_BRS_SCAN_TH_REG_WIDTH 32 -#define HIF_IRQ_BRS_SCAN_TH_REG_LENGTH 32 -#define HIF_IRQ_BRS_SCAN_TH_REG_BRS_SCAN_TH_MASK 0xff -#define HIF_IRQ_BRS_SCAN_TH_REG_BRS_SCAN_TH_SHIFT 0 -#define HIF_IRQ_BRS_SCAN_TH_REG_BRS_SCAN_TH_WIDTH 8 -#define HIF_IRQ_BRS_SCAN_TH_REG_BRS_SCAN_TH_MAX_VAL 0xff -#define HIF_IRQ_BRS_SCAN_TH_REG_BRS_SCAN_TH_MIN_VAL 0x0 - -#define HIF_IRQ_BRS_SCAN_EN_REG_ADDR 0xa11000b8 -#define HIF_IRQ_BRS_SCAN_EN_REG_WIDTH 32 -#define HIF_IRQ_BRS_SCAN_EN_REG_LENGTH 32 -#define HIF_IRQ_BRS_SCAN_EN_REG_BRS_SCAN_EN_MASK 0x1 -#define HIF_IRQ_BRS_SCAN_EN_REG_BRS_SCAN_EN_SHIFT 0 -#define 
HIF_IRQ_BRS_SCAN_EN_REG_BRS_SCAN_EN_WIDTH 1 -#define HIF_IRQ_BRS_SCAN_EN_REG_BRS_SCAN_EN_MAX_VAL 0x1 -#define HIF_IRQ_BRS_SCAN_EN_REG_BRS_SCAN_EN_MIN_VAL 0x0 - -#define HIF_IRQ_BRS_SCAN_MODE_REG_ADDR 0xa11000bc -#define HIF_IRQ_BRS_SCAN_MODE_REG_WIDTH 32 -#define HIF_IRQ_BRS_SCAN_MODE_REG_LENGTH 32 -#define HIF_IRQ_BRS_SCAN_MODE_REG_BRS_SCAN_MODE_MASK 0x1 -#define HIF_IRQ_BRS_SCAN_MODE_REG_BRS_SCAN_MODE_SHIFT 0 -#define HIF_IRQ_BRS_SCAN_MODE_REG_BRS_SCAN_MODE_WIDTH 1 -#define HIF_IRQ_BRS_SCAN_MODE_REG_BRS_SCAN_MODE_MAX_VAL 0x1 -#define HIF_IRQ_BRS_SCAN_MODE_REG_BRS_SCAN_MODE_MIN_VAL 0x0 - -#define HIF_IRQ_INT_STATE_REG_ADDR 0xa11000c0 -#define HIF_IRQ_INT_STATE_REG_WIDTH 32 -#define HIF_IRQ_INT_STATE_REG_LENGTH 32 -#define HIF_IRQ_INT_STATE_REG_INT_DB_FIFO_OVFL_MASK 0x1 -#define HIF_IRQ_INT_STATE_REG_INT_DB_FIFO_OVFL_SHIFT 0 -#define HIF_IRQ_INT_STATE_REG_INT_DB_FIFO_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_INT_DB_FIFO_UNFL_MASK 0x2 -#define HIF_IRQ_INT_STATE_REG_INT_DB_FIFO_UNFL_SHIFT 1 -#define HIF_IRQ_INT_STATE_REG_INT_DB_FIFO_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_TPE2HIF_MSIX_CBR_OVFL_MASK 0x4 -#define HIF_IRQ_INT_STATE_REG_TPE2HIF_MSIX_CBR_OVFL_SHIFT 2 -#define HIF_IRQ_INT_STATE_REG_TPE2HIF_MSIX_CBR_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_TPE2HIF_MSIX_CBR_UNFL_MASK 0x8 -#define HIF_IRQ_INT_STATE_REG_TPE2HIF_MSIX_CBR_UNFL_SHIFT 3 -#define HIF_IRQ_INT_STATE_REG_TPE2HIF_MSIX_CBR_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_CMDQM2IRQ_MSIX_CBR_OVFL_MASK 0x10 -#define HIF_IRQ_INT_STATE_REG_CMDQM2IRQ_MSIX_CBR_OVFL_SHIFT 4 -#define HIF_IRQ_INT_STATE_REG_CMDQM2IRQ_MSIX_CBR_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_CMDQM2IRQ_MSIX_CBR_UNFL_MASK 0x20 -#define HIF_IRQ_INT_STATE_REG_CMDQM2IRQ_MSIX_CBR_UNFL_SHIFT 5 -#define HIF_IRQ_INT_STATE_REG_CMDQM2IRQ_MSIX_CBR_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_WRITE_FAIL_MSIX_FIFO_OVFL_MASK 0x40 -#define HIF_IRQ_INT_STATE_REG_WRITE_FAIL_MSIX_FIFO_OVFL_SHIFT 6 -#define 
HIF_IRQ_INT_STATE_REG_WRITE_FAIL_MSIX_FIFO_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_WRITE_FAIL_MSIX_FIFO_UNFL_MASK 0x80 -#define HIF_IRQ_INT_STATE_REG_WRITE_FAIL_MSIX_FIFO_UNFL_SHIFT 7 -#define HIF_IRQ_INT_STATE_REG_WRITE_FAIL_MSIX_FIFO_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_WRITE_BACK_MSIX_FIFO_OVFL_MASK 0x100 -#define HIF_IRQ_INT_STATE_REG_WRITE_BACK_MSIX_FIFO_OVFL_SHIFT 8 -#define HIF_IRQ_INT_STATE_REG_WRITE_BACK_MSIX_FIFO_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_WRITE_BACK_MSIX_FIFO_UNFL_MASK 0x200 -#define HIF_IRQ_INT_STATE_REG_WRITE_BACK_MSIX_FIFO_UNFL_SHIFT 9 -#define HIF_IRQ_INT_STATE_REG_WRITE_BACK_MSIX_FIFO_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_DIST2GEN_MSIX_FIFO_OVFL_MASK 0x400 -#define HIF_IRQ_INT_STATE_REG_DIST2GEN_MSIX_FIFO_OVFL_SHIFT 10 -#define HIF_IRQ_INT_STATE_REG_DIST2GEN_MSIX_FIFO_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_DIST2GEN_MSIX_FIFO_UNFL_MASK 0x800 -#define HIF_IRQ_INT_STATE_REG_DIST2GEN_MSIX_FIFO_UNFL_SHIFT 11 -#define HIF_IRQ_INT_STATE_REG_DIST2GEN_MSIX_FIFO_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_IRQ2ADPT_CREDIT_OVFL_MASK 0x3000 -#define HIF_IRQ_INT_STATE_REG_IRQ2ADPT_CREDIT_OVFL_SHIFT 12 -#define HIF_IRQ_INT_STATE_REG_IRQ2ADPT_CREDIT_OVFL_WIDTH 2 -#define HIF_IRQ_INT_STATE_REG_IRQ2ADPT_CREDIT_UNFL_MASK 0xc000 -#define HIF_IRQ_INT_STATE_REG_IRQ2ADPT_CREDIT_UNFL_SHIFT 14 -#define HIF_IRQ_INT_STATE_REG_IRQ2ADPT_CREDIT_UNFL_WIDTH 2 -#define HIF_IRQ_INT_STATE_REG_REQ_INFO_FIFO_OVFL_MASK 0x30000 -#define HIF_IRQ_INT_STATE_REG_REQ_INFO_FIFO_OVFL_SHIFT 16 -#define HIF_IRQ_INT_STATE_REG_REQ_INFO_FIFO_OVFL_WIDTH 2 -#define HIF_IRQ_INT_STATE_REG_REQ_INFO_FIFO_UNFL_MASK 0xc0000 -#define HIF_IRQ_INT_STATE_REG_REQ_INFO_FIFO_UNFL_SHIFT 18 -#define HIF_IRQ_INT_STATE_REG_REQ_INFO_FIFO_UNFL_WIDTH 2 -#define HIF_IRQ_INT_STATE_REG_ACK_INFO_FIFO_OVFL_MASK 0x300000 -#define HIF_IRQ_INT_STATE_REG_ACK_INFO_FIFO_OVFL_SHIFT 20 -#define HIF_IRQ_INT_STATE_REG_ACK_INFO_FIFO_OVFL_WIDTH 2 -#define 
HIF_IRQ_INT_STATE_REG_ACK_INFO_FIFO_UNFL_MASK 0xc00000 -#define HIF_IRQ_INT_STATE_REG_ACK_INFO_FIFO_UNFL_SHIFT 22 -#define HIF_IRQ_INT_STATE_REG_ACK_INFO_FIFO_UNFL_WIDTH 2 -#define HIF_IRQ_INT_STATE_REG_BRS_SCAN_FIFO_OVFL_MASK 0x1000000 -#define HIF_IRQ_INT_STATE_REG_BRS_SCAN_FIFO_OVFL_SHIFT 24 -#define HIF_IRQ_INT_STATE_REG_BRS_SCAN_FIFO_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_BRS_SCAN_FIFO_UNFL_MASK 0x2000000 -#define HIF_IRQ_INT_STATE_REG_BRS_SCAN_FIFO_UNFL_SHIFT 25 -#define HIF_IRQ_INT_STATE_REG_BRS_SCAN_FIFO_UNFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_BRS_RSLT_FIFO_OVFL_MASK 0x4000000 -#define HIF_IRQ_INT_STATE_REG_BRS_RSLT_FIFO_OVFL_SHIFT 26 -#define HIF_IRQ_INT_STATE_REG_BRS_RSLT_FIFO_OVFL_WIDTH 1 -#define HIF_IRQ_INT_STATE_REG_BRS_RSLT_FIFO_UNFL_MASK 0x8000000 -#define HIF_IRQ_INT_STATE_REG_BRS_RSLT_FIFO_UNFL_SHIFT 27 -#define HIF_IRQ_INT_STATE_REG_BRS_RSLT_FIFO_UNFL_WIDTH 1 - -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ADDR 0xa11000c4 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WIDTH 32 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_LENGTH 32 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_OVFL_INT_MASK_MASK 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_OVFL_INT_MASK_SHIFT 0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_UNFL_INT_MASK_MASK 0x2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_UNFL_INT_MASK_SHIFT 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_INT_DB_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_OVFL_INT_MASK_MASK 0x4 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_OVFL_INT_MASK_SHIFT 2 -#define 
HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_UNFL_INT_MASK_MASK 0x8 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_UNFL_INT_MASK_SHIFT 3 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_TPE2HIF_MSIX_CBR_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_OVFL_INT_MASK_MASK 0x10 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_OVFL_INT_MASK_SHIFT 4 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_UNFL_INT_MASK_MASK 0x20 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_UNFL_INT_MASK_SHIFT 5 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_CMDQM2IRQ_MSIX_CBR_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_OVFL_INT_MASK_MASK 0x40 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_OVFL_INT_MASK_SHIFT 6 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_UNFL_INT_MASK_MASK 0x80 -#define 
HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_UNFL_INT_MASK_SHIFT 7 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_FAIL_MSIX_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_OVFL_INT_MASK_MASK 0x100 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_OVFL_INT_MASK_SHIFT 8 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_UNFL_INT_MASK_MASK 0x200 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_UNFL_INT_MASK_SHIFT 9 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_WRITE_BACK_MSIX_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_OVFL_INT_MASK_MASK 0x400 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_OVFL_INT_MASK_SHIFT 10 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_UNFL_INT_MASK_MASK 0x800 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_UNFL_INT_MASK_SHIFT 11 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define 
HIF_IRQ_INT_STATE_INT_MASK_REG_DIST2GEN_MSIX_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_OVFL_INT_MASK_MASK 0x3000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_OVFL_INT_MASK_SHIFT 12 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_OVFL_INT_MASK_WIDTH 2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_OVFL_INT_MASK_MAX_VAL 0x3 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_UNFL_INT_MASK_MASK 0xc000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_UNFL_INT_MASK_SHIFT 14 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_UNFL_INT_MASK_WIDTH 2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_UNFL_INT_MASK_MAX_VAL 0x3 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_IRQ2ADPT_CREDIT_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_OVFL_INT_MASK_MASK 0x30000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_OVFL_INT_MASK_SHIFT 16 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_OVFL_INT_MASK_WIDTH 2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_OVFL_INT_MASK_MAX_VAL 0x3 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_UNFL_INT_MASK_MASK 0xc0000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_UNFL_INT_MASK_SHIFT 18 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_UNFL_INT_MASK_WIDTH 2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_UNFL_INT_MASK_MAX_VAL 0x3 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_REQ_INFO_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_OVFL_INT_MASK_MASK 0x300000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_OVFL_INT_MASK_SHIFT 20 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_OVFL_INT_MASK_WIDTH 2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_OVFL_INT_MASK_MAX_VAL 0x3 -#define 
HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_UNFL_INT_MASK_MASK 0xc00000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_UNFL_INT_MASK_SHIFT 22 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_UNFL_INT_MASK_WIDTH 2 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_UNFL_INT_MASK_MAX_VAL 0x3 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_ACK_INFO_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_OVFL_INT_MASK_MASK 0x1000000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_OVFL_INT_MASK_SHIFT 24 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_UNFL_INT_MASK_MASK 0x2000000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_UNFL_INT_MASK_SHIFT 25 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_SCAN_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_OVFL_INT_MASK_MASK 0x4000000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_OVFL_INT_MASK_SHIFT 26 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_UNFL_INT_MASK_MASK 0x8000000 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_UNFL_INT_MASK_SHIFT 27 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define 
HIF_IRQ_INT_STATE_INT_MASK_REG_BRS_RSLT_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000c8 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_CFG_VTR_TBL_START_MASK 0x1 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_CFG_VTR_TBL_START_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_CFG_VTR_TBL_START_WIDTH 1 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_CFG_VTR_TBL_START_MAX_VAL 0x1 -#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_CFG_VTR_TBL_START_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa11000cc -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_CFG_VTR_TBL_CMD_MASK 0x1 -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_CFG_VTR_TBL_CMD_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_CFG_VTR_TBL_CMD_WIDTH 1 -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_CFG_VTR_TBL_CMD_MAX_VAL 0x1 -#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_CFG_VTR_TBL_CMD_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000d0 -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_CFG_VTR_TBL_ADDR_MASK 0xfff -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_CFG_VTR_TBL_ADDR_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_CFG_VTR_TBL_ADDR_WIDTH 12 -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_CFG_VTR_TBL_ADDR_MAX_VAL 0xfff -#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_CFG_VTR_TBL_ADDR_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000d4 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_CFG_VTR_TBL_MSG_LADDR_MASK 0xffffffff -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_CFG_VTR_TBL_MSG_LADDR_SHIFT 0 -#define 
HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_CFG_VTR_TBL_MSG_LADDR_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_CFG_VTR_TBL_MSG_LADDR_MAX_VAL 0xffffffff -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_CFG_VTR_TBL_MSG_LADDR_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000d8 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_CFG_VTR_TBL_MSG_UADDR_MASK 0xffffffff -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_CFG_VTR_TBL_MSG_UADDR_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_CFG_VTR_TBL_MSG_UADDR_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_CFG_VTR_TBL_MSG_UADDR_MAX_VAL 0xffffffff -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_CFG_VTR_TBL_MSG_UADDR_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000dc -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_CFG_VTR_TBL_MSG_DATA_MASK 0xffffffff -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_CFG_VTR_TBL_MSG_DATA_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_CFG_VTR_TBL_MSG_DATA_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_CFG_VTR_TBL_MSG_DATA_MAX_VAL 0xffffffff -#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_CFG_VTR_TBL_MSG_DATA_MIN_VAL 0x0 - -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa11000e0 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_FUNC_ID_MASK 0x7ff -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_FUNC_ID_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_FUNC_ID_WIDTH 11 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_FUNC_ID_MAX_VAL 0x7ff -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_FUNC_ID_MIN_VAL 0x0 -#define 
HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_VECTOR_EN_MASK 0x800 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_VECTOR_EN_SHIFT 11 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_VECTOR_EN_WIDTH 1 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_VECTOR_EN_MAX_VAL 0x1 -#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_CFG_VTR_TBL_VECTOR_EN_MIN_VAL 0x0 - -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000e4 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_LADDR_REG_WIDTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_LADDR_REG_LENGTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_LADDR_REG_RD_VTR_TBL_MSG_LADDR_MASK 0xffffffff -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_LADDR_REG_RD_VTR_TBL_MSG_LADDR_SHIFT 0 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_LADDR_REG_RD_VTR_TBL_MSG_LADDR_WIDTH 32 - -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000e8 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_UADDR_REG_WIDTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_UADDR_REG_LENGTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_UADDR_REG_RD_VTR_TBL_MSG_UADDR_MASK 0xffffffff -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_UADDR_REG_RD_VTR_TBL_MSG_UADDR_SHIFT 0 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_UADDR_REG_RD_VTR_TBL_MSG_UADDR_WIDTH 32 - -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000ec -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_DATA_REG_WIDTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_DATA_REG_LENGTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_DATA_REG_RD_VTR_TBL_MSG_DATA_MASK 0xffffffff -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_DATA_REG_RD_VTR_TBL_MSG_DATA_SHIFT 0 -#define HIF_IRQ_RD_VECTOR_TABLE_MSG_DATA_REG_RD_VTR_TBL_MSG_DATA_WIDTH 32 - -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_ADDR 0xa11000f0 -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_WIDTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_LENGTH 32 -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_RD_VTR_TBL_FUNC_ID_MASK 0x7ff -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_RD_VTR_TBL_FUNC_ID_SHIFT 0 -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_RD_VTR_TBL_FUNC_ID_WIDTH 11 -#define 
HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_RD_VTR_TBL_VECTOR_EN_MASK 0x800 -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_RD_VTR_TBL_VECTOR_EN_SHIFT 11 -#define HIF_IRQ_RD_VECTOR_TABLE_CTRL_REG_RD_VTR_TBL_VECTOR_EN_WIDTH 1 - -#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa11000f4 -#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_WIDTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_LENGTH 32 -#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_CFG_VTR_TBL_BUSY_MASK 0x1 -#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_CFG_VTR_TBL_BUSY_SHIFT 0 -#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_CFG_VTR_TBL_BUSY_WIDTH 1 - -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_ADDR 0xa11000f8 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_RD_PBA_MASK_TBL_START_MASK 0x1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_RD_PBA_MASK_TBL_START_SHIFT 0 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_RD_PBA_MASK_TBL_START_WIDTH 1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_RD_PBA_MASK_TBL_START_MAX_VAL 0x1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_START_REG_RD_PBA_MASK_TBL_START_MIN_VAL 0x0 - -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_ADDR 0xa11000fc -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_RD_PBA_MASK_TBL_CMD_MASK 0x1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_RD_PBA_MASK_TBL_CMD_SHIFT 0 -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_RD_PBA_MASK_TBL_CMD_WIDTH 1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_RD_PBA_MASK_TBL_CMD_MAX_VAL 0x1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_CMD_REG_RD_PBA_MASK_TBL_CMD_MIN_VAL 0x0 - -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_ADDR 0xa1100100 -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_RD_PBA_MASK_TBL_ADDR_MASK 0x3f -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_RD_PBA_MASK_TBL_ADDR_SHIFT 0 -#define 
HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_RD_PBA_MASK_TBL_ADDR_WIDTH 6 -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_RD_PBA_MASK_TBL_ADDR_MAX_VAL 0x3f -#define HIF_IRQ_RD_PBA_MASK_TABLE_ADDR_REG_RD_PBA_MASK_TBL_ADDR_MIN_VAL 0x0 - -#define HIF_IRQ_RD_PBA_MASK_TABLE_LDATA_REG_ADDR 0xa1100104 -#define HIF_IRQ_RD_PBA_MASK_TABLE_LDATA_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_LDATA_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_LDATA_REG_RD_PBA_MASK_TBL_LDATA_MASK 0xffffffff -#define HIF_IRQ_RD_PBA_MASK_TABLE_LDATA_REG_RD_PBA_MASK_TBL_LDATA_SHIFT 0 -#define HIF_IRQ_RD_PBA_MASK_TABLE_LDATA_REG_RD_PBA_MASK_TBL_LDATA_WIDTH 32 - -#define HIF_IRQ_RD_PBA_MASK_TABLE_UDATA_REG_ADDR 0xa1100108 -#define HIF_IRQ_RD_PBA_MASK_TABLE_UDATA_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_UDATA_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_UDATA_REG_RD_PBA_MASK_TBL_UDATA_MASK 0xffffffff -#define HIF_IRQ_RD_PBA_MASK_TABLE_UDATA_REG_RD_PBA_MASK_TBL_UDATA_SHIFT 0 -#define HIF_IRQ_RD_PBA_MASK_TABLE_UDATA_REG_RD_PBA_MASK_TBL_UDATA_WIDTH 32 - -#define HIF_IRQ_RD_PBA_MASK_TABLE_BUSY_REG_ADDR 0xa110010c -#define HIF_IRQ_RD_PBA_MASK_TABLE_BUSY_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_BUSY_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TABLE_BUSY_REG_RD_PBA_MASK_TBL_BUSY_MASK 0x1 -#define HIF_IRQ_RD_PBA_MASK_TABLE_BUSY_REG_RD_PBA_MASK_TBL_BUSY_SHIFT 0 -#define HIF_IRQ_RD_PBA_MASK_TABLE_BUSY_REG_RD_PBA_MASK_TBL_BUSY_WIDTH 1 - -#define HIF_IRQ_DIST_INT_STATE_REG_ADDR 0xa110012c -#define HIF_IRQ_DIST_INT_STATE_REG_WIDTH 32 -#define HIF_IRQ_DIST_INT_STATE_REG_LENGTH 32 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_DMA_RX_INT_STS_MASK 0x3 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_DMA_RX_INT_STS_SHIFT 0 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_DMA_RX_INT_STS_WIDTH 2 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_DMA_TX_INT_STS_MASK 0xc -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_DMA_TX_INT_STS_SHIFT 2 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_DMA_TX_INT_STS_WIDTH 2 -#define 
HIF_IRQ_DIST_INT_STATE_REG_PIO2IRQ_PIO_REQ_INT_STS_MASK 0x30 -#define HIF_IRQ_DIST_INT_STATE_REG_PIO2IRQ_PIO_REQ_INT_STS_SHIFT 4 -#define HIF_IRQ_DIST_INT_STATE_REG_PIO2IRQ_PIO_REQ_INT_STS_WIDTH 2 -#define HIF_IRQ_DIST_INT_STATE_REG_PCIE2HIF_PCIE_MSG_INT_STS_MASK 0xc0 -#define HIF_IRQ_DIST_INT_STATE_REG_PCIE2HIF_PCIE_MSG_INT_STS_SHIFT 6 -#define HIF_IRQ_DIST_INT_STATE_REG_PCIE2HIF_PCIE_MSG_INT_STS_WIDTH 2 -#define HIF_IRQ_DIST_INT_STATE_REG_TOP_INT_SUM_STS_MASK 0x100 -#define HIF_IRQ_DIST_INT_STATE_REG_TOP_INT_SUM_STS_SHIFT 8 -#define HIF_IRQ_DIST_INT_STATE_REG_TOP_INT_SUM_STS_WIDTH 1 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_CMDQ_REQ_INT_STS_MASK 0x200 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_CMDQ_REQ_INT_STS_SHIFT 9 -#define HIF_IRQ_DIST_INT_STATE_REG_CMDQM2IRQ_CMDQ_REQ_INT_STS_WIDTH 1 -#define HIF_IRQ_DIST_INT_STATE_REG_TBL2IRQ_TBL_RD_DONE_INT_STS_MASK 0x400 -#define HIF_IRQ_DIST_INT_STATE_REG_TBL2IRQ_TBL_RD_DONE_INT_STS_SHIFT 10 -#define HIF_IRQ_DIST_INT_STATE_REG_TBL2IRQ_TBL_RD_DONE_INT_STS_WIDTH 1 - -#define HIF_IRQ_INT_DB_PBA0_REQ_CNT_REG_ADDR 0xa1100164 -#define HIF_IRQ_INT_DB_PBA0_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_INT_DB_PBA0_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_INT_DB_PBA0_REQ_CNT_REG_INT_DB_PBA0_REQ_CNT_MASK 0xff -#define HIF_IRQ_INT_DB_PBA0_REQ_CNT_REG_INT_DB_PBA0_REQ_CNT_SHIFT 0 -#define HIF_IRQ_INT_DB_PBA0_REQ_CNT_REG_INT_DB_PBA0_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_INT_DB_PBA1_REQ_CNT_REG_ADDR 0xa1100168 -#define HIF_IRQ_INT_DB_PBA1_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_INT_DB_PBA1_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_INT_DB_PBA1_REQ_CNT_REG_INT_DB_PBA1_REQ_CNT_MASK 0xff -#define HIF_IRQ_INT_DB_PBA1_REQ_CNT_REG_INT_DB_PBA1_REQ_CNT_SHIFT 0 -#define HIF_IRQ_INT_DB_PBA1_REQ_CNT_REG_INT_DB_PBA1_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_INT_DB_MASK0_REQ_CNT_REG_ADDR 0xa110016c -#define HIF_IRQ_INT_DB_MASK0_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_INT_DB_MASK0_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_INT_DB_MASK0_REQ_CNT_REG_INT_DB_MASK0_REQ_CNT_MASK 
0xff -#define HIF_IRQ_INT_DB_MASK0_REQ_CNT_REG_INT_DB_MASK0_REQ_CNT_SHIFT 0 -#define HIF_IRQ_INT_DB_MASK0_REQ_CNT_REG_INT_DB_MASK0_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_INT_DB_MASK1_REQ_CNT_REG_ADDR 0xa1100170 -#define HIF_IRQ_INT_DB_MASK1_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_INT_DB_MASK1_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_INT_DB_MASK1_REQ_CNT_REG_INT_DB_MASK1_REQ_CNT_MASK 0xff -#define HIF_IRQ_INT_DB_MASK1_REQ_CNT_REG_INT_DB_MASK1_REQ_CNT_SHIFT 0 -#define HIF_IRQ_INT_DB_MASK1_REQ_CNT_REG_INT_DB_MASK1_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_INT_DB_MSIX_REQ_CNT_REG_ADDR 0xa1100174 -#define HIF_IRQ_INT_DB_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_INT_DB_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_INT_DB_MSIX_REQ_CNT_REG_INT_DB_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_INT_DB_MSIX_REQ_CNT_REG_INT_DB_MSIX_REQ_CNT_SHIFT 0 -#define HIF_IRQ_INT_DB_MSIX_REQ_CNT_REG_INT_DB_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_CMDQM2IRQ_MSIX_REQ_CNT_REG_ADDR 0xa1100178 -#define HIF_IRQ_CMDQM2IRQ_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_MSIX_REQ_CNT_REG_CMDQM2IRQ_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_CMDQM2IRQ_MSIX_REQ_CNT_REG_CMDQM2IRQ_MSIX_REQ_CNT_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_MSIX_REQ_CNT_REG_CMDQM2IRQ_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_TPE2HIF_MSIX_REQ_CNT_REG_ADDR 0xa110017c -#define HIF_IRQ_TPE2HIF_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE2HIF_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE2HIF_MSIX_REQ_CNT_REG_TPE2HIF_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_TPE2HIF_MSIX_REQ_CNT_REG_TPE2HIF_MSIX_REQ_CNT_SHIFT 0 -#define HIF_IRQ_TPE2HIF_MSIX_REQ_CNT_REG_TPE2HIF_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_WR_BACK_MSIX_REQ_CNT_REG_ADDR 0xa1100180 -#define HIF_IRQ_WR_BACK_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_WR_BACK_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_WR_BACK_MSIX_REQ_CNT_REG_WR_BACK_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_WR_BACK_MSIX_REQ_CNT_REG_WR_BACK_MSIX_REQ_CNT_SHIFT 0 -#define 
HIF_IRQ_WR_BACK_MSIX_REQ_CNT_REG_WR_BACK_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_WR_FAIL_MSIX_REQ_CNT_REG_ADDR 0xa1100184 -#define HIF_IRQ_WR_FAIL_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_WR_FAIL_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_WR_FAIL_MSIX_REQ_CNT_REG_WR_FAIL_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_WR_FAIL_MSIX_REQ_CNT_REG_WR_FAIL_MSIX_REQ_CNT_SHIFT 0 -#define HIF_IRQ_WR_FAIL_MSIX_REQ_CNT_REG_WR_FAIL_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_DIST2GEN_MSIX_REQ_CNT_REG_ADDR 0xa1100188 -#define HIF_IRQ_DIST2GEN_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_DIST2GEN_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_DIST2GEN_MSIX_REQ_CNT_REG_DIST2GEN_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_DIST2GEN_MSIX_REQ_CNT_REG_DIST2GEN_MSIX_REQ_CNT_SHIFT 0 -#define HIF_IRQ_DIST2GEN_MSIX_REQ_CNT_REG_DIST2GEN_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_BRS2SCH_MSIX_REQ_CNT_REG_ADDR 0xa110018c -#define HIF_IRQ_BRS2SCH_MSIX_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_BRS2SCH_MSIX_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_BRS2SCH_MSIX_REQ_CNT_REG_BRS2SCH_MSIX_REQ_CNT_MASK 0xff -#define HIF_IRQ_BRS2SCH_MSIX_REQ_CNT_REG_BRS2SCH_MSIX_REQ_CNT_SHIFT 0 -#define HIF_IRQ_BRS2SCH_MSIX_REQ_CNT_REG_BRS2SCH_MSIX_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_VECTOR_EN_DROP_CNT_REG_ADDR 0xa1100190 -#define HIF_IRQ_VECTOR_EN_DROP_CNT_REG_WIDTH 32 -#define HIF_IRQ_VECTOR_EN_DROP_CNT_REG_LENGTH 32 -#define HIF_IRQ_VECTOR_EN_DROP_CNT_REG_VECTOR_EN_DROP_CNT_MASK 0xff -#define HIF_IRQ_VECTOR_EN_DROP_CNT_REG_VECTOR_EN_DROP_CNT_SHIFT 0 -#define HIF_IRQ_VECTOR_EN_DROP_CNT_REG_VECTOR_EN_DROP_CNT_WIDTH 8 - -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_ADDR 0xa1100198 -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_SIZE 2 -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_IRQ2ADPT_PCIE_REQ_CNT_MASK 0xff -#define HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_IRQ2ADPT_PCIE_REQ_CNT_SHIFT 0 -#define 
HIF_IRQ_IRQ2ADPT_PCIE_REQ_CNT_REG_IRQ2ADPT_PCIE_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_ADDR 0xa11001a0 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_WIDTH 32 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_LENGTH 32 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_SIZE 2 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_ADPT2IRQ_PCIE_ACK_CNT_MASK 0xff -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_ADPT2IRQ_PCIE_ACK_CNT_SHIFT 0 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_CNT_REG_ADPT2IRQ_PCIE_ACK_CNT_WIDTH 8 - -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_ADDR 0xa11001a8 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_WIDTH 32 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_LENGTH 32 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_SIZE 2 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_ADPT2IRQ_PCIE_ACK_ERR_CNT_MASK 0xff -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_ADPT2IRQ_PCIE_ACK_ERR_CNT_SHIFT 0 -#define HIF_IRQ_ADPT2IRQ_PCIE_ACK_ERR_CNT_REG_ADPT2IRQ_PCIE_ACK_ERR_CNT_WIDTH 8 - -#define HIF_IRQ_CFG_VTR_TBL_SUM_CNT_REG_ADDR 0xa11001b0 -#define HIF_IRQ_CFG_VTR_TBL_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_CFG_VTR_TBL_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_CFG_VTR_TBL_SUM_CNT_REG_CFG_VTR_TBL_SUM_CNT_MASK 0xff -#define HIF_IRQ_CFG_VTR_TBL_SUM_CNT_REG_CFG_VTR_TBL_SUM_CNT_SHIFT 0 -#define HIF_IRQ_CFG_VTR_TBL_SUM_CNT_REG_CFG_VTR_TBL_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_CFG_VTR_TBL_NRDY_CNT_REG_ADDR 0xa11001b4 -#define HIF_IRQ_CFG_VTR_TBL_NRDY_CNT_REG_WIDTH 32 -#define HIF_IRQ_CFG_VTR_TBL_NRDY_CNT_REG_LENGTH 32 -#define HIF_IRQ_CFG_VTR_TBL_NRDY_CNT_REG_CFG_VTR_TBL_NRDY_CNT_MASK 0xff -#define HIF_IRQ_CFG_VTR_TBL_NRDY_CNT_REG_CFG_VTR_TBL_NRDY_CNT_SHIFT 0 -#define HIF_IRQ_CFG_VTR_TBL_NRDY_CNT_REG_CFG_VTR_TBL_NRDY_CNT_WIDTH 8 - -#define HIF_IRQ_CFG_VTR_TBL_WR_REQ_CNT_REG_ADDR 0xa11001b8 -#define HIF_IRQ_CFG_VTR_TBL_WR_REQ_CNT_REG_WIDTH 32 -#define 
HIF_IRQ_CFG_VTR_TBL_WR_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_CFG_VTR_TBL_WR_REQ_CNT_REG_CFG_VTR_TBL_WR_REQ_CNT_MASK 0xff -#define HIF_IRQ_CFG_VTR_TBL_WR_REQ_CNT_REG_CFG_VTR_TBL_WR_REQ_CNT_SHIFT 0 -#define HIF_IRQ_CFG_VTR_TBL_WR_REQ_CNT_REG_CFG_VTR_TBL_WR_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_CFG_VTR_TBL_WR_ACK_CNT_REG_ADDR 0xa11001bc -#define HIF_IRQ_CFG_VTR_TBL_WR_ACK_CNT_REG_WIDTH 32 -#define HIF_IRQ_CFG_VTR_TBL_WR_ACK_CNT_REG_LENGTH 32 -#define HIF_IRQ_CFG_VTR_TBL_WR_ACK_CNT_REG_CFG_VTR_TBL_WR_ACK_CNT_MASK 0xff -#define HIF_IRQ_CFG_VTR_TBL_WR_ACK_CNT_REG_CFG_VTR_TBL_WR_ACK_CNT_SHIFT 0 -#define HIF_IRQ_CFG_VTR_TBL_WR_ACK_CNT_REG_CFG_VTR_TBL_WR_ACK_CNT_WIDTH 8 - -#define HIF_IRQ_CFG_VTR_TBL_RD_REQ_CNT_REG_ADDR 0xa11001c0 -#define HIF_IRQ_CFG_VTR_TBL_RD_REQ_CNT_REG_WIDTH 32 -#define HIF_IRQ_CFG_VTR_TBL_RD_REQ_CNT_REG_LENGTH 32 -#define HIF_IRQ_CFG_VTR_TBL_RD_REQ_CNT_REG_CFG_VTR_TBL_RD_REQ_CNT_MASK 0xff -#define HIF_IRQ_CFG_VTR_TBL_RD_REQ_CNT_REG_CFG_VTR_TBL_RD_REQ_CNT_SHIFT 0 -#define HIF_IRQ_CFG_VTR_TBL_RD_REQ_CNT_REG_CFG_VTR_TBL_RD_REQ_CNT_WIDTH 8 - -#define HIF_IRQ_CFG_VTR_TBL_RD_ACK_CNT_REG_ADDR 0xa11001c4 -#define HIF_IRQ_CFG_VTR_TBL_RD_ACK_CNT_REG_WIDTH 32 -#define HIF_IRQ_CFG_VTR_TBL_RD_ACK_CNT_REG_LENGTH 32 -#define HIF_IRQ_CFG_VTR_TBL_RD_ACK_CNT_REG_CFG_VTR_TBL_RD_ACK_CNT_MASK 0xff -#define HIF_IRQ_CFG_VTR_TBL_RD_ACK_CNT_REG_CFG_VTR_TBL_RD_ACK_CNT_SHIFT 0 -#define HIF_IRQ_CFG_VTR_TBL_RD_ACK_CNT_REG_CFG_VTR_TBL_RD_ACK_CNT_WIDTH 8 - -#define HIF_IRQ_RD_PBA_TBL_CNT_REG_ADDR 0xa11001c8 -#define HIF_IRQ_RD_PBA_TBL_CNT_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_TBL_CNT_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_TBL_CNT_REG_RD_PBA_TBL_CNT_MASK 0xff -#define HIF_IRQ_RD_PBA_TBL_CNT_REG_RD_PBA_TBL_CNT_SHIFT 0 -#define HIF_IRQ_RD_PBA_TBL_CNT_REG_RD_PBA_TBL_CNT_WIDTH 8 - -#define HIF_IRQ_RD_PBA_TBL_ACK_CNT_REG_ADDR 0xa11001cc -#define HIF_IRQ_RD_PBA_TBL_ACK_CNT_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_TBL_ACK_CNT_REG_LENGTH 32 -#define 
HIF_IRQ_RD_PBA_TBL_ACK_CNT_REG_RD_PBA_TBL_ACK_CNT_MASK 0xff -#define HIF_IRQ_RD_PBA_TBL_ACK_CNT_REG_RD_PBA_TBL_ACK_CNT_SHIFT 0 -#define HIF_IRQ_RD_PBA_TBL_ACK_CNT_REG_RD_PBA_TBL_ACK_CNT_WIDTH 8 - -#define HIF_IRQ_RD_MASK_TBL_CNT_REG_ADDR 0xa11001d0 -#define HIF_IRQ_RD_MASK_TBL_CNT_REG_WIDTH 32 -#define HIF_IRQ_RD_MASK_TBL_CNT_REG_LENGTH 32 -#define HIF_IRQ_RD_MASK_TBL_CNT_REG_RD_MASK_TBL_CNT_MASK 0xff -#define HIF_IRQ_RD_MASK_TBL_CNT_REG_RD_MASK_TBL_CNT_SHIFT 0 -#define HIF_IRQ_RD_MASK_TBL_CNT_REG_RD_MASK_TBL_CNT_WIDTH 8 - -#define HIF_IRQ_RD_MASK_TBL_ACK_CNT_REG_ADDR 0xa11001d4 -#define HIF_IRQ_RD_MASK_TBL_ACK_CNT_REG_WIDTH 32 -#define HIF_IRQ_RD_MASK_TBL_ACK_CNT_REG_LENGTH 32 -#define HIF_IRQ_RD_MASK_TBL_ACK_CNT_REG_RD_MASK_TBL_ACK_CNT_MASK 0xff -#define HIF_IRQ_RD_MASK_TBL_ACK_CNT_REG_RD_MASK_TBL_ACK_CNT_SHIFT 0 -#define HIF_IRQ_RD_MASK_TBL_ACK_CNT_REG_RD_MASK_TBL_ACK_CNT_WIDTH 8 - -#define HIF_IRQ_RD_PBA_MASK_TBL_NRDY_CNT_REG_ADDR 0xa11001d8 -#define HIF_IRQ_RD_PBA_MASK_TBL_NRDY_CNT_REG_WIDTH 32 -#define HIF_IRQ_RD_PBA_MASK_TBL_NRDY_CNT_REG_LENGTH 32 -#define HIF_IRQ_RD_PBA_MASK_TBL_NRDY_CNT_REG_RD_PBA_MASK_TBL_NRDY_CNT_MASK 0xff -#define HIF_IRQ_RD_PBA_MASK_TBL_NRDY_CNT_REG_RD_PBA_MASK_TBL_NRDY_CNT_SHIFT 0 -#define HIF_IRQ_RD_PBA_MASK_TBL_NRDY_CNT_REG_RD_PBA_MASK_TBL_NRDY_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_INT_SUM_CNT_REG_ADDR 0xa11001dc -#define HIF_IRQ_HIF_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_INT_SUM_CNT_REG_HIF_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_INT_SUM_CNT_REG_HIF_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_INT_SUM_CNT_REG_HIF_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_INT_SUM_CNT_REG_ADDR 0xa11001e0 -#define HIF_IRQ_TM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_INT_SUM_CNT_REG_TM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_INT_SUM_CNT_REG_TM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_INT_SUM_CNT_REG_TM_INT_SUM_CNT_WIDTH 8 - -#define 
HIF_IRQ_TPE_INT_SUM_CNT_REG_ADDR 0xa11001e4 -#define HIF_IRQ_TPE_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_INT_SUM_CNT_REG_TPE_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_INT_SUM_CNT_REG_TPE_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_INT_SUM_CNT_REG_TPE_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_INT_SUM_CNT_REG_ADDR 0xa11001e8 -#define HIF_IRQ_IPP_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_INT_SUM_CNT_REG_IPP_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_INT_SUM_CNT_REG_IPP_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_IPP_INT_SUM_CNT_REG_IPP_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_EPP_INT_SUM_CNT_REG_ADDR 0xa11001ec -#define HIF_IRQ_EPP_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_EPP_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_EPP_INT_SUM_CNT_REG_EPP_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_EPP_INT_SUM_CNT_REG_EPP_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_EPP_INT_SUM_CNT_REG_EPP_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_NIF_INT_SUM_CNT_REG_ADDR 0xa11001f0 -#define HIF_IRQ_NIF_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_NIF_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_NIF_INT_SUM_CNT_REG_NIF_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_NIF_INT_SUM_CNT_REG_NIF_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_NIF_INT_SUM_CNT_REG_NIF_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TOP_INT_SUM_CNT_REG_ADDR 0xa11001f4 -#define HIF_IRQ_TOP_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TOP_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TOP_INT_SUM_CNT_REG_TOP_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TOP_INT_SUM_CNT_REG_TOP_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TOP_INT_SUM_CNT_REG_TOP_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_CPM_INT_SUM_CNT_REG_ADDR 0xa11001f8 -#define HIF_IRQ_HIF_CPM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_CPM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_CPM_INT_SUM_CNT_REG_HIF_CPM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_CPM_INT_SUM_CNT_REG_HIF_CPM_INT_SUM_CNT_SHIFT 0 -#define 
HIF_IRQ_HIF_CPM_INT_SUM_CNT_REG_HIF_CPM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_PIO_INT_SUM_CNT_REG_ADDR 0xa11001fc -#define HIF_IRQ_HIF_PIO_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_PIO_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_PIO_INT_SUM_CNT_REG_HIF_PIO_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_PIO_INT_SUM_CNT_REG_HIF_PIO_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_PIO_INT_SUM_CNT_REG_HIF_PIO_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_ADPT_INT_SUM_CNT_REG_ADDR 0xa1100200 -#define HIF_IRQ_HIF_ADPT_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_ADPT_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_ADPT_INT_SUM_CNT_REG_HIF_ADPT_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_ADPT_INT_SUM_CNT_REG_HIF_ADPT_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_ADPT_INT_SUM_CNT_REG_HIF_ADPT_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_TBL_INT_SUM_CNT_REG_ADDR 0xa1100204 -#define HIF_IRQ_HIF_TBL_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_TBL_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_TBL_INT_SUM_CNT_REG_HIF_TBL_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_TBL_INT_SUM_CNT_REG_HIF_TBL_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_TBL_INT_SUM_CNT_REG_HIF_TBL_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_CMDQM_INT_SUM_CNT_REG_ADDR 0xa1100208 -#define HIF_IRQ_HIF_CMDQM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_CMDQM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_CMDQM_INT_SUM_CNT_REG_HIF_CMDQM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_CMDQM_INT_SUM_CNT_REG_HIF_CMDQM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_CMDQM_INT_SUM_CNT_REG_HIF_CMDQM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_IRQ_INT_SUM_CNT_REG_ADDR 0xa110020c -#define HIF_IRQ_HIF_IRQ_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_IRQ_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_IRQ_INT_SUM_CNT_REG_HIF_IRQ_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_IRQ_INT_SUM_CNT_REG_HIF_IRQ_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_IRQ_INT_SUM_CNT_REG_HIF_IRQ_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_HIF_NOC_INT_SUM_CNT_REG_ADDR 0xa1100210 -#define 
HIF_IRQ_HIF_NOC_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF_NOC_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF_NOC_INT_SUM_CNT_REG_HIF_NOC_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_HIF_NOC_INT_SUM_CNT_REG_HIF_NOC_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_HIF_NOC_INT_SUM_CNT_REG_HIF_NOC_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_MTR_INT_SUM_CNT_REG_ADDR 0xa1100214 -#define HIF_IRQ_TM_MTR_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_MTR_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_MTR_INT_SUM_CNT_REG_TM_MTR_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_MTR_INT_SUM_CNT_REG_TM_MTR_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_MTR_INT_SUM_CNT_REG_TM_MTR_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_QMU_INT_SUM_CNT_REG_ADDR 0xa1100218 -#define HIF_IRQ_TM_QMU_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_QMU_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_QMU_INT_SUM_CNT_REG_TM_QMU_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_QMU_INT_SUM_CNT_REG_TM_QMU_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_QMU_INT_SUM_CNT_REG_TM_QMU_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_FRG_INT_SUM_CNT_REG_ADDR 0xa110021c -#define HIF_IRQ_TM_FRG_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_FRG_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_FRG_INT_SUM_CNT_REG_TM_FRG_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_FRG_INT_SUM_CNT_REG_TM_FRG_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_FRG_INT_SUM_CNT_REG_TM_FRG_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_PDB_INT_SUM_CNT_REG_ADDR 0xa1100220 -#define HIF_IRQ_TM_PDB_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_PDB_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_PDB_INT_SUM_CNT_REG_TM_PDB_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_PDB_INT_SUM_CNT_REG_TM_PDB_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_PDB_INT_SUM_CNT_REG_TM_PDB_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_PRA_INT_SUM_CNT_REG_ADDR 0xa1100224 -#define HIF_IRQ_TM_PRA_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_PRA_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_PRA_INT_SUM_CNT_REG_TM_PRA_INT_SUM_CNT_MASK 0xff -#define 
HIF_IRQ_TM_PRA_INT_SUM_CNT_REG_TM_PRA_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_PRA_INT_SUM_CNT_REG_TM_PRA_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_SCA_INT_SUM_CNT_REG_ADDR 0xa1100228 -#define HIF_IRQ_TM_SCA_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_SCA_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_SCA_INT_SUM_CNT_REG_TM_SCA_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_SCA_INT_SUM_CNT_REG_TM_SCA_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_SCA_INT_SUM_CNT_REG_TM_SCA_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TM_OCA_INT_SUM_CNT_REG_ADDR 0xa110022c -#define HIF_IRQ_TM_OCA_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TM_OCA_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TM_OCA_INT_SUM_CNT_REG_TM_OCA_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TM_OCA_INT_SUM_CNT_REG_TM_OCA_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TM_OCA_INT_SUM_CNT_REG_TM_OCA_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_MMC_INT_SUM_CNT_REG_ADDR 0xa1100230 -#define HIF_IRQ_TPE_MMC_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_MMC_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_MMC_INT_SUM_CNT_REG_TPE_MMC_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_MMC_INT_SUM_CNT_REG_TPE_MMC_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_MMC_INT_SUM_CNT_REG_TPE_MMC_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_QPM_INT_SUM_CNT_REG_ADDR 0xa1100234 -#define HIF_IRQ_TPE_QPM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_QPM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_QPM_INT_SUM_CNT_REG_TPE_QPM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_QPM_INT_SUM_CNT_REG_TPE_QPM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_QPM_INT_SUM_CNT_REG_TPE_QPM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_SV_INT_SUM_CNT_REG_ADDR 0xa1100238 -#define HIF_IRQ_TPE_SV_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_SV_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_SV_INT_SUM_CNT_REG_TPE_SV_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_SV_INT_SUM_CNT_REG_TPE_SV_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_SV_INT_SUM_CNT_REG_TPE_SV_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_MER_INT_SUM_CNT_REG_ADDR 
0xa110023c -#define HIF_IRQ_TPE_MER_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_MER_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_MER_INT_SUM_CNT_REG_TPE_MER_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_MER_INT_SUM_CNT_REG_TPE_MER_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_MER_INT_SUM_CNT_REG_TPE_MER_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_TOP_INT_SUM_CNT_REG_ADDR 0xa1100240 -#define HIF_IRQ_TPE_TOP_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_TOP_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_TOP_INT_SUM_CNT_REG_TPE_TOP_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_TOP_INT_SUM_CNT_REG_TPE_TOP_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_TOP_INT_SUM_CNT_REG_TPE_TOP_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_CRDT_INT_SUM_CNT_REG_ADDR 0xa1100244 -#define HIF_IRQ_TPE_CRDT_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_CRDT_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_CRDT_INT_SUM_CNT_REG_TPE_CRDT_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_CRDT_INT_SUM_CNT_REG_TPE_CRDT_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_CRDT_INT_SUM_CNT_REG_TPE_CRDT_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_MET_INT_SUM_CNT_REG_ADDR 0xa1100248 -#define HIF_IRQ_TPE_MET_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_MET_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_MET_INT_SUM_CNT_REG_TPE_MET_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_MET_INT_SUM_CNT_REG_TPE_MET_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_MET_INT_SUM_CNT_REG_TPE_MET_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_SHAP_INT_SUM_CNT_REG_ADDR 0xa110024c -#define HIF_IRQ_TPE_SHAP_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_SHAP_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_SHAP_INT_SUM_CNT_REG_TPE_SHAP_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_SHAP_INT_SUM_CNT_REG_TPE_SHAP_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_SHAP_INT_SUM_CNT_REG_TPE_SHAP_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_MA_INT_SUM_CNT_REG_ADDR 0xa1100250 -#define HIF_IRQ_TPE_MA_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_MA_INT_SUM_CNT_REG_LENGTH 32 -#define 
HIF_IRQ_TPE_MA_INT_SUM_CNT_REG_TPE_MA_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_MA_INT_SUM_CNT_REG_TPE_MA_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_MA_INT_SUM_CNT_REG_TPE_MA_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_CEM_INT_SUM_CNT_REG_ADDR 0xa1100254 -#define HIF_IRQ_TPE_CEM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_CEM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_CEM_INT_SUM_CNT_REG_TPE_CEM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_CEM_INT_SUM_CNT_REG_TPE_CEM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_CEM_INT_SUM_CNT_REG_TPE_CEM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_PET_INT_SUM_CNT_REG_ADDR 0xa1100258 -#define HIF_IRQ_TPE_PET_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_PET_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_PET_INT_SUM_CNT_REG_TPE_PET_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_PET_INT_SUM_CNT_REG_TPE_PET_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_PET_INT_SUM_CNT_REG_TPE_PET_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_TPE_PG_INT_SUM_CNT_REG_ADDR 0xa110025c -#define HIF_IRQ_TPE_PG_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_TPE_PG_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_TPE_PG_INT_SUM_CNT_REG_TPE_PG_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_TPE_PG_INT_SUM_CNT_REG_TPE_PG_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_TPE_PG_INT_SUM_CNT_REG_TPE_PG_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_EM_INT_SUM_CNT_REG_ADDR 0xa1100260 -#define HIF_IRQ_IPP_EM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_EM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_EM_INT_SUM_CNT_REG_IPP_EM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_EM_INT_SUM_CNT_REG_IPP_EM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_IPP_EM_INT_SUM_CNT_REG_IPP_EM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_STAT_INT_SUM_CNT_REG_ADDR 0xa1100264 -#define HIF_IRQ_IPP_STAT_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_STAT_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_STAT_INT_SUM_CNT_REG_IPP_STAT_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_STAT_INT_SUM_CNT_REG_IPP_STAT_INT_SUM_CNT_SHIFT 0 -#define 
HIF_IRQ_IPP_STAT_INT_SUM_CNT_REG_IPP_STAT_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_CLSF_DMA_INT_SUM_CNT_REG_ADDR 0xa1100268 -#define HIF_IRQ_IPP_CLSF_DMA_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_CLSF_DMA_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_CLSF_DMA_INT_SUM_CNT_REG_IPP_CLSF_DMA_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_CLSF_DMA_INT_SUM_CNT_REG_IPP_CLSF_DMA_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_IPP_CLSF_DMA_INT_SUM_CNT_REG_IPP_CLSF_DMA_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_DIR_INT_SUM_CNT_REG_ADDR 0xa110026c -#define HIF_IRQ_IPP_DIR_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_DIR_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_DIR_INT_SUM_CNT_REG_IPP_DIR_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_DIR_INT_SUM_CNT_REG_IPP_DIR_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_IPP_DIR_INT_SUM_CNT_REG_IPP_DIR_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_TCAM_INT_SUM_CNT_REG_ADDR 0xa1100270 -#define HIF_IRQ_IPP_TCAM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_TCAM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_TCAM_INT_SUM_CNT_REG_IPP_TCAM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_TCAM_INT_SUM_CNT_REG_IPP_TCAM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_IPP_TCAM_INT_SUM_CNT_REG_IPP_TCAM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_CLSF_CTRL_INT_SUM_CNT_REG_ADDR 0xa1100274 -#define HIF_IRQ_IPP_CLSF_CTRL_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_CLSF_CTRL_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_CLSF_CTRL_INT_SUM_CNT_REG_IPP_CLSF_CTRL_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_CLSF_CTRL_INT_SUM_CNT_REG_IPP_CLSF_CTRL_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_IPP_CLSF_CTRL_INT_SUM_CNT_REG_IPP_CLSF_CTRL_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_IPP_PRS_INT_SUM_CNT_REG_ADDR 0xa1100278 -#define HIF_IRQ_IPP_PRS_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_IPP_PRS_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_IPP_PRS_INT_SUM_CNT_REG_IPP_PRS_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_IPP_PRS_INT_SUM_CNT_REG_IPP_PRS_INT_SUM_CNT_SHIFT 0 -#define 
HIF_IRQ_IPP_PRS_INT_SUM_CNT_REG_IPP_PRS_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_EPP_PRS_INT_SUM_CNT_REG_ADDR 0xa110027c -#define HIF_IRQ_EPP_PRS_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_EPP_PRS_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_EPP_PRS_INT_SUM_CNT_REG_EPP_RWE_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_EPP_PRS_INT_SUM_CNT_REG_EPP_RWE_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_EPP_PRS_INT_SUM_CNT_REG_EPP_RWE_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_NIF_NDP_INT_SUM_CNT_REG_ADDR 0xa1100280 -#define HIF_IRQ_NIF_NDP_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_NIF_NDP_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_NIF_NDP_INT_SUM_CNT_REG_NIF_NDP_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_NIF_NDP_INT_SUM_CNT_REG_NIF_NDP_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_NIF_NDP_INT_SUM_CNT_REG_NIF_NDP_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_NIF_NEA_INT_SUM_CNT_REG_ADDR 0xa1100284 -#define HIF_IRQ_NIF_NEA_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_NIF_NEA_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_NIF_NEA_INT_SUM_CNT_REG_NIF_NEA_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_NIF_NEA_INT_SUM_CNT_REG_NIF_NEA_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_NIF_NEA_INT_SUM_CNT_REG_NIF_NEA_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_NIF_NEM_INT_SUM_CNT_REG_ADDR 0xa1100288 -#define HIF_IRQ_NIF_NEM_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_NIF_NEM_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_NIF_NEM_INT_SUM_CNT_REG_NIF_NEM_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_NIF_NEM_INT_SUM_CNT_REG_NIF_NEM_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_NIF_NEM_INT_SUM_CNT_REG_NIF_NEM_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_PCIE0_INT_SUM_CNT_REG_ADDR 0xa110028c -#define HIF_IRQ_PCIE0_INT_SUM_CNT_REG_WIDTH 32 -#define HIF_IRQ_PCIE0_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_PCIE0_INT_SUM_CNT_REG_PCIE0_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_PCIE0_INT_SUM_CNT_REG_PCIE0_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_PCIE0_INT_SUM_CNT_REG_PCIE0_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_PCIE1_INT_SUM_CNT_REG_ADDR 0xa1100290 -#define HIF_IRQ_PCIE1_INT_SUM_CNT_REG_WIDTH 32 
-#define HIF_IRQ_PCIE1_INT_SUM_CNT_REG_LENGTH 32 -#define HIF_IRQ_PCIE1_INT_SUM_CNT_REG_PCIE1_INT_SUM_CNT_MASK 0xff -#define HIF_IRQ_PCIE1_INT_SUM_CNT_REG_PCIE1_INT_SUM_CNT_SHIFT 0 -#define HIF_IRQ_PCIE1_INT_SUM_CNT_REG_PCIE1_INT_SUM_CNT_WIDTH 8 - -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_ADDR 0xa1100298 -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_PCIE2HIF_PCIE_MSG_INT_CNT_MASK 0xff -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_PCIE2HIF_PCIE_MSG_INT_CNT_SHIFT 0 -#define HIF_IRQ_PCIE2HIF_PCIE_MSG_INT_CNT_REG_PCIE2HIF_PCIE_MSG_INT_CNT_WIDTH 8 - -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_ADDR 0xa11002a0 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_PIO2IRQ_PIO_REQ_INT_CNT_MASK 0xff -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_PIO2IRQ_PIO_REQ_INT_CNT_SHIFT 0 -#define HIF_IRQ_PIO2IRQ_PIO_REQ_INT_CNT_REG_PIO2IRQ_PIO_REQ_INT_CNT_WIDTH 8 - -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_CNT_REG_ADDR 0xa11002a8 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_CNT_REG_CMDQM2IRQ_CMDQ_REQ_INT_CNT_MASK 0xff -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_CNT_REG_CMDQM2IRQ_CMDQ_REQ_INT_CNT_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_CMDQ_REQ_INT_CNT_REG_CMDQM2IRQ_CMDQ_REQ_INT_CNT_WIDTH 8 - -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_ADDR 0xa11002b0 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_SIZE 2 -#define 
HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_CMDQM2IRQ_DMA_RX_INT_CNT_MASK 0xff -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_CMDQM2IRQ_DMA_RX_INT_CNT_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_DMA_RX_INT_CNT_REG_CMDQM2IRQ_DMA_RX_INT_CNT_WIDTH 8 - -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_ADDR 0xa11002b8 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_CMDQM2IRQ_DMA_TX_INT_CNT_MASK 0xff -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_CMDQM2IRQ_DMA_TX_INT_CNT_SHIFT 0 -#define HIF_IRQ_CMDQM2IRQ_DMA_TX_INT_CNT_REG_CMDQM2IRQ_DMA_TX_INT_CNT_WIDTH 8 - -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_CNT_REG_ADDR 0xa11002c0 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_CNT_REG_TBL2IRQ_TBL_RD_DONE_INT_CNT_MASK 0xff -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_CNT_REG_TBL2IRQ_TBL_RD_DONE_INT_CNT_SHIFT 0 -#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_CNT_REG_TBL2IRQ_TBL_RD_DONE_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_IRQ_SUM_INT_CNT_REG_ADDR 0xa11002c4 -#define HIF_IRQ_HIF2APS_IRQ_SUM_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_IRQ_SUM_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_IRQ_SUM_INT_CNT_REG_HIF2APS_IRQ_SUM_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_IRQ_SUM_INT_CNT_REG_HIF2APS_IRQ_SUM_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_IRQ_SUM_INT_CNT_REG_HIF2APS_IRQ_SUM_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_ADDR 0xa11002c8 -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_STRIDE 0x4 -#define 
HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_HIF2APS_PCIE_MSG_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_HIF2APS_PCIE_MSG_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_PCIE_MSG_INT_CNT_REG_HIF2APS_PCIE_MSG_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_ADDR 0xa11002d0 -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_HIF2APS_PIO_REQ_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_HIF2APS_PIO_REQ_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_PIO_REQ_INT_CNT_REG_HIF2APS_PIO_REQ_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_CMDQ_REQ_INT_CNT_REG_ADDR 0xa11002d8 -#define HIF_IRQ_HIF2APS_CMDQ_REQ_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_CMDQ_REQ_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_CMDQ_REQ_INT_CNT_REG_HIF2APS_CMDQ_REQ_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_CMDQ_REQ_INT_CNT_REG_HIF2APS_CMDQ_REQ_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_CMDQ_REQ_INT_CNT_REG_HIF2APS_CMDQ_REQ_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_ADDR 0xa11002e0 -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_STRIDE 0x4 -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_HIF2APS_DMA_RX_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_HIF2APS_DMA_RX_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_DMA_RX_INT_CNT_REG_HIF2APS_DMA_RX_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_ADDR 0xa11002e8 -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_SIZE 2 -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_STRIDE 0x4 -#define 
HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_HIF2APS_DMA_TX_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_HIF2APS_DMA_TX_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_DMA_TX_INT_CNT_REG_HIF2APS_DMA_TX_INT_CNT_WIDTH 8 - -#define HIF_IRQ_HIF2APS_TBL_RD_DONE_INT_CNT_REG_ADDR 0xa11002f0 -#define HIF_IRQ_HIF2APS_TBL_RD_DONE_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_HIF2APS_TBL_RD_DONE_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_HIF2APS_TBL_RD_DONE_INT_CNT_REG_HIF2APS_TBL_RD_DONE_INT_CNT_MASK 0xff -#define HIF_IRQ_HIF2APS_TBL_RD_DONE_INT_CNT_REG_HIF2APS_TBL_RD_DONE_INT_CNT_SHIFT 0 -#define HIF_IRQ_HIF2APS_TBL_RD_DONE_INT_CNT_REG_HIF2APS_TBL_RD_DONE_INT_CNT_WIDTH 8 - -#define HIF_IRQ_DIST2GEN_MSIX_INT_CNT_REG_ADDR 0xa11002f4 -#define HIF_IRQ_DIST2GEN_MSIX_INT_CNT_REG_WIDTH 32 -#define HIF_IRQ_DIST2GEN_MSIX_INT_CNT_REG_LENGTH 32 -#define HIF_IRQ_DIST2GEN_MSIX_INT_CNT_REG_DIST2GEN_MSIX_INT_CNT_MASK 0xff -#define HIF_IRQ_DIST2GEN_MSIX_INT_CNT_REG_DIST2GEN_MSIX_INT_CNT_SHIFT 0 -#define HIF_IRQ_DIST2GEN_MSIX_INT_CNT_REG_DIST2GEN_MSIX_INT_CNT_WIDTH 8 - -#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 -#define HIF_IRQ_CONTROL_TBL_MEM_WIDTH 32 -#define HIF_IRQ_CONTROL_TBL_MEM_LENGTH 32 -#define HIF_IRQ_CONTROL_TBL_MEM_DEPTH 1152 -#define HIF_IRQ_CONTROL_TBL_MEM_FUNC_MASK_MASK 0x1 -#define HIF_IRQ_CONTROL_TBL_MEM_FUNC_MASK_SHIFT 0 -#define HIF_IRQ_CONTROL_TBL_MEM_FUNC_MASK_WIDTH 1 - -#define HIF_IRQ_MEM_ERROR_INT_ADDR 0xa1104000 -#define HIF_IRQ_MEM_ERROR_INT_WIDTH 32 -#define HIF_IRQ_MEM_ERROR_INT_LENGTH 32 -#define HIF_IRQ_MEM_ERROR_INT_CMDQM2IRQ_MSIX_CBR_FIFO_MEM_SB_ERR_MASK 0x1 -#define HIF_IRQ_MEM_ERROR_INT_CMDQM2IRQ_MSIX_CBR_FIFO_MEM_SB_ERR_SHIFT 0 -#define HIF_IRQ_MEM_ERROR_INT_CMDQM2IRQ_MSIX_CBR_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_CMDQM2IRQ_MSIX_CBR_FIFO_MEM_DB_ERR_MASK 0x2 -#define HIF_IRQ_MEM_ERROR_INT_CMDQM2IRQ_MSIX_CBR_FIFO_MEM_DB_ERR_SHIFT 1 -#define HIF_IRQ_MEM_ERROR_INT_CMDQM2IRQ_MSIX_CBR_FIFO_MEM_DB_ERR_WIDTH 1 -#define 
HIF_IRQ_MEM_ERROR_INT_TPE2HIF_MSIX_CBR_FIFO_MEM_SB_ERR_MASK 0x4 -#define HIF_IRQ_MEM_ERROR_INT_TPE2HIF_MSIX_CBR_FIFO_MEM_SB_ERR_SHIFT 2 -#define HIF_IRQ_MEM_ERROR_INT_TPE2HIF_MSIX_CBR_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_TPE2HIF_MSIX_CBR_FIFO_MEM_DB_ERR_MASK 0x8 -#define HIF_IRQ_MEM_ERROR_INT_TPE2HIF_MSIX_CBR_FIFO_MEM_DB_ERR_SHIFT 3 -#define HIF_IRQ_MEM_ERROR_INT_TPE2HIF_MSIX_CBR_FIFO_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_DIST2GEN_MSIX_FIFO_MEM_SB_ERR_MASK 0x10 -#define HIF_IRQ_MEM_ERROR_INT_DIST2GEN_MSIX_FIFO_MEM_SB_ERR_SHIFT 4 -#define HIF_IRQ_MEM_ERROR_INT_DIST2GEN_MSIX_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_DIST2GEN_MSIX_FIFO_MEM_DB_ERR_MASK 0x20 -#define HIF_IRQ_MEM_ERROR_INT_DIST2GEN_MSIX_FIFO_MEM_DB_ERR_SHIFT 5 -#define HIF_IRQ_MEM_ERROR_INT_DIST2GEN_MSIX_FIFO_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_FAIL_MSIX_FIFO_MEM_SB_ERR_MASK 0x40 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_FAIL_MSIX_FIFO_MEM_SB_ERR_SHIFT 6 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_FAIL_MSIX_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_FAIL_MSIX_FIFO_MEM_DB_ERR_MASK 0x80 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_FAIL_MSIX_FIFO_MEM_DB_ERR_SHIFT 7 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_FAIL_MSIX_FIFO_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_BACK_MSIX_FIFO_MEM_SB_ERR_MASK 0x100 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_BACK_MSIX_FIFO_MEM_SB_ERR_SHIFT 8 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_BACK_MSIX_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_BACK_MSIX_FIFO_MEM_DB_ERR_MASK 0x200 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_BACK_MSIX_FIFO_MEM_DB_ERR_SHIFT 9 -#define HIF_IRQ_MEM_ERROR_INT_WRITE_BACK_MSIX_FIFO_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_INT_DB_FIFO_MEM_SB_ERR_MASK 0x400 -#define HIF_IRQ_MEM_ERROR_INT_INT_DB_FIFO_MEM_SB_ERR_SHIFT 10 -#define HIF_IRQ_MEM_ERROR_INT_INT_DB_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_INT_DB_FIFO_MEM_DB_ERR_MASK 0x800 -#define 
HIF_IRQ_MEM_ERROR_INT_INT_DB_FIFO_MEM_DB_ERR_SHIFT 11 -#define HIF_IRQ_MEM_ERROR_INT_INT_DB_FIFO_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_REQ_INFO_FIFO_MEM_SB_ERR_MASK 0x3000 -#define HIF_IRQ_MEM_ERROR_INT_REQ_INFO_FIFO_MEM_SB_ERR_SHIFT 12 -#define HIF_IRQ_MEM_ERROR_INT_REQ_INFO_FIFO_MEM_SB_ERR_WIDTH 2 -#define HIF_IRQ_MEM_ERROR_INT_REQ_INFO_FIFO_MEM_DB_ERR_MASK 0xc000 -#define HIF_IRQ_MEM_ERROR_INT_REQ_INFO_FIFO_MEM_DB_ERR_SHIFT 14 -#define HIF_IRQ_MEM_ERROR_INT_REQ_INFO_FIFO_MEM_DB_ERR_WIDTH 2 -#define HIF_IRQ_MEM_ERROR_INT_PBA_TBL_MEM_SB_ERR_MASK 0x10000 -#define HIF_IRQ_MEM_ERROR_INT_PBA_TBL_MEM_SB_ERR_SHIFT 16 -#define HIF_IRQ_MEM_ERROR_INT_PBA_TBL_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_PBA_TBL_MEM_DB_ERR_MASK 0x20000 -#define HIF_IRQ_MEM_ERROR_INT_PBA_TBL_MEM_DB_ERR_SHIFT 17 -#define HIF_IRQ_MEM_ERROR_INT_PBA_TBL_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_MASK_TBL_MEM_SB_ERR_MASK 0x40000 -#define HIF_IRQ_MEM_ERROR_INT_MASK_TBL_MEM_SB_ERR_SHIFT 18 -#define HIF_IRQ_MEM_ERROR_INT_MASK_TBL_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_MASK_TBL_MEM_DB_ERR_MASK 0x80000 -#define HIF_IRQ_MEM_ERROR_INT_MASK_TBL_MEM_DB_ERR_SHIFT 19 -#define HIF_IRQ_MEM_ERROR_INT_MASK_TBL_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_VECTOR_TBL_MEM_SB_ERR_MASK 0x100000 -#define HIF_IRQ_MEM_ERROR_INT_VECTOR_TBL_MEM_SB_ERR_SHIFT 20 -#define HIF_IRQ_MEM_ERROR_INT_VECTOR_TBL_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_VECTOR_TBL_MEM_DB_ERR_MASK 0x200000 -#define HIF_IRQ_MEM_ERROR_INT_VECTOR_TBL_MEM_DB_ERR_SHIFT 21 -#define HIF_IRQ_MEM_ERROR_INT_VECTOR_TBL_MEM_DB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_CONTROL_TBL_MEM_SB_ERR_MASK 0x400000 -#define HIF_IRQ_MEM_ERROR_INT_CONTROL_TBL_MEM_SB_ERR_SHIFT 22 -#define HIF_IRQ_MEM_ERROR_INT_CONTROL_TBL_MEM_SB_ERR_WIDTH 1 -#define HIF_IRQ_MEM_ERROR_INT_CONTROL_TBL_MEM_DB_ERR_MASK 0x800000 -#define HIF_IRQ_MEM_ERROR_INT_CONTROL_TBL_MEM_DB_ERR_SHIFT 23 -#define HIF_IRQ_MEM_ERROR_INT_CONTROL_TBL_MEM_DB_ERR_WIDTH 
1 - -#define HIF_IRQ_MEM_INIT_CTRL_ADDR 0xa1104008 -#define HIF_IRQ_MEM_INIT_CTRL_WIDTH 32 -#define HIF_IRQ_MEM_INIT_CTRL_LENGTH 32 -#define HIF_IRQ_MEM_INIT_CTRL_PBA_TBL_MEM_INIT_RST_N_MASK 0x1 -#define HIF_IRQ_MEM_INIT_CTRL_PBA_TBL_MEM_INIT_RST_N_SHIFT 0 -#define HIF_IRQ_MEM_INIT_CTRL_PBA_TBL_MEM_INIT_RST_N_WIDTH 1 -#define HIF_IRQ_MEM_INIT_CTRL_PBA_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_IRQ_MEM_INIT_CTRL_PBA_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_IRQ_MEM_INIT_CTRL_MASK_TBL_MEM_INIT_RST_N_MASK 0x2 -#define HIF_IRQ_MEM_INIT_CTRL_MASK_TBL_MEM_INIT_RST_N_SHIFT 1 -#define HIF_IRQ_MEM_INIT_CTRL_MASK_TBL_MEM_INIT_RST_N_WIDTH 1 -#define HIF_IRQ_MEM_INIT_CTRL_MASK_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_IRQ_MEM_INIT_CTRL_MASK_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_IRQ_MEM_INIT_CTRL_VECTOR_TBL_MEM_INIT_RST_N_MASK 0x4 -#define HIF_IRQ_MEM_INIT_CTRL_VECTOR_TBL_MEM_INIT_RST_N_SHIFT 2 -#define HIF_IRQ_MEM_INIT_CTRL_VECTOR_TBL_MEM_INIT_RST_N_WIDTH 1 -#define HIF_IRQ_MEM_INIT_CTRL_VECTOR_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_IRQ_MEM_INIT_CTRL_VECTOR_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 -#define HIF_IRQ_MEM_INIT_CTRL_CONTROL_TBL_MEM_INIT_RST_N_MASK 0x8 -#define HIF_IRQ_MEM_INIT_CTRL_CONTROL_TBL_MEM_INIT_RST_N_SHIFT 3 -#define HIF_IRQ_MEM_INIT_CTRL_CONTROL_TBL_MEM_INIT_RST_N_WIDTH 1 -#define HIF_IRQ_MEM_INIT_CTRL_CONTROL_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define HIF_IRQ_MEM_INIT_CTRL_CONTROL_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 - -#define HIF_IRQ_TIMEOUT_INT_REG_ADDR 0xa1104010 -#define HIF_IRQ_TIMEOUT_INT_REG_WIDTH 32 -#define HIF_IRQ_TIMEOUT_INT_REG_LENGTH 32 -#define HIF_IRQ_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_MASK 0x1 -#define HIF_IRQ_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_SHIFT 0 -#define HIF_IRQ_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_WIDTH 1 -#define HIF_IRQ_TIMEOUT_INT_REG_CONTROL_TBL_MEM_TIMEOUT_MASK 0x2 -#define HIF_IRQ_TIMEOUT_INT_REG_CONTROL_TBL_MEM_TIMEOUT_SHIFT 1 -#define HIF_IRQ_TIMEOUT_INT_REG_CONTROL_TBL_MEM_TIMEOUT_WIDTH 1 - -#define 
HIF_IRQ_TIMEOUT_INT_MASK_REG_ADDR 0xa1104014 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MASK 0x1 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_SHIFT 0 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CONTROL_TBL_MEM_TIMEOUT_INT_MASK_MASK 0x2 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CONTROL_TBL_MEM_TIMEOUT_INT_MASK_SHIFT 1 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CONTROL_TBL_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CONTROL_TBL_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_IRQ_TIMEOUT_INT_MASK_REG_CONTROL_TBL_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define HIF_IRQ_TIMEOUT_CFG_REG_ADDR 0xa1104018 -#define HIF_IRQ_TIMEOUT_CFG_REG_WIDTH 32 -#define HIF_IRQ_TIMEOUT_CFG_REG_LENGTH 32 -#define HIF_IRQ_TIMEOUT_CFG_REG_TIMEOUT_PARA_MASK 0xffff -#define HIF_IRQ_TIMEOUT_CFG_REG_TIMEOUT_PARA_SHIFT 0 -#define HIF_IRQ_TIMEOUT_CFG_REG_TIMEOUT_PARA_WIDTH 16 -#define HIF_IRQ_TIMEOUT_CFG_REG_TIMEOUT_PARA_MAX_VAL 0xffff -#define HIF_IRQ_TIMEOUT_CFG_REG_TIMEOUT_PARA_MIN_VAL 0x0 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_tbl_csr_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_tbl_csr_defines.h deleted file mode 100644 index 9747b87bf9e858c6f22c5cf6c5281b25ad24f257..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/hif_tbl_csr_defines.h +++ /dev/null @@ -1,474 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. 
- */ - -#ifndef _HIF_TBL_CSR_DEFINES_H_ -#define _HIF_TBL_CSR_DEFINES_H_ - -#define HIF_TBL_SOFT_RESET_REG_ADDR 0xa1060000 -#define HIF_TBL_SOFT_RESET_REG_WIDTH 32 -#define HIF_TBL_SOFT_RESET_REG_LENGTH 32 -#define HIF_TBL_SOFT_RESET_REG_SOFT_RESET_MASK 0x1 -#define HIF_TBL_SOFT_RESET_REG_SOFT_RESET_SHIFT 0 -#define HIF_TBL_SOFT_RESET_REG_SOFT_RESET_WIDTH 1 -#define HIF_TBL_SOFT_RESET_REG_SOFT_RESET_MAX_VAL 0x1 -#define HIF_TBL_SOFT_RESET_REG_SOFT_RESET_MIN_VAL 0x0 - -#define HIF_TBL_SCRATCH_PAD_REG_ADDR 0xa106000c -#define HIF_TBL_SCRATCH_PAD_REG_WIDTH 32 -#define HIF_TBL_SCRATCH_PAD_REG_LENGTH 32 -#define HIF_TBL_SCRATCH_PAD_REG_SCRATCH_PAD_MASK 0xffffffff -#define HIF_TBL_SCRATCH_PAD_REG_SCRATCH_PAD_SHIFT 0 -#define HIF_TBL_SCRATCH_PAD_REG_SCRATCH_PAD_WIDTH 32 -#define HIF_TBL_SCRATCH_PAD_REG_SCRATCH_PAD_MAX_VAL 0xffffffff -#define HIF_TBL_SCRATCH_PAD_REG_SCRATCH_PAD_MIN_VAL 0x0 - -#define HIF_TBL_CSR_ERR_FLAG_REG_ADDR 0xa1060010 -#define HIF_TBL_CSR_ERR_FLAG_REG_WIDTH 32 -#define HIF_TBL_CSR_ERR_FLAG_REG_LENGTH 32 -#define HIF_TBL_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_MASK 0x1 -#define HIF_TBL_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_SHIFT 0 -#define HIF_TBL_CSR_ERR_FLAG_REG_CSR_ERR_FLAG_WIDTH 1 - -#define HIF_TBL_CSR_ERR_ADDR_REG_ADDR 0xa1060014 -#define HIF_TBL_CSR_ERR_ADDR_REG_WIDTH 32 -#define HIF_TBL_CSR_ERR_ADDR_REG_LENGTH 32 -#define HIF_TBL_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_MASK 0xffffffff -#define HIF_TBL_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_SHIFT 0 -#define HIF_TBL_CSR_ERR_ADDR_REG_CSR_ERR_ADDR_WIDTH 32 - -#define HIF_TBL_CSR_ERR_LEN_REG_ADDR 0xa1060018 -#define HIF_TBL_CSR_ERR_LEN_REG_WIDTH 32 -#define HIF_TBL_CSR_ERR_LEN_REG_LENGTH 32 -#define HIF_TBL_CSR_ERR_LEN_REG_CSR_ERR_LEN_MASK 0x3ff -#define HIF_TBL_CSR_ERR_LEN_REG_CSR_ERR_LEN_SHIFT 0 -#define HIF_TBL_CSR_ERR_LEN_REG_CSR_ERR_LEN_WIDTH 10 - -#define HIF_TBL_CSR_ERR_TYPE_REG_ADDR 0xa106001c -#define HIF_TBL_CSR_ERR_TYPE_REG_WIDTH 32 -#define HIF_TBL_CSR_ERR_TYPE_REG_LENGTH 32 -#define 
HIF_TBL_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_MASK 0x1 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_SHIFT 0 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_CMD_ERR_TYPE_WIDTH 1 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_MASK 0x6 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_SHIFT 1 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_LEN_ERR_TYPE_WIDTH 2 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_MASK 0x8 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_SHIFT 3 -#define HIF_TBL_CSR_ERR_TYPE_REG_CSR_ADDR_ERR_TYPE_WIDTH 1 - -#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 -#define HIF_TBL_TBL_DL_REQ_REG_WIDTH 32 -#define HIF_TBL_TBL_DL_REQ_REG_LENGTH 32 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_FUNC_ID_MASK 0x7ff -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_FUNC_ID_SHIFT 0 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_FUNC_ID_WIDTH 11 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_FUNC_ID_MAX_VAL 0x7ff -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_FUNC_ID_MIN_VAL 0x0 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_MASK 0x800 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_WIDTH 1 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_MAX_VAL 0x1 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_MIN_VAL 0x0 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_MASK 0x3fff000 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_WIDTH 14 -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_MAX_VAL 0x3fff -#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_MIN_VAL 0x0 - -#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 -#define HIF_TBL_TBL_DL_ADDR_L_REG_WIDTH 32 -#define HIF_TBL_TBL_DL_ADDR_L_REG_LENGTH 32 -#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff -#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_SHIFT 0 -#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_WIDTH 32 -#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MAX_VAL 0xffffffff -#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MIN_VAL 0x0 - -#define 
HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 -#define HIF_TBL_TBL_DL_ADDR_H_REG_WIDTH 32 -#define HIF_TBL_TBL_DL_ADDR_H_REG_LENGTH 32 -#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff -#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_SHIFT 0 -#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_WIDTH 32 -#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MAX_VAL 0xffffffff -#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MIN_VAL 0x0 - -#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c -#define HIF_TBL_TBL_DL_START_REG_WIDTH 32 -#define HIF_TBL_TBL_DL_START_REG_LENGTH 32 -#define HIF_TBL_TBL_DL_START_REG_TBL_DL_START_MASK 0x1 -#define HIF_TBL_TBL_DL_START_REG_TBL_DL_START_SHIFT 0 -#define HIF_TBL_TBL_DL_START_REG_TBL_DL_START_WIDTH 1 -#define HIF_TBL_TBL_DL_START_REG_TBL_DL_START_MAX_VAL 0x1 -#define HIF_TBL_TBL_DL_START_REG_TBL_DL_START_MIN_VAL 0x0 - -#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 -#define HIF_TBL_TBL_DL_BUSY_REG_WIDTH 32 -#define HIF_TBL_TBL_DL_BUSY_REG_LENGTH 32 -#define HIF_TBL_TBL_DL_BUSY_REG_TBL_DL_BUSY_MASK 0x1 -#define HIF_TBL_TBL_DL_BUSY_REG_TBL_DL_BUSY_SHIFT 0 -#define HIF_TBL_TBL_DL_BUSY_REG_TBL_DL_BUSY_WIDTH 1 - -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_REG_ADDR 0xa1060034 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_REG_WIDTH 32 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_REG_LENGTH 32 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_REG_DL_RX_PCIE_DATA_ERR_MASK 0x1 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_REG_DL_RX_PCIE_DATA_ERR_SHIFT 0 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_REG_DL_RX_PCIE_DATA_ERR_WIDTH 1 - -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_ADDR 0xa1060038 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_DL_RX_PCIE_DATA_ERR_INT_MASK_MASK 0x1 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_DL_RX_PCIE_DATA_ERR_INT_MASK_SHIFT 0 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_DL_RX_PCIE_DATA_ERR_INT_MASK_WIDTH 1 -#define 
HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_DL_RX_PCIE_DATA_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_DL_RX_PCIE_DATA_ERR_INT_MASK_REG_DL_RX_PCIE_DATA_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_TBL_CFG_DL_LEN_ERR_INT_REG_ADDR 0xa106003c -#define HIF_TBL_CFG_DL_LEN_ERR_INT_REG_WIDTH 32 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_REG_LENGTH 32 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_REG_CFG_DL_LEN_ERR_MASK 0x1 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_REG_CFG_DL_LEN_ERR_SHIFT 0 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_REG_CFG_DL_LEN_ERR_WIDTH 1 - -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_ADDR 0xa1060040 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_CFG_DL_LEN_ERR_INT_MASK_MASK 0x1 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_CFG_DL_LEN_ERR_INT_MASK_SHIFT 0 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_CFG_DL_LEN_ERR_INT_MASK_WIDTH 1 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_CFG_DL_LEN_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_CFG_DL_LEN_ERR_INT_MASK_REG_CFG_DL_LEN_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 -#define HIF_TBL_MSG_RDY_REG_WIDTH 32 -#define HIF_TBL_MSG_RDY_REG_LENGTH 32 -#define HIF_TBL_MSG_RDY_REG_TBL2IRQ_TBL_RD_DONE_MASK 0x1 -#define HIF_TBL_MSG_RDY_REG_TBL2IRQ_TBL_RD_DONE_SHIFT 0 -#define HIF_TBL_MSG_RDY_REG_TBL2IRQ_TBL_RD_DONE_WIDTH 1 - -#define HIF_TBL_MSG_RDY_INT_MASK_REG_ADDR 0xa1060048 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_WIDTH 32 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_LENGTH 32 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_TBL2IRQ_TBL_RD_DONE_INT_MASK_MASK 0x1 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_TBL2IRQ_TBL_RD_DONE_INT_MASK_SHIFT 0 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_TBL2IRQ_TBL_RD_DONE_INT_MASK_WIDTH 1 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_TBL2IRQ_TBL_RD_DONE_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_MSG_RDY_INT_MASK_REG_TBL2IRQ_TBL_RD_DONE_INT_MASK_MIN_VAL 0x0 - -#define HIF_TBL_DL_RX_PCIE_L_CNT_REG_ADDR 0xa106004c -#define 
HIF_TBL_DL_RX_PCIE_L_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_RX_PCIE_L_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_RX_PCIE_L_CNT_REG_DL_RX_PCIE_L_MASK 0xffff -#define HIF_TBL_DL_RX_PCIE_L_CNT_REG_DL_RX_PCIE_L_SHIFT 0 -#define HIF_TBL_DL_RX_PCIE_L_CNT_REG_DL_RX_PCIE_L_WIDTH 16 - -#define HIF_TBL_DL_RX_PCIE_DATA_CNT_REG_ADDR 0xa1060050 -#define HIF_TBL_DL_RX_PCIE_DATA_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_RX_PCIE_DATA_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_RX_PCIE_DATA_CNT_REG_DL_RX_PCIE_DATA_MASK 0xffff -#define HIF_TBL_DL_RX_PCIE_DATA_CNT_REG_DL_RX_PCIE_DATA_SHIFT 0 -#define HIF_TBL_DL_RX_PCIE_DATA_CNT_REG_DL_RX_PCIE_DATA_WIDTH 16 - -#define HIF_TBL_DL_RX_PCIE_ERR_CNT_REG_ADDR 0xa1060054 -#define HIF_TBL_DL_RX_PCIE_ERR_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_RX_PCIE_ERR_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_RX_PCIE_ERR_CNT_REG_DL_RX_PCIE_ERR_MASK 0xffff -#define HIF_TBL_DL_RX_PCIE_ERR_CNT_REG_DL_RX_PCIE_ERR_SHIFT 0 -#define HIF_TBL_DL_RX_PCIE_ERR_CNT_REG_DL_RX_PCIE_ERR_WIDTH 16 - -#define HIF_TBL_DL_RX_PCIE_LAST_CNT_REG_ADDR 0xa1060058 -#define HIF_TBL_DL_RX_PCIE_LAST_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_RX_PCIE_LAST_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_RX_PCIE_LAST_CNT_REG_DL_RX_PCIE_LAST_MASK 0xffff -#define HIF_TBL_DL_RX_PCIE_LAST_CNT_REG_DL_RX_PCIE_LAST_SHIFT 0 -#define HIF_TBL_DL_RX_PCIE_LAST_CNT_REG_DL_RX_PCIE_LAST_WIDTH 16 - -#define HIF_TBL_DL_TX_REQ_CNT_REG_ADDR 0xa106005c -#define HIF_TBL_DL_TX_REQ_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_TX_REQ_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_TX_REQ_CNT_REG_DL_TX_REQ2PCIE_MASK 0xffff -#define HIF_TBL_DL_TX_REQ_CNT_REG_DL_TX_REQ2PCIE_SHIFT 0 -#define HIF_TBL_DL_TX_REQ_CNT_REG_DL_TX_REQ2PCIE_WIDTH 16 - -#define HIF_TBL_DL_TX_TBL2PP_VLD_CNT_REG_ADDR 0xa1060060 -#define HIF_TBL_DL_TX_TBL2PP_VLD_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_TX_TBL2PP_VLD_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_TX_TBL2PP_VLD_CNT_REG_DL_TX_TBL2PP_VLD_MASK 0xffff -#define HIF_TBL_DL_TX_TBL2PP_VLD_CNT_REG_DL_TX_TBL2PP_VLD_SHIFT 0 -#define 
HIF_TBL_DL_TX_TBL2PP_VLD_CNT_REG_DL_TX_TBL2PP_VLD_WIDTH 16 - -#define HIF_TBL_DL_TX_TBL2PP_SOF_CNT_REG_ADDR 0xa1060064 -#define HIF_TBL_DL_TX_TBL2PP_SOF_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_TX_TBL2PP_SOF_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_TX_TBL2PP_SOF_CNT_REG_DL_TX_TBL2PP_SOF_MASK 0xffff -#define HIF_TBL_DL_TX_TBL2PP_SOF_CNT_REG_DL_TX_TBL2PP_SOF_SHIFT 0 -#define HIF_TBL_DL_TX_TBL2PP_SOF_CNT_REG_DL_TX_TBL2PP_SOF_WIDTH 16 - -#define HIF_TBL_DL_TX_TBL2PP_EOF_CNT_REG_ADDR 0xa1060068 -#define HIF_TBL_DL_TX_TBL2PP_EOF_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_TX_TBL2PP_EOF_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_TX_TBL2PP_EOF_CNT_REG_DL_TX_TBL2PP_EOF_MASK 0xffff -#define HIF_TBL_DL_TX_TBL2PP_EOF_CNT_REG_DL_TX_TBL2PP_EOF_SHIFT 0 -#define HIF_TBL_DL_TX_TBL2PP_EOF_CNT_REG_DL_TX_TBL2PP_EOF_WIDTH 16 - -#define HIF_TBL_DL_TX_TBL2PP_SOB_CNT_REG_ADDR 0xa106006c -#define HIF_TBL_DL_TX_TBL2PP_SOB_CNT_REG_WIDTH 32 -#define HIF_TBL_DL_TX_TBL2PP_SOB_CNT_REG_LENGTH 32 -#define HIF_TBL_DL_TX_TBL2PP_SOB_CNT_REG_DL_TX_TBL2PP_SOB_MASK 0xffff -#define HIF_TBL_DL_TX_TBL2PP_SOB_CNT_REG_DL_TX_TBL2PP_SOB_SHIFT 0 -#define HIF_TBL_DL_TX_TBL2PP_SOB_CNT_REG_DL_TX_TBL2PP_SOB_WIDTH 16 - -#define HIF_TBL_RX_IPP2HIF_DL_FC_CNT_REG_ADDR 0xa1060070 -#define HIF_TBL_RX_IPP2HIF_DL_FC_CNT_REG_WIDTH 32 -#define HIF_TBL_RX_IPP2HIF_DL_FC_CNT_REG_LENGTH 32 -#define HIF_TBL_RX_IPP2HIF_DL_FC_CNT_REG_RX_IPP2HIF_DL_FC_CNT_MASK 0xffff -#define HIF_TBL_RX_IPP2HIF_DL_FC_CNT_REG_RX_IPP2HIF_DL_FC_CNT_SHIFT 0 -#define HIF_TBL_RX_IPP2HIF_DL_FC_CNT_REG_RX_IPP2HIF_DL_FC_CNT_WIDTH 16 - -#define HIF_TBL_TX_UL_CREDIT_FC_CNT_REG_ADDR 0xa1060074 -#define HIF_TBL_TX_UL_CREDIT_FC_CNT_REG_WIDTH 32 -#define HIF_TBL_TX_UL_CREDIT_FC_CNT_REG_LENGTH 32 -#define HIF_TBL_TX_UL_CREDIT_FC_CNT_REG_TX_UL_CREDIT_FC_CNT_MASK 0xffff -#define HIF_TBL_TX_UL_CREDIT_FC_CNT_REG_TX_UL_CREDIT_FC_CNT_SHIFT 0 -#define HIF_TBL_TX_UL_CREDIT_FC_CNT_REG_TX_UL_CREDIT_FC_CNT_WIDTH 16 - -#define HIF_TBL_LAST_FLAG_FC_CNT_REG_ADDR 0xa1060078 -#define 
HIF_TBL_LAST_FLAG_FC_CNT_REG_WIDTH 32 -#define HIF_TBL_LAST_FLAG_FC_CNT_REG_LENGTH 32 -#define HIF_TBL_LAST_FLAG_FC_CNT_REG_LAST_FLAG_FC_CNT_MASK 0xffff -#define HIF_TBL_LAST_FLAG_FC_CNT_REG_LAST_FLAG_FC_CNT_SHIFT 0 -#define HIF_TBL_LAST_FLAG_FC_CNT_REG_LAST_FLAG_FC_CNT_WIDTH 16 - -#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c -#define HIF_TBL_TBL_UL_REQ_REG_WIDTH 32 -#define HIF_TBL_TBL_UL_REQ_REG_LENGTH 32 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_FUNC_ID_MASK 0x7ff -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_FUNC_ID_SHIFT 0 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_FUNC_ID_WIDTH 11 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_FUNC_ID_MAX_VAL 0x7ff -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_FUNC_ID_MIN_VAL 0x0 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_MASK 0x800 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_WIDTH 1 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_MAX_VAL 0x1 -#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_MIN_VAL 0x0 - -#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 -#define HIF_TBL_TBL_UL_ADDR_L_REG_WIDTH 32 -#define HIF_TBL_TBL_UL_ADDR_L_REG_LENGTH 32 -#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff -#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_SHIFT 0 -#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_WIDTH 32 -#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MAX_VAL 0xffffffff -#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MIN_VAL 0x0 - -#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 -#define HIF_TBL_TBL_UL_ADDR_H_REG_WIDTH 32 -#define HIF_TBL_TBL_UL_ADDR_H_REG_LENGTH 32 -#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff -#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_SHIFT 0 -#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_WIDTH 32 -#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MAX_VAL 0xffffffff -#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MIN_VAL 0x0 - -#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 -#define HIF_TBL_TBL_UL_START_REG_WIDTH 32 -#define 
HIF_TBL_TBL_UL_START_REG_LENGTH 32 -#define HIF_TBL_TBL_UL_START_REG_TBL_UL_START_MASK 0x1 -#define HIF_TBL_TBL_UL_START_REG_TBL_UL_START_SHIFT 0 -#define HIF_TBL_TBL_UL_START_REG_TBL_UL_START_WIDTH 1 -#define HIF_TBL_TBL_UL_START_REG_TBL_UL_START_MAX_VAL 0x1 -#define HIF_TBL_TBL_UL_START_REG_TBL_UL_START_MIN_VAL 0x0 - -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_ADDR 0xa106008c -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_WIDTH 32 -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_LENGTH 32 -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_CFG_RDY_ALLOWANCE_MASK 0x1f -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_CFG_RDY_ALLOWANCE_SHIFT 0 -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_CFG_RDY_ALLOWANCE_WIDTH 5 -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_CFG_RDY_ALLOWANCE_MAX_VAL 0x1f -#define HIF_TBL_UL_CBT_CFG_RDY_ALLOWANCE_REG_CFG_RDY_ALLOWANCE_MIN_VAL 0x0 - -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_REG_ADDR 0xa1060090 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_REG_WIDTH 32 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_REG_UL_RX_PCIE_RSP_ERR_MASK 0x1 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_REG_UL_RX_PCIE_RSP_ERR_SHIFT 0 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_REG_UL_RX_PCIE_RSP_ERR_WIDTH 1 - -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_ADDR 0xa1060094 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_WIDTH 32 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_UL_RX_PCIE_RSP_ERR_INT_MASK_MASK 0x1 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_UL_RX_PCIE_RSP_ERR_INT_MASK_SHIFT 0 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_UL_RX_PCIE_RSP_ERR_INT_MASK_WIDTH 1 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_UL_RX_PCIE_RSP_ERR_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_UL_RX_PCIE_RSP_ERR_INT_MASK_REG_UL_RX_PCIE_RSP_ERR_INT_MASK_MIN_VAL 0x0 - -#define HIF_TBL_UL_RX_PP2TBL_VLD_CNT_REG_ADDR 0xa1060098 -#define HIF_TBL_UL_RX_PP2TBL_VLD_CNT_REG_WIDTH 32 -#define 
HIF_TBL_UL_RX_PP2TBL_VLD_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PP2TBL_VLD_CNT_REG_UL_RX_PP2TBL_VLD_MASK 0xffff -#define HIF_TBL_UL_RX_PP2TBL_VLD_CNT_REG_UL_RX_PP2TBL_VLD_SHIFT 0 -#define HIF_TBL_UL_RX_PP2TBL_VLD_CNT_REG_UL_RX_PP2TBL_VLD_WIDTH 16 - -#define HIF_TBL_UL_RX_PP2TBL_SOF_CNT_REG_ADDR 0xa106009c -#define HIF_TBL_UL_RX_PP2TBL_SOF_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_RX_PP2TBL_SOF_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PP2TBL_SOF_CNT_REG_UL_RX_PP2TBL_SOF_MASK 0xffff -#define HIF_TBL_UL_RX_PP2TBL_SOF_CNT_REG_UL_RX_PP2TBL_SOF_SHIFT 0 -#define HIF_TBL_UL_RX_PP2TBL_SOF_CNT_REG_UL_RX_PP2TBL_SOF_WIDTH 16 - -#define HIF_TBL_UL_RX_PP2TBL_EOF_CNT_REG_ADDR 0xa10600a0 -#define HIF_TBL_UL_RX_PP2TBL_EOF_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_RX_PP2TBL_EOF_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PP2TBL_EOF_CNT_REG_UL_RX_PP2TBL_EOF_MASK 0xffff -#define HIF_TBL_UL_RX_PP2TBL_EOF_CNT_REG_UL_RX_PP2TBL_EOF_SHIFT 0 -#define HIF_TBL_UL_RX_PP2TBL_EOF_CNT_REG_UL_RX_PP2TBL_EOF_WIDTH 16 - -#define HIF_TBL_UL_TX_PCIE_ENTRY_CNT_REG_ADDR 0xa10600a4 -#define HIF_TBL_UL_TX_PCIE_ENTRY_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_TX_PCIE_ENTRY_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_TX_PCIE_ENTRY_CNT_REG_UL_TX_PCIE_ENTRY_MASK 0xffff -#define HIF_TBL_UL_TX_PCIE_ENTRY_CNT_REG_UL_TX_PCIE_ENTRY_SHIFT 0 -#define HIF_TBL_UL_TX_PCIE_ENTRY_CNT_REG_UL_TX_PCIE_ENTRY_WIDTH 16 - -#define HIF_TBL_UL_TX_PCIE_LAST_CNT_REG_ADDR 0xa10600a8 -#define HIF_TBL_UL_TX_PCIE_LAST_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_TX_PCIE_LAST_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_TX_PCIE_LAST_CNT_REG_UL_TX_PCIE_LAST_MASK 0xffff -#define HIF_TBL_UL_TX_PCIE_LAST_CNT_REG_UL_TX_PCIE_LAST_SHIFT 0 -#define HIF_TBL_UL_TX_PCIE_LAST_CNT_REG_UL_TX_PCIE_LAST_WIDTH 16 - -#define HIF_TBL_UL_RX_PCIE_ACK_CNT_REG_ADDR 0xa10600ac -#define HIF_TBL_UL_RX_PCIE_ACK_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_RX_PCIE_ACK_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PCIE_ACK_CNT_REG_UL_RX_PCIE_ACK_MASK 0xffff -#define 
HIF_TBL_UL_RX_PCIE_ACK_CNT_REG_UL_RX_PCIE_ACK_SHIFT 0 -#define HIF_TBL_UL_RX_PCIE_ACK_CNT_REG_UL_RX_PCIE_ACK_WIDTH 16 - -#define HIF_TBL_UL_RX_PP2TBL_LAST_CNT_REG_ADDR 0xa10600b4 -#define HIF_TBL_UL_RX_PP2TBL_LAST_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_RX_PP2TBL_LAST_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_RX_PP2TBL_LAST_CNT_REG_UL_RX_PP2TBL_LAST_MASK 0xffff -#define HIF_TBL_UL_RX_PP2TBL_LAST_CNT_REG_UL_RX_PP2TBL_LAST_SHIFT 0 -#define HIF_TBL_UL_RX_PP2TBL_LAST_CNT_REG_UL_RX_PP2TBL_LAST_WIDTH 16 - -#define HIF_TBL_UL_TX_TBL2IRQ_RD_DONE_CNT_REG_ADDR 0xa10600b8 -#define HIF_TBL_UL_TX_TBL2IRQ_RD_DONE_CNT_REG_WIDTH 32 -#define HIF_TBL_UL_TX_TBL2IRQ_RD_DONE_CNT_REG_LENGTH 32 -#define HIF_TBL_UL_TX_TBL2IRQ_RD_DONE_CNT_REG_UL_TX_TBL2IRQ_RD_DONE_MASK 0xffff -#define HIF_TBL_UL_TX_TBL2IRQ_RD_DONE_CNT_REG_UL_TX_TBL2IRQ_RD_DONE_SHIFT 0 -#define HIF_TBL_UL_TX_TBL2IRQ_RD_DONE_CNT_REG_UL_TX_TBL2IRQ_RD_DONE_WIDTH 16 - -#define HIF_TBL_FIFO_AFUL_TH_REG_ADDR 0xa10600bc -#define HIF_TBL_FIFO_AFUL_TH_REG_WIDTH 32 -#define HIF_TBL_FIFO_AFUL_TH_REG_LENGTH 32 -#define HIF_TBL_FIFO_AFUL_TH_REG_DL_FIFO_AFUL_TH_MASK 0xff -#define HIF_TBL_FIFO_AFUL_TH_REG_DL_FIFO_AFUL_TH_SHIFT 0 -#define HIF_TBL_FIFO_AFUL_TH_REG_DL_FIFO_AFUL_TH_WIDTH 8 -#define HIF_TBL_FIFO_AFUL_TH_REG_DL_FIFO_AFUL_TH_MAX_VAL 0xff -#define HIF_TBL_FIFO_AFUL_TH_REG_DL_FIFO_AFUL_TH_MIN_VAL 0x0 -#define HIF_TBL_FIFO_AFUL_TH_REG_TBL_LAST_FLAG_DL_FIFO_AFUL_TH_MASK 0x3f00 -#define HIF_TBL_FIFO_AFUL_TH_REG_TBL_LAST_FLAG_DL_FIFO_AFUL_TH_SHIFT 8 -#define HIF_TBL_FIFO_AFUL_TH_REG_TBL_LAST_FLAG_DL_FIFO_AFUL_TH_WIDTH 6 -#define HIF_TBL_FIFO_AFUL_TH_REG_TBL_LAST_FLAG_DL_FIFO_AFUL_TH_MAX_VAL 0x3f -#define HIF_TBL_FIFO_AFUL_TH_REG_TBL_LAST_FLAG_DL_FIFO_AFUL_TH_MIN_VAL 0x0 - -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_ADDR 0xa10600c4 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_WIDTH 32 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_LENGTH 32 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_DL_FIFO_OVFL_MASK 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_DL_FIFO_OVFL_SHIFT 0 
-#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_DL_FIFO_OVFL_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL2ADPT_CREDIT_OVFL_MASK 0x2 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL2ADPT_CREDIT_OVFL_SHIFT 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL2ADPT_CREDIT_OVFL_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL_LAST_FLAG_FIFO_OVFL_MASK 0x4 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL_LAST_FLAG_FIFO_OVFL_SHIFT 2 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL_LAST_FLAG_FIFO_OVFL_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_DL_FIFO_UNFL_MASK 0x8 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_DL_FIFO_UNFL_SHIFT 3 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_DL_FIFO_UNFL_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL2ADPT_CREDIT_UNFL_MASK 0x10 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL2ADPT_CREDIT_UNFL_SHIFT 4 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL2ADPT_CREDIT_UNFL_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL_LAST_FLAG_FIFO_UNFL_MASK 0x20 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL_LAST_FLAG_FIFO_UNFL_SHIFT 5 -#define HIF_TBL_TBL2ADPT_FIFO_INT_REG_TBL_LAST_FLAG_FIFO_UNFL_WIDTH 1 - -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_ADDR 0xa10600c8 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_WIDTH 32 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_LENGTH 32 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_OVFL_INT_MASK_MASK 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_OVFL_INT_MASK_SHIFT 0 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_OVFL_INT_MASK_MASK 0x2 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_OVFL_INT_MASK_SHIFT 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_OVFL_INT_MASK_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_OVFL_INT_MASK_MAX_VAL 0x1 -#define 
HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_OVFL_INT_MASK_MASK 0x4 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_OVFL_INT_MASK_SHIFT 2 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_OVFL_INT_MASK_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_OVFL_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_OVFL_INT_MASK_MIN_VAL 0x0 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_UNFL_INT_MASK_MASK 0x8 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_UNFL_INT_MASK_SHIFT 3 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_DL_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_UNFL_INT_MASK_MASK 0x10 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_UNFL_INT_MASK_SHIFT 4 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_UNFL_INT_MASK_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL2ADPT_CREDIT_UNFL_INT_MASK_MIN_VAL 0x0 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_UNFL_INT_MASK_MASK 0x20 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_UNFL_INT_MASK_SHIFT 5 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_UNFL_INT_MASK_WIDTH 1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_UNFL_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_TBL2ADPT_FIFO_INT_MASK_REG_TBL_LAST_FLAG_FIFO_UNFL_INT_MASK_MIN_VAL 0x0 - -#define HIF_TBL_MEM_ERROR_INT_ADDR 0xa10600d0 -#define HIF_TBL_MEM_ERROR_INT_WIDTH 32 -#define HIF_TBL_MEM_ERROR_INT_LENGTH 32 -#define HIF_TBL_MEM_ERROR_INT_DL_FIFO_MEM_SB_ERR_MASK 0x1 -#define HIF_TBL_MEM_ERROR_INT_DL_FIFO_MEM_SB_ERR_SHIFT 0 
-#define HIF_TBL_MEM_ERROR_INT_DL_FIFO_MEM_SB_ERR_WIDTH 1 -#define HIF_TBL_MEM_ERROR_INT_DL_FIFO_MEM_DB_ERR_MASK 0x2 -#define HIF_TBL_MEM_ERROR_INT_DL_FIFO_MEM_DB_ERR_SHIFT 1 -#define HIF_TBL_MEM_ERROR_INT_DL_FIFO_MEM_DB_ERR_WIDTH 1 - -#define HIF_TBL_TIMEOUT_INT_REG_ADDR 0xa10600d8 -#define HIF_TBL_TIMEOUT_INT_REG_WIDTH 32 -#define HIF_TBL_TIMEOUT_INT_REG_LENGTH 32 -#define HIF_TBL_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_MASK 0x1 -#define HIF_TBL_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_SHIFT 0 -#define HIF_TBL_TIMEOUT_INT_REG_CSR_ERR_FLAG_REG_TIMEOUT_WIDTH 1 - -#define HIF_TBL_TIMEOUT_INT_MASK_REG_ADDR 0xa10600dc -#define HIF_TBL_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define HIF_TBL_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define HIF_TBL_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MASK 0x1 -#define HIF_TBL_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_SHIFT 0 -#define HIF_TBL_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_WIDTH 1 -#define HIF_TBL_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define HIF_TBL_TIMEOUT_INT_MASK_REG_CSR_ERR_FLAG_REG_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define HIF_TBL_TIMEOUT_CFG_REG_ADDR 0xa10600e0 -#define HIF_TBL_TIMEOUT_CFG_REG_WIDTH 32 -#define HIF_TBL_TIMEOUT_CFG_REG_LENGTH 32 -#define HIF_TBL_TIMEOUT_CFG_REG_TIMEOUT_PARA_MASK 0xffff -#define HIF_TBL_TIMEOUT_CFG_REG_TIMEOUT_PARA_SHIFT 0 -#define HIF_TBL_TIMEOUT_CFG_REG_TIMEOUT_PARA_WIDTH 16 -#define HIF_TBL_TIMEOUT_CFG_REG_TIMEOUT_PARA_MAX_VAL 0xffff -#define HIF_TBL_TIMEOUT_CFG_REG_TIMEOUT_PARA_MIN_VAL 0x0 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/andes/mmc_csr_defines.h b/drivers/net/ethernet/yunsilicon/xsc/common/andes/mmc_csr_defines.h deleted file mode 100644 index e4bb3fe49d99c88c0cf4e591bb6680a5b2f9fa20..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/common/andes/mmc_csr_defines.h +++ /dev/null @@ -1,551 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 
2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef _MMC_CSR_DEFINES_H_ -#define _MMC_CSR_DEFINES_H_ - -#define MMC_SOFT_RESET_REG_ADDR 0xa4000000 -#define MMC_SOFT_RESET_REG_WIDTH 32 -#define MMC_SOFT_RESET_REG_LENGTH 32 -#define MMC_SOFT_RESET_REG_SOFT_RESET_MASK 0x1 -#define MMC_SOFT_RESET_REG_SOFT_RESET_SHIFT 0 -#define MMC_SOFT_RESET_REG_SOFT_RESET_WIDTH 1 -#define MMC_SOFT_RESET_REG_SOFT_RESET_MAX_VAL 0x1 -#define MMC_SOFT_RESET_REG_SOFT_RESET_MIN_VAL 0x0 - -#define MMC_SCRATCH_PAD_REG_ADDR 0xa400000c -#define MMC_SCRATCH_PAD_REG_WIDTH 32 -#define MMC_SCRATCH_PAD_REG_LENGTH 32 -#define MMC_SCRATCH_PAD_REG_SCRATCH_PAD_MASK 0xffffffff -#define MMC_SCRATCH_PAD_REG_SCRATCH_PAD_SHIFT 0 -#define MMC_SCRATCH_PAD_REG_SCRATCH_PAD_WIDTH 32 -#define MMC_SCRATCH_PAD_REG_SCRATCH_PAD_MAX_VAL 0xffffffff -#define MMC_SCRATCH_PAD_REG_SCRATCH_PAD_MIN_VAL 0x0 - -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_ADDR 0xa4000010 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_WIDTH 32 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_LENGTH 32 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_MET2MMC_IN_FIFO_EMPTY_MASK 0x1 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_MET2MMC_IN_FIFO_EMPTY_SHIFT 0 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_MET2MMC_IN_FIFO_EMPTY_WIDTH 1 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_MET2MMC_IN_FIFO_USED_CNT_MASK 0x7e -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_MET2MMC_IN_FIFO_USED_CNT_SHIFT 1 -#define MMC_MET2MMC_IN_FIFO_STATUS_REG_MET2MMC_IN_FIFO_USED_CNT_WIDTH 6 - -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_ADDR 0xa4000014 -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_WIDTH 32 -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_LENGTH 32 -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_MER2MMC_IN_FIFO_EMPTY_MASK 0x1 -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_MER2MMC_IN_FIFO_EMPTY_SHIFT 0 -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_MER2MMC_IN_FIFO_EMPTY_WIDTH 1 -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_MER2MMC_IN_FIFO_USED_CNT_MASK 0x7e -#define MMC_MER2MMC_IN_FIFO_STATUS_REG_MER2MMC_IN_FIFO_USED_CNT_SHIFT 1 
-#define MMC_MER2MMC_IN_FIFO_STATUS_REG_MER2MMC_IN_FIFO_USED_CNT_WIDTH 6 - -#define MMC_FIFO_FATAL_ERR_INT_REG_ADDR 0xa400003c -#define MMC_FIFO_FATAL_ERR_INT_REG_WIDTH 32 -#define MMC_FIFO_FATAL_ERR_INT_REG_LENGTH 32 -#define MMC_FIFO_FATAL_ERR_INT_REG_MET2MMC_IN_FIFO_OVFL_MASK 0x1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MET2MMC_IN_FIFO_OVFL_SHIFT 0 -#define MMC_FIFO_FATAL_ERR_INT_REG_MET2MMC_IN_FIFO_OVFL_WIDTH 1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MET2MMC_IN_FIFO_UNFL_MASK 0x2 -#define MMC_FIFO_FATAL_ERR_INT_REG_MET2MMC_IN_FIFO_UNFL_SHIFT 1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MET2MMC_IN_FIFO_UNFL_WIDTH 1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MER2MMC_IN_FIFO_OVFL_MASK 0x4 -#define MMC_FIFO_FATAL_ERR_INT_REG_MER2MMC_IN_FIFO_OVFL_SHIFT 2 -#define MMC_FIFO_FATAL_ERR_INT_REG_MER2MMC_IN_FIFO_OVFL_WIDTH 1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MER2MMC_IN_FIFO_UNFL_MASK 0x8 -#define MMC_FIFO_FATAL_ERR_INT_REG_MER2MMC_IN_FIFO_UNFL_SHIFT 3 -#define MMC_FIFO_FATAL_ERR_INT_REG_MER2MMC_IN_FIFO_UNFL_WIDTH 1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MPT2MTT_IN_FIFO_OVFL_MASK 0x10 -#define MMC_FIFO_FATAL_ERR_INT_REG_MPT2MTT_IN_FIFO_OVFL_SHIFT 4 -#define MMC_FIFO_FATAL_ERR_INT_REG_MPT2MTT_IN_FIFO_OVFL_WIDTH 1 -#define MMC_FIFO_FATAL_ERR_INT_REG_MPT2MTT_IN_FIFO_UNFL_MASK 0x20 -#define MMC_FIFO_FATAL_ERR_INT_REG_MPT2MTT_IN_FIFO_UNFL_SHIFT 5 -#define MMC_FIFO_FATAL_ERR_INT_REG_MPT2MTT_IN_FIFO_UNFL_WIDTH 1 - -#define MMC_MET2MMC_IN_FIFO_CFG_REG_ADDR 0xa4000050 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_WIDTH 32 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_LENGTH 32 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AFUL_TH_MASK 0x3f -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AFUL_TH_SHIFT 0 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AFUL_TH_WIDTH 6 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AFUL_TH_MAX_VAL 0x3f -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AFUL_TH_MIN_VAL 0x0 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AMTY_TH_MASK 0xfc0 -#define 
MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AMTY_TH_SHIFT 6 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AMTY_TH_WIDTH 6 -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AMTY_TH_MAX_VAL 0x3f -#define MMC_MET2MMC_IN_FIFO_CFG_REG_MET2MMC_IN_FIFO_AMTY_TH_MIN_VAL 0x0 - -#define MMC_MER2MMC_IN_FIFO_CFG_REG_ADDR 0xa4000054 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_WIDTH 32 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_LENGTH 32 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AFUL_TH_MASK 0x3f -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AFUL_TH_SHIFT 0 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AFUL_TH_WIDTH 6 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AFUL_TH_MAX_VAL 0x3f -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AFUL_TH_MIN_VAL 0x0 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AMTY_TH_MASK 0xfc0 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AMTY_TH_SHIFT 6 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AMTY_TH_WIDTH 6 -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AMTY_TH_MAX_VAL 0x3f -#define MMC_MER2MMC_IN_FIFO_CFG_REG_MER2MMC_IN_FIFO_AMTY_TH_MIN_VAL 0x0 - -#define MMC_MET2MMC_RD_CNT_REG_ADDR 0xa4000060 -#define MMC_MET2MMC_RD_CNT_REG_WIDTH 32 -#define MMC_MET2MMC_RD_CNT_REG_LENGTH 32 -#define MMC_MET2MMC_RD_CNT_REG_MET2MMC_RD_CNT_MASK 0xffff -#define MMC_MET2MMC_RD_CNT_REG_MET2MMC_RD_CNT_SHIFT 0 -#define MMC_MET2MMC_RD_CNT_REG_MET2MMC_RD_CNT_WIDTH 16 - -#define MMC_MER2MMC_RD_CNT_REG_ADDR 0xa4000064 -#define MMC_MER2MMC_RD_CNT_REG_WIDTH 32 -#define MMC_MER2MMC_RD_CNT_REG_LENGTH 32 -#define MMC_MER2MMC_RD_CNT_REG_MER2MMC_RD_CNT_MASK 0xffff -#define MMC_MER2MMC_RD_CNT_REG_MER2MMC_RD_CNT_SHIFT 0 -#define MMC_MER2MMC_RD_CNT_REG_MER2MMC_RD_CNT_WIDTH 16 - -#define MMC_MET_KEY_MATCH_FAIL_CNT_REG_ADDR 0xa4000068 -#define MMC_MET_KEY_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MET_KEY_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MET_KEY_MATCH_FAIL_CNT_REG_MET_KEY_MACTH_FAIL_CNT_MASK 0xffff -#define 
MMC_MET_KEY_MATCH_FAIL_CNT_REG_MET_KEY_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MET_KEY_MATCH_FAIL_CNT_REG_MET_KEY_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MER_KEY_MATCH_FAIL_CNT_REG_ADDR 0xa400006c -#define MMC_MER_KEY_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MER_KEY_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MER_KEY_MATCH_FAIL_CNT_REG_MER_KEY_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MER_KEY_MATCH_FAIL_CNT_REG_MER_KEY_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MER_KEY_MATCH_FAIL_CNT_REG_MER_KEY_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MET_PD_MATCH_FAIL_CNT_REG_ADDR 0xa4000070 -#define MMC_MET_PD_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MET_PD_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MET_PD_MATCH_FAIL_CNT_REG_MET_PD_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MET_PD_MATCH_FAIL_CNT_REG_MET_PD_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MET_PD_MATCH_FAIL_CNT_REG_MET_PD_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MER_PD_MATCH_FAIL_CNT_REG_ADDR 0xa4000074 -#define MMC_MER_PD_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MER_PD_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MER_PD_MATCH_FAIL_CNT_REG_MER_PD_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MER_PD_MATCH_FAIL_CNT_REG_MER_PD_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MER_PD_MATCH_FAIL_CNT_REG_MER_PD_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MET_ACC_MATCH_FAIL_CNT_REG_ADDR 0xa4000078 -#define MMC_MET_ACC_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MET_ACC_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MET_ACC_MATCH_FAIL_CNT_REG_MET_ACC_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MET_ACC_MATCH_FAIL_CNT_REG_MET_ACC_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MET_ACC_MATCH_FAIL_CNT_REG_MET_ACC_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MER_ACC_MATCH_FAIL_CNT_REG_ADDR 0xa400007c -#define MMC_MER_ACC_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MER_ACC_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MER_ACC_MATCH_FAIL_CNT_REG_MER_ACC_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MER_ACC_MATCH_FAIL_CNT_REG_MER_ACC_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MER_ACC_MATCH_FAIL_CNT_REG_MER_ACC_MACTH_FAIL_CNT_WIDTH 16 - 
-#define MMC_MET_BOUNDARY_MATCH_FAIL_CNT_REG_ADDR 0xa4000080 -#define MMC_MET_BOUNDARY_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MET_BOUNDARY_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MET_BOUNDARY_MATCH_FAIL_CNT_REG_MET_BOUNDARY_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MET_BOUNDARY_MATCH_FAIL_CNT_REG_MET_BOUNDARY_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MET_BOUNDARY_MATCH_FAIL_CNT_REG_MET_BOUNDARY_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MER_BOUNDARY_MATCH_FAIL_CNT_REG_ADDR 0xa4000084 -#define MMC_MER_BOUNDARY_MATCH_FAIL_CNT_REG_WIDTH 32 -#define MMC_MER_BOUNDARY_MATCH_FAIL_CNT_REG_LENGTH 32 -#define MMC_MER_BOUNDARY_MATCH_FAIL_CNT_REG_MER_BOUNDARY_MACTH_FAIL_CNT_MASK 0xffff -#define MMC_MER_BOUNDARY_MATCH_FAIL_CNT_REG_MER_BOUNDARY_MACTH_FAIL_CNT_SHIFT 0 -#define MMC_MER_BOUNDARY_MATCH_FAIL_CNT_REG_MER_BOUNDARY_MACTH_FAIL_CNT_WIDTH 16 - -#define MMC_MMC2MET_EOP_CNT_REG_ADDR 0xa4000090 -#define MMC_MMC2MET_EOP_CNT_REG_WIDTH 32 -#define MMC_MMC2MET_EOP_CNT_REG_LENGTH 32 -#define MMC_MMC2MET_EOP_CNT_REG_MMC2MET_EOP_CNT_MASK 0xffff -#define MMC_MMC2MET_EOP_CNT_REG_MMC2MET_EOP_CNT_SHIFT 0 -#define MMC_MMC2MET_EOP_CNT_REG_MMC2MET_EOP_CNT_WIDTH 16 - -#define MMC_MMC2MER_EOP_CNT_REG_ADDR 0xa4000094 -#define MMC_MMC2MER_EOP_CNT_REG_WIDTH 32 -#define MMC_MMC2MER_EOP_CNT_REG_LENGTH 32 -#define MMC_MMC2MER_EOP_CNT_REG_MMC2MER_EOP_CNT_MASK 0xffff -#define MMC_MMC2MER_EOP_CNT_REG_MMC2MER_EOP_CNT_SHIFT 0 -#define MMC_MMC2MER_EOP_CNT_REG_MMC2MER_EOP_CNT_WIDTH 16 - -#define MMC_MMC2MET_VLD_CNT_REG_ADDR 0xa4000098 -#define MMC_MMC2MET_VLD_CNT_REG_WIDTH 32 -#define MMC_MMC2MET_VLD_CNT_REG_LENGTH 32 -#define MMC_MMC2MET_VLD_CNT_REG_MMC2MET_VLD_CNT_MASK 0xffff -#define MMC_MMC2MET_VLD_CNT_REG_MMC2MET_VLD_CNT_SHIFT 0 -#define MMC_MMC2MET_VLD_CNT_REG_MMC2MET_VLD_CNT_WIDTH 16 - -#define MMC_MMC2MER_VLD_CNT_REG_ADDR 0xa400009c -#define MMC_MMC2MER_VLD_CNT_REG_WIDTH 32 -#define MMC_MMC2MER_VLD_CNT_REG_LENGTH 32 -#define MMC_MMC2MER_VLD_CNT_REG_MMC2MER_VLD_CNT_MASK 0xffff -#define 
MMC_MMC2MER_VLD_CNT_REG_MMC2MER_VLD_CNT_SHIFT 0 -#define MMC_MMC2MER_VLD_CNT_REG_MMC2MER_VLD_CNT_WIDTH 16 - -#define MMC_MER_NO_MAP_CNT_REG_ADDR 0xa40000a0 -#define MMC_MER_NO_MAP_CNT_REG_WIDTH 32 -#define MMC_MER_NO_MAP_CNT_REG_LENGTH 32 -#define MMC_MER_NO_MAP_CNT_REG_MER_NO_MAP_CNT_MASK 0xffff -#define MMC_MER_NO_MAP_CNT_REG_MER_NO_MAP_CNT_SHIFT 0 -#define MMC_MER_NO_MAP_CNT_REG_MER_NO_MAP_CNT_WIDTH 16 - -#define MMC_MET_NO_MAP_CNT_REG_ADDR 0xa40000a4 -#define MMC_MET_NO_MAP_CNT_REG_WIDTH 32 -#define MMC_MET_NO_MAP_CNT_REG_LENGTH 32 -#define MMC_MET_NO_MAP_CNT_REG_MET_NO_MAP_CNT_MASK 0xffff -#define MMC_MET_NO_MAP_CNT_REG_MET_NO_MAP_CNT_SHIFT 0 -#define MMC_MET_NO_MAP_CNT_REG_MET_NO_MAP_CNT_WIDTH 16 - -#define MMC_MMC2MET_FC_CNT_REG_ADDR 0xa40000a8 -#define MMC_MMC2MET_FC_CNT_REG_WIDTH 32 -#define MMC_MMC2MET_FC_CNT_REG_LENGTH 32 -#define MMC_MMC2MET_FC_CNT_REG_MMC2MET_FC_CNT_MASK 0xffffffff -#define MMC_MMC2MET_FC_CNT_REG_MMC2MET_FC_CNT_SHIFT 0 -#define MMC_MMC2MET_FC_CNT_REG_MMC2MET_FC_CNT_WIDTH 32 - -#define MMC_MMC2MER_FC_CNT_REG_ADDR 0xa40000ac -#define MMC_MMC2MER_FC_CNT_REG_WIDTH 32 -#define MMC_MMC2MER_FC_CNT_REG_LENGTH 32 -#define MMC_MMC2MER_FC_CNT_REG_MMC2MER_FC_CNT_MASK 0xffffffff -#define MMC_MMC2MER_FC_CNT_REG_MMC2MER_FC_CNT_SHIFT 0 -#define MMC_MMC2MER_FC_CNT_REG_MMC2MER_FC_CNT_WIDTH 32 - -#define MMC_MPT2MTT_FC_CNT_REG_ADDR 0xa40000b0 -#define MMC_MPT2MTT_FC_CNT_REG_WIDTH 32 -#define MMC_MPT2MTT_FC_CNT_REG_LENGTH 32 -#define MMC_MPT2MTT_FC_CNT_REG_MPT2MTT_FC_CNT_MASK 0xffffffff -#define MMC_MPT2MTT_FC_CNT_REG_MPT2MTT_FC_CNT_SHIFT 0 -#define MMC_MPT2MTT_FC_CNT_REG_MPT2MTT_FC_CNT_WIDTH 32 - -#define MMC_MET_STG1_ERR_QP_ID_STS_REG_ADDR 0xa40000b4 -#define MMC_MET_STG1_ERR_QP_ID_STS_REG_WIDTH 32 -#define MMC_MET_STG1_ERR_QP_ID_STS_REG_LENGTH 32 -#define MMC_MET_STG1_ERR_QP_ID_STS_REG_MET_STG1_ERR_QP_ID_STS_MASK 0x7fff -#define MMC_MET_STG1_ERR_QP_ID_STS_REG_MET_STG1_ERR_QP_ID_STS_SHIFT 0 -#define 
MMC_MET_STG1_ERR_QP_ID_STS_REG_MET_STG1_ERR_QP_ID_STS_WIDTH 15 - -#define MMC_MET_STG1_ERR_MPT_ADDR_STS_REG_ADDR 0xa40000b8 -#define MMC_MET_STG1_ERR_MPT_ADDR_STS_REG_WIDTH 32 -#define MMC_MET_STG1_ERR_MPT_ADDR_STS_REG_LENGTH 32 -#define MMC_MET_STG1_ERR_MPT_ADDR_STS_REG_MET_STG1_ERR_MPT_ADDR_STS_MASK 0x7fff -#define MMC_MET_STG1_ERR_MPT_ADDR_STS_REG_MET_STG1_ERR_MPT_ADDR_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_MPT_ADDR_STS_REG_MET_STG1_ERR_MPT_ADDR_STS_WIDTH 15 - -#define MMC_MET_STG1_ERR_MKEY_STS_REG_ADDR 0xa40000bc -#define MMC_MET_STG1_ERR_MKEY_STS_REG_WIDTH 32 -#define MMC_MET_STG1_ERR_MKEY_STS_REG_LENGTH 32 -#define MMC_MET_STG1_ERR_MKEY_STS_REG_MET_STG1_ERR_MKEY_STS_MASK 0xff -#define MMC_MET_STG1_ERR_MKEY_STS_REG_MET_STG1_ERR_MKEY_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_MKEY_STS_REG_MET_STG1_ERR_MKEY_STS_WIDTH 8 - -#define MMC_MET_STG1_ERR_VA_STS_REG_ADDR 0xa40000c0 -#define MMC_MET_STG1_ERR_VA_STS_REG_WIDTH 64 -#define MMC_MET_STG1_ERR_VA_STS_REG_LENGTH 64 -#define MMC_MET_STG1_ERR_VA_STS_REG_MET_STG1_ERR_VA_STS_MASK 0xffffffffffffffff -#define MMC_MET_STG1_ERR_VA_STS_REG_MET_STG1_ERR_VA_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_VA_STS_REG_MET_STG1_ERR_VA_STS_WIDTH 64 - -#define MMC_MET_STG1_ERR_LEN_STS_REG_ADDR 0xa40000c8 -#define MMC_MET_STG1_ERR_LEN_STS_REG_WIDTH 32 -#define MMC_MET_STG1_ERR_LEN_STS_REG_LENGTH 32 -#define MMC_MET_STG1_ERR_LEN_STS_REG_MET_STG1_ERR_LEN_STS_MASK 0xffffffff -#define MMC_MET_STG1_ERR_LEN_STS_REG_MET_STG1_ERR_LEN_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_LEN_STS_REG_MET_STG1_ERR_LEN_STS_WIDTH 32 - -#define MMC_MET_STG1_ERR_ACC_TYPE_STS_REG_ADDR 0xa40000cc -#define MMC_MET_STG1_ERR_ACC_TYPE_STS_REG_WIDTH 32 -#define MMC_MET_STG1_ERR_ACC_TYPE_STS_REG_LENGTH 32 -#define MMC_MET_STG1_ERR_ACC_TYPE_STS_REG_MET_STG1_ERR_ACC_TYPE_STS_MASK 0xf -#define MMC_MET_STG1_ERR_ACC_TYPE_STS_REG_MET_STG1_ERR_ACC_TYPE_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_ACC_TYPE_STS_REG_MET_STG1_ERR_ACC_TYPE_STS_WIDTH 4 - -#define MMC_MET_STG1_ERR_MPT_STS_REG_ADDR 
0xa40000e0 -#define MMC_MET_STG1_ERR_MPT_STS_REG_WIDTH 256 -#define MMC_MET_STG1_ERR_MPT_STS_REG_LENGTH 160 -#define MMC_MET_STG1_ERR_MPT_STS_REG_MET_STG1_ERR_MPT_STS_MASK 0xffffffffffffffff -#define MMC_MET_STG1_ERR_MPT_STS_REG_MET_STG1_ERR_MPT_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_MPT_STS_REG_MET_STG1_ERR_MPT_STS_WIDTH 153 - -#define MMC_MET_STG1_ERR_QPMCT_STS_REG_ADDR 0xa4000100 -#define MMC_MET_STG1_ERR_QPMCT_STS_REG_WIDTH 32 -#define MMC_MET_STG1_ERR_QPMCT_STS_REG_LENGTH 32 -#define MMC_MET_STG1_ERR_QPMCT_STS_REG_MET_STG1_ERR_QPMCT_STS_MASK 0xffffff -#define MMC_MET_STG1_ERR_QPMCT_STS_REG_MET_STG1_ERR_QPMCT_STS_SHIFT 0 -#define MMC_MET_STG1_ERR_QPMCT_STS_REG_MET_STG1_ERR_QPMCT_STS_WIDTH 24 - -#define MMC_MER_STG1_ERR_QP_ID_STS_REG_ADDR 0xa4000104 -#define MMC_MER_STG1_ERR_QP_ID_STS_REG_WIDTH 32 -#define MMC_MER_STG1_ERR_QP_ID_STS_REG_LENGTH 32 -#define MMC_MER_STG1_ERR_QP_ID_STS_REG_MER_STG1_ERR_QP_ID_STS_MASK 0x7fff -#define MMC_MER_STG1_ERR_QP_ID_STS_REG_MER_STG1_ERR_QP_ID_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_QP_ID_STS_REG_MER_STG1_ERR_QP_ID_STS_WIDTH 15 - -#define MMC_MER_STG1_ERR_MPT_ADDR_STS_REG_ADDR 0xa4000108 -#define MMC_MER_STG1_ERR_MPT_ADDR_STS_REG_WIDTH 32 -#define MMC_MER_STG1_ERR_MPT_ADDR_STS_REG_LENGTH 32 -#define MMC_MER_STG1_ERR_MPT_ADDR_STS_REG_MER_STG1_ERR_MPT_ADDR_STS_MASK 0x7fff -#define MMC_MER_STG1_ERR_MPT_ADDR_STS_REG_MER_STG1_ERR_MPT_ADDR_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_MPT_ADDR_STS_REG_MER_STG1_ERR_MPT_ADDR_STS_WIDTH 15 - -#define MMC_MER_STG1_ERR_MKEY_STS_REG_ADDR 0xa400010c -#define MMC_MER_STG1_ERR_MKEY_STS_REG_WIDTH 32 -#define MMC_MER_STG1_ERR_MKEY_STS_REG_LENGTH 32 -#define MMC_MER_STG1_ERR_MKEY_STS_REG_MER_STG1_ERR_MKEY_STS_MASK 0xff -#define MMC_MER_STG1_ERR_MKEY_STS_REG_MER_STG1_ERR_MKEY_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_MKEY_STS_REG_MER_STG1_ERR_MKEY_STS_WIDTH 8 - -#define MMC_MER_STG1_ERR_VA_STS_REG_ADDR 0xa4000110 -#define MMC_MER_STG1_ERR_VA_STS_REG_WIDTH 64 -#define MMC_MER_STG1_ERR_VA_STS_REG_LENGTH 64 
-#define MMC_MER_STG1_ERR_VA_STS_REG_MER_STG1_ERR_VA_STS_MASK 0xffffffffffffffff -#define MMC_MER_STG1_ERR_VA_STS_REG_MER_STG1_ERR_VA_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_VA_STS_REG_MER_STG1_ERR_VA_STS_WIDTH 64 - -#define MMC_MER_STG1_ERR_LEN_STS_REG_ADDR 0xa4000118 -#define MMC_MER_STG1_ERR_LEN_STS_REG_WIDTH 32 -#define MMC_MER_STG1_ERR_LEN_STS_REG_LENGTH 32 -#define MMC_MER_STG1_ERR_LEN_STS_REG_MER_STG1_ERR_LEN_STS_MASK 0xffffffff -#define MMC_MER_STG1_ERR_LEN_STS_REG_MER_STG1_ERR_LEN_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_LEN_STS_REG_MER_STG1_ERR_LEN_STS_WIDTH 32 - -#define MMC_MER_STG1_ERR_ACC_TYPE_STS_REG_ADDR 0xa400011c -#define MMC_MER_STG1_ERR_ACC_TYPE_STS_REG_WIDTH 32 -#define MMC_MER_STG1_ERR_ACC_TYPE_STS_REG_LENGTH 32 -#define MMC_MER_STG1_ERR_ACC_TYPE_STS_REG_MER_STG1_ERR_ACC_TYPE_STS_MASK 0xf -#define MMC_MER_STG1_ERR_ACC_TYPE_STS_REG_MER_STG1_ERR_ACC_TYPE_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_ACC_TYPE_STS_REG_MER_STG1_ERR_ACC_TYPE_STS_WIDTH 4 - -#define MMC_MER_STG1_ERR_MPT_STS_REG_ADDR 0xa4000120 -#define MMC_MER_STG1_ERR_MPT_STS_REG_WIDTH 256 -#define MMC_MER_STG1_ERR_MPT_STS_REG_LENGTH 160 -#define MMC_MER_STG1_ERR_MPT_STS_REG_MER_STG1_ERR_MPT_STS_MASK 0xffffffffffffffff -#define MMC_MER_STG1_ERR_MPT_STS_REG_MER_STG1_ERR_MPT_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_MPT_STS_REG_MER_STG1_ERR_MPT_STS_WIDTH 153 - -#define MMC_MER_STG1_ERR_QPMCT_STS_REG_ADDR 0xa4000140 -#define MMC_MER_STG1_ERR_QPMCT_STS_REG_WIDTH 32 -#define MMC_MER_STG1_ERR_QPMCT_STS_REG_LENGTH 32 -#define MMC_MER_STG1_ERR_QPMCT_STS_REG_MER_STG1_ERR_QPMCT_STS_MASK 0xffffff -#define MMC_MER_STG1_ERR_QPMCT_STS_REG_MER_STG1_ERR_QPMCT_STS_SHIFT 0 -#define MMC_MER_STG1_ERR_QPMCT_STS_REG_MER_STG1_ERR_QPMCT_STS_WIDTH 24 - -#define MMC_MET_STG2_ERR_MPT_PAGE_MODE_REG_ADDR 0xa4000144 -#define MMC_MET_STG2_ERR_MPT_PAGE_MODE_REG_WIDTH 32 -#define MMC_MET_STG2_ERR_MPT_PAGE_MODE_REG_LENGTH 32 -#define MMC_MET_STG2_ERR_MPT_PAGE_MODE_REG_MET_STG2_ERR_MPT_PAGE_MODE_MASK 0x3 -#define 
MMC_MET_STG2_ERR_MPT_PAGE_MODE_REG_MET_STG2_ERR_MPT_PAGE_MODE_SHIFT 0 -#define MMC_MET_STG2_ERR_MPT_PAGE_MODE_REG_MET_STG2_ERR_MPT_PAGE_MODE_WIDTH 2 - -#define MMC_MET_STG2_ERR_MPT_MTT_BASE_REG_ADDR 0xa4000148 -#define MMC_MET_STG2_ERR_MPT_MTT_BASE_REG_WIDTH 32 -#define MMC_MET_STG2_ERR_MPT_MTT_BASE_REG_LENGTH 32 -#define MMC_MET_STG2_ERR_MPT_MTT_BASE_REG_MET_STG2_ERR_MPT_MTT_BASE_MASK 0x3ffff -#define MMC_MET_STG2_ERR_MPT_MTT_BASE_REG_MET_STG2_ERR_MPT_MTT_BASE_SHIFT 0 -#define MMC_MET_STG2_ERR_MPT_MTT_BASE_REG_MET_STG2_ERR_MPT_MTT_BASE_WIDTH 18 - -#define MMC_MET_STG2_ERR_MPT_MEM_SZ_REG_ADDR 0xa400014c -#define MMC_MET_STG2_ERR_MPT_MEM_SZ_REG_WIDTH 32 -#define MMC_MET_STG2_ERR_MPT_MEM_SZ_REG_LENGTH 32 -#define MMC_MET_STG2_ERR_MPT_MEM_SZ_REG_MET_STG2_ERR_MPT_MEM_SZ_MASK 0xffffffff -#define MMC_MET_STG2_ERR_MPT_MEM_SZ_REG_MET_STG2_ERR_MPT_MEM_SZ_SHIFT 0 -#define MMC_MET_STG2_ERR_MPT_MEM_SZ_REG_MET_STG2_ERR_MPT_MEM_SZ_WIDTH 32 - -#define MMC_MET_STG2_ERR_MPT_VA_BASE_REG_ADDR 0xa4000150 -#define MMC_MET_STG2_ERR_MPT_VA_BASE_REG_WIDTH 64 -#define MMC_MET_STG2_ERR_MPT_VA_BASE_REG_LENGTH 64 -#define MMC_MET_STG2_ERR_MPT_VA_BASE_REG_MET_STG2_ERR_MPT_VA_BASE_MASK 0xffffffffffffffff -#define MMC_MET_STG2_ERR_MPT_VA_BASE_REG_MET_STG2_ERR_MPT_VA_BASE_SHIFT 0 -#define MMC_MET_STG2_ERR_MPT_VA_BASE_REG_MET_STG2_ERR_MPT_VA_BASE_WIDTH 64 - -#define MMC_MET_STG2_ERR_VA_STS_REG_ADDR 0xa4000158 -#define MMC_MET_STG2_ERR_VA_STS_REG_WIDTH 64 -#define MMC_MET_STG2_ERR_VA_STS_REG_LENGTH 64 -#define MMC_MET_STG2_ERR_VA_STS_REG_MET_STG2_ERR_VA_STS_MASK 0xffffffffffffffff -#define MMC_MET_STG2_ERR_VA_STS_REG_MET_STG2_ERR_VA_STS_SHIFT 0 -#define MMC_MET_STG2_ERR_VA_STS_REG_MET_STG2_ERR_VA_STS_WIDTH 64 - -#define MMC_MET_STG2_ERR_LEN_STS_REG_ADDR 0xa4000160 -#define MMC_MET_STG2_ERR_LEN_STS_REG_WIDTH 32 -#define MMC_MET_STG2_ERR_LEN_STS_REG_LENGTH 32 -#define MMC_MET_STG2_ERR_LEN_STS_REG_MET_STG2_ERR_LEN_STS_MASK 0xffffffff -#define MMC_MET_STG2_ERR_LEN_STS_REG_MET_STG2_ERR_LEN_STS_SHIFT 0 
-#define MMC_MET_STG2_ERR_LEN_STS_REG_MET_STG2_ERR_LEN_STS_WIDTH 32 - -#define MMC_MER_STG2_ERR_MPT_PAGE_MODE_REG_ADDR 0xa4000164 -#define MMC_MER_STG2_ERR_MPT_PAGE_MODE_REG_WIDTH 32 -#define MMC_MER_STG2_ERR_MPT_PAGE_MODE_REG_LENGTH 32 -#define MMC_MER_STG2_ERR_MPT_PAGE_MODE_REG_MER_STG2_ERR_MPT_PAGE_MODE_MASK 0x3 -#define MMC_MER_STG2_ERR_MPT_PAGE_MODE_REG_MER_STG2_ERR_MPT_PAGE_MODE_SHIFT 0 -#define MMC_MER_STG2_ERR_MPT_PAGE_MODE_REG_MER_STG2_ERR_MPT_PAGE_MODE_WIDTH 2 - -#define MMC_MER_STG2_ERR_MPT_MTT_BASE_REG_ADDR 0xa4000168 -#define MMC_MER_STG2_ERR_MPT_MTT_BASE_REG_WIDTH 32 -#define MMC_MER_STG2_ERR_MPT_MTT_BASE_REG_LENGTH 32 -#define MMC_MER_STG2_ERR_MPT_MTT_BASE_REG_MER_STG2_ERR_MPT_MTT_BASE_MASK 0x3ffff -#define MMC_MER_STG2_ERR_MPT_MTT_BASE_REG_MER_STG2_ERR_MPT_MTT_BASE_SHIFT 0 -#define MMC_MER_STG2_ERR_MPT_MTT_BASE_REG_MER_STG2_ERR_MPT_MTT_BASE_WIDTH 18 - -#define MMC_MER_STG2_ERR_MPT_MEM_SZ_REG_ADDR 0xa400016c -#define MMC_MER_STG2_ERR_MPT_MEM_SZ_REG_WIDTH 32 -#define MMC_MER_STG2_ERR_MPT_MEM_SZ_REG_LENGTH 32 -#define MMC_MER_STG2_ERR_MPT_MEM_SZ_REG_MER_STG2_ERR_MPT_MEM_SZ_MASK 0xffffffff -#define MMC_MER_STG2_ERR_MPT_MEM_SZ_REG_MER_STG2_ERR_MPT_MEM_SZ_SHIFT 0 -#define MMC_MER_STG2_ERR_MPT_MEM_SZ_REG_MER_STG2_ERR_MPT_MEM_SZ_WIDTH 32 - -#define MMC_MER_STG2_ERR_MPT_VA_BASE_REG_ADDR 0xa4000170 -#define MMC_MER_STG2_ERR_MPT_VA_BASE_REG_WIDTH 64 -#define MMC_MER_STG2_ERR_MPT_VA_BASE_REG_LENGTH 64 -#define MMC_MER_STG2_ERR_MPT_VA_BASE_REG_MER_STG2_ERR_MPT_VA_BASE_MASK 0xffffffffffffffff -#define MMC_MER_STG2_ERR_MPT_VA_BASE_REG_MER_STG2_ERR_MPT_VA_BASE_SHIFT 0 -#define MMC_MER_STG2_ERR_MPT_VA_BASE_REG_MER_STG2_ERR_MPT_VA_BASE_WIDTH 64 - -#define MMC_MER_STG2_ERR_VA_STS_REG_ADDR 0xa4000178 -#define MMC_MER_STG2_ERR_VA_STS_REG_WIDTH 64 -#define MMC_MER_STG2_ERR_VA_STS_REG_LENGTH 64 -#define MMC_MER_STG2_ERR_VA_STS_REG_MER_STG2_ERR_VA_STS_MASK 0xffffffffffffffff -#define MMC_MER_STG2_ERR_VA_STS_REG_MER_STG2_ERR_VA_STS_SHIFT 0 -#define 
MMC_MER_STG2_ERR_VA_STS_REG_MER_STG2_ERR_VA_STS_WIDTH 64 - -#define MMC_MER_STG2_ERR_LEN_STS_REG_ADDR 0xa4000180 -#define MMC_MER_STG2_ERR_LEN_STS_REG_WIDTH 32 -#define MMC_MER_STG2_ERR_LEN_STS_REG_LENGTH 32 -#define MMC_MER_STG2_ERR_LEN_STS_REG_MER_STG2_ERR_LEN_STS_MASK 0xffffffff -#define MMC_MER_STG2_ERR_LEN_STS_REG_MER_STG2_ERR_LEN_STS_SHIFT 0 -#define MMC_MER_STG2_ERR_LEN_STS_REG_MER_STG2_ERR_LEN_STS_WIDTH 32 - -#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 -#define MMC_MPT_TBL_MEM_WIDTH 256 -#define MMC_MPT_TBL_MEM_LENGTH 160 -#define MMC_MPT_TBL_MEM_DEPTH 32768 -#define MMC_MPT_TBL_MEM_MPT_CONTENT_MASK 0xffffffffffffffff -#define MMC_MPT_TBL_MEM_MPT_CONTENT_SHIFT 0 -#define MMC_MPT_TBL_MEM_MPT_CONTENT_WIDTH 153 - -#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 -#define MMC_MTT_TBL_MEM_WIDTH 64 -#define MMC_MTT_TBL_MEM_LENGTH 64 -#define MMC_MTT_TBL_MEM_DEPTH 65536 -#define MMC_MTT_TBL_MEM_MTT_CONTENT_MASK 0xfffffffffffff -#define MMC_MTT_TBL_MEM_MTT_CONTENT_SHIFT 0 -#define MMC_MTT_TBL_MEM_MTT_CONTENT_WIDTH 52 - -#define MMC_QPMCT_TBL_MEM_ADDR 0xa4280000 -#define MMC_QPMCT_TBL_MEM_WIDTH 32 -#define MMC_QPMCT_TBL_MEM_LENGTH 32 -#define MMC_QPMCT_TBL_MEM_DEPTH 32768 -#define MMC_QPMCT_TBL_MEM_QPMCT_CONTENT_MASK 0xffffff -#define MMC_QPMCT_TBL_MEM_QPMCT_CONTENT_SHIFT 0 -#define MMC_QPMCT_TBL_MEM_QPMCT_CONTENT_WIDTH 24 - -#define MMC_MEM_ERROR_INT_ADDR 0xa42a0000 -#define MMC_MEM_ERROR_INT_WIDTH 32 -#define MMC_MEM_ERROR_INT_LENGTH 32 -#define MMC_MEM_ERROR_INT_MPT_TBL_MEM_SB_ERR_MASK 0x1 -#define MMC_MEM_ERROR_INT_MPT_TBL_MEM_SB_ERR_SHIFT 0 -#define MMC_MEM_ERROR_INT_MPT_TBL_MEM_SB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MPT_TBL_MEM_DB_ERR_MASK 0x2 -#define MMC_MEM_ERROR_INT_MPT_TBL_MEM_DB_ERR_SHIFT 1 -#define MMC_MEM_ERROR_INT_MPT_TBL_MEM_DB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MTT_TBL_MEM_SB_ERR_MASK 0x4 -#define MMC_MEM_ERROR_INT_MTT_TBL_MEM_SB_ERR_SHIFT 2 -#define MMC_MEM_ERROR_INT_MTT_TBL_MEM_SB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MTT_TBL_MEM_DB_ERR_MASK 0x8 
-#define MMC_MEM_ERROR_INT_MTT_TBL_MEM_DB_ERR_SHIFT 3 -#define MMC_MEM_ERROR_INT_MTT_TBL_MEM_DB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_QPMCT_TBL_MEM_SB_ERR_MASK 0x10 -#define MMC_MEM_ERROR_INT_QPMCT_TBL_MEM_SB_ERR_SHIFT 4 -#define MMC_MEM_ERROR_INT_QPMCT_TBL_MEM_SB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_QPMCT_TBL_MEM_DB_ERR_MASK 0x20 -#define MMC_MEM_ERROR_INT_QPMCT_TBL_MEM_DB_ERR_SHIFT 5 -#define MMC_MEM_ERROR_INT_QPMCT_TBL_MEM_DB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MET2MMC_IN_FIFO_MEM_SB_ERR_MASK 0x40 -#define MMC_MEM_ERROR_INT_MET2MMC_IN_FIFO_MEM_SB_ERR_SHIFT 6 -#define MMC_MEM_ERROR_INT_MET2MMC_IN_FIFO_MEM_SB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MET2MMC_IN_FIFO_MEM_DB_ERR_MASK 0x80 -#define MMC_MEM_ERROR_INT_MET2MMC_IN_FIFO_MEM_DB_ERR_SHIFT 7 -#define MMC_MEM_ERROR_INT_MET2MMC_IN_FIFO_MEM_DB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MER2MMC_IN_FIFO_MEM_SB_ERR_MASK 0x100 -#define MMC_MEM_ERROR_INT_MER2MMC_IN_FIFO_MEM_SB_ERR_SHIFT 8 -#define MMC_MEM_ERROR_INT_MER2MMC_IN_FIFO_MEM_SB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MER2MMC_IN_FIFO_MEM_DB_ERR_MASK 0x200 -#define MMC_MEM_ERROR_INT_MER2MMC_IN_FIFO_MEM_DB_ERR_SHIFT 9 -#define MMC_MEM_ERROR_INT_MER2MMC_IN_FIFO_MEM_DB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MPT2MTT_IN_FIFO_MEM_SB_ERR_MASK 0x400 -#define MMC_MEM_ERROR_INT_MPT2MTT_IN_FIFO_MEM_SB_ERR_SHIFT 10 -#define MMC_MEM_ERROR_INT_MPT2MTT_IN_FIFO_MEM_SB_ERR_WIDTH 1 -#define MMC_MEM_ERROR_INT_MPT2MTT_IN_FIFO_MEM_DB_ERR_MASK 0x800 -#define MMC_MEM_ERROR_INT_MPT2MTT_IN_FIFO_MEM_DB_ERR_SHIFT 11 -#define MMC_MEM_ERROR_INT_MPT2MTT_IN_FIFO_MEM_DB_ERR_WIDTH 1 - -#define MMC_MEM_INIT_CTRL_ADDR 0xa42a0008 -#define MMC_MEM_INIT_CTRL_WIDTH 32 -#define MMC_MEM_INIT_CTRL_LENGTH 32 -#define MMC_MEM_INIT_CTRL_MPT_TBL_MEM_INIT_RST_N_MASK 0x1 -#define MMC_MEM_INIT_CTRL_MPT_TBL_MEM_INIT_RST_N_SHIFT 0 -#define MMC_MEM_INIT_CTRL_MPT_TBL_MEM_INIT_RST_N_WIDTH 1 -#define MMC_MEM_INIT_CTRL_MPT_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define MMC_MEM_INIT_CTRL_MPT_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 
-#define MMC_MEM_INIT_CTRL_MTT_TBL_MEM_INIT_RST_N_MASK 0x2 -#define MMC_MEM_INIT_CTRL_MTT_TBL_MEM_INIT_RST_N_SHIFT 1 -#define MMC_MEM_INIT_CTRL_MTT_TBL_MEM_INIT_RST_N_WIDTH 1 -#define MMC_MEM_INIT_CTRL_MTT_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define MMC_MEM_INIT_CTRL_MTT_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 -#define MMC_MEM_INIT_CTRL_QPMCT_TBL_MEM_INIT_RST_N_MASK 0x4 -#define MMC_MEM_INIT_CTRL_QPMCT_TBL_MEM_INIT_RST_N_SHIFT 2 -#define MMC_MEM_INIT_CTRL_QPMCT_TBL_MEM_INIT_RST_N_WIDTH 1 -#define MMC_MEM_INIT_CTRL_QPMCT_TBL_MEM_INIT_RST_N_MAX_VAL 0x1 -#define MMC_MEM_INIT_CTRL_QPMCT_TBL_MEM_INIT_RST_N_MIN_VAL 0x0 - -#define MMC_TIMEOUT_INT_REG_ADDR 0xa42a0010 -#define MMC_TIMEOUT_INT_REG_WIDTH 32 -#define MMC_TIMEOUT_INT_REG_LENGTH 32 -#define MMC_TIMEOUT_INT_REG_MPT_TBL_MEM_TIMEOUT_MASK 0x1 -#define MMC_TIMEOUT_INT_REG_MPT_TBL_MEM_TIMEOUT_SHIFT 0 -#define MMC_TIMEOUT_INT_REG_MPT_TBL_MEM_TIMEOUT_WIDTH 1 -#define MMC_TIMEOUT_INT_REG_MTT_TBL_MEM_TIMEOUT_MASK 0x2 -#define MMC_TIMEOUT_INT_REG_MTT_TBL_MEM_TIMEOUT_SHIFT 1 -#define MMC_TIMEOUT_INT_REG_MTT_TBL_MEM_TIMEOUT_WIDTH 1 -#define MMC_TIMEOUT_INT_REG_QPMCT_TBL_MEM_TIMEOUT_MASK 0x4 -#define MMC_TIMEOUT_INT_REG_QPMCT_TBL_MEM_TIMEOUT_SHIFT 2 -#define MMC_TIMEOUT_INT_REG_QPMCT_TBL_MEM_TIMEOUT_WIDTH 1 - -#define MMC_TIMEOUT_INT_MASK_REG_ADDR 0xa42a0014 -#define MMC_TIMEOUT_INT_MASK_REG_WIDTH 32 -#define MMC_TIMEOUT_INT_MASK_REG_LENGTH 32 -#define MMC_TIMEOUT_INT_MASK_REG_MPT_TBL_MEM_TIMEOUT_INT_MASK_MASK 0x1 -#define MMC_TIMEOUT_INT_MASK_REG_MPT_TBL_MEM_TIMEOUT_INT_MASK_SHIFT 0 -#define MMC_TIMEOUT_INT_MASK_REG_MPT_TBL_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define MMC_TIMEOUT_INT_MASK_REG_MPT_TBL_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define MMC_TIMEOUT_INT_MASK_REG_MPT_TBL_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define MMC_TIMEOUT_INT_MASK_REG_MTT_TBL_MEM_TIMEOUT_INT_MASK_MASK 0x2 -#define MMC_TIMEOUT_INT_MASK_REG_MTT_TBL_MEM_TIMEOUT_INT_MASK_SHIFT 1 -#define MMC_TIMEOUT_INT_MASK_REG_MTT_TBL_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define 
MMC_TIMEOUT_INT_MASK_REG_MTT_TBL_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define MMC_TIMEOUT_INT_MASK_REG_MTT_TBL_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 -#define MMC_TIMEOUT_INT_MASK_REG_QPMCT_TBL_MEM_TIMEOUT_INT_MASK_MASK 0x4 -#define MMC_TIMEOUT_INT_MASK_REG_QPMCT_TBL_MEM_TIMEOUT_INT_MASK_SHIFT 2 -#define MMC_TIMEOUT_INT_MASK_REG_QPMCT_TBL_MEM_TIMEOUT_INT_MASK_WIDTH 1 -#define MMC_TIMEOUT_INT_MASK_REG_QPMCT_TBL_MEM_TIMEOUT_INT_MASK_MAX_VAL 0x1 -#define MMC_TIMEOUT_INT_MASK_REG_QPMCT_TBL_MEM_TIMEOUT_INT_MASK_MIN_VAL 0x0 - -#define MMC_TIMEOUT_CFG_REG_ADDR 0xa42a0018 -#define MMC_TIMEOUT_CFG_REG_WIDTH 32 -#define MMC_TIMEOUT_CFG_REG_LENGTH 32 -#define MMC_TIMEOUT_CFG_REG_TIMEOUT_PARA_MASK 0xffff -#define MMC_TIMEOUT_CFG_REG_TIMEOUT_PARA_SHIFT 0 -#define MMC_TIMEOUT_CFG_REG_TIMEOUT_PARA_WIDTH 16 -#define MMC_TIMEOUT_CFG_REG_TIMEOUT_PARA_MAX_VAL 0xffff -#define MMC_TIMEOUT_CFG_REG_TIMEOUT_PARA_MIN_VAL 0x0 - -#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h index 1b68667e00699d7502ab1b16690c0c689b1a09b9..f537a77126a3dea673d99c5f2df7beb5b1d00e9d 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -8,9 +7,9 @@ #define XSC_CORE_CQ_H #include -#include -#include -#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" struct xsc_core_cq { u32 cqn; @@ -76,10 +75,10 @@ static inline void xsc_cq_set_ci(struct xsc_core_cq *cq) } int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, - struct xsc_create_cq_mbox_in *in, int inlen); + struct xsc_create_cq_mbox_in *in, int inlen); int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq); int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, - struct xsc_query_cq_mbox_out *out); + struct xsc_query_cq_mbox_out *out); int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq); void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq); diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/device.h b/drivers/net/ethernet/yunsilicon/xsc/common/device.h index 6c8c39097ddf7bcaae2ff1f3474a94081aabd4aa..1d1b0be093798ad76fa963b92b02f11a7a3d15ee 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/device.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/device.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h index ecbeeeb4c8f5b6460263fa55e82d4c8c5830eb88..6b9fdfb738d8f6947af63353f5f761090df77268 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -18,7 +17,7 @@ #define XSC_GET_DOORBELL_LOCK(ptr) (NULL) static inline void xsc_write64(__be32 val[2], void __iomem *dest, - spinlock_t *doorbell_lock) + spinlock_t *doorbell_lock) { __raw_writeq(*(u64 *)val, dest); } @@ -35,13 +34,13 @@ static inline void xsc_write64(__be32 val[2], void __iomem *dest, #define XSC_GET_DOORBELL_LOCK(ptr) (ptr) static inline void xsc_write64(__be32 val[2], void __iomem *dest, - spinlock_t *doorbell_lock) + spinlock_t *doorbell_lock) { unsigned long flags; spin_lock_irqsave(doorbell_lock, flags); - __raw_writel((__force u32) val[0], dest); - __raw_writel((__force u32) val[1], dest + 4); + __raw_writel((__force u32)val[0], dest); + __raw_writel((__force u32)val[1], dest + 4); spin_unlock_irqrestore(doorbell_lock, flags); } diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h index 1fd8029eb1f9b9df26c9fe3e1b5ecf2ce1228f15..44d3ff5586524b1c063d40d2a65bf844cfcc7cf1 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -14,17 +13,17 @@ #include #include #include -#include -#include -#include -#include -#include -#include - -#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK)) -#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT) -#define LS_32(val, field) ((val << field ## _SHIFT) & (field ## _MASK)) -#define RS_32(val, field) ((val & field ## _MASK) >> field ## _SHIFT) +#include "common/device.h" +#include "common/doorbell.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "common/qpts.h" + +#define LS_64(val, field) (((u64)(val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_64(val, field) ((u64)((val) & field ## _MASK) >> field ## _SHIFT) +#define LS_32(val, field) (((val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_32(val, field) (((val) & field ## _MASK) >> field ## _SHIFT) enum { CMD_OWNER_SW = 0x0, @@ -78,28 +77,6 @@ struct xsc_rsc_debug { struct xsc_field_desc fields[0]; }; -struct __packed xsc_qp_trace { - u16 main_ver; - u16 sub_ver; - u32 pid; - u16 qp_type; - u16 af_type; - union { - u32 s_addr4; - u8 s_addr6[16]; - } s_addr; - union { - u32 d_addr4; - u8 d_addr6[16]; - } d_addr; - u16 s_port; - u16 d_port; - u32 affinity_idx; - u64 timestamp; - u32 lqpn; - u32 rqpn; -}; - struct xsc_buf_list { void *buf; dma_addr_t map; @@ -204,15 +181,13 @@ struct xsc_dev_resource { struct xsc_cq_table cq_table; struct xsc_eq_table eq_table; struct xsc_irq_info *irq_info; - spinlock_t mkey_lock; + spinlock_t mkey_lock; /* protect mkey */ u8 mkey_key; - struct mutex alloc_mutex; + struct mutex alloc_mutex; /* protect buffer alocation according to numa node */ int numa_node; - struct workqueue_struct *pg_wq; - struct rb_root page_root; int fw_pages; int reg_pages; - struct mutex pgdir_mutex; + struct mutex pgdir_mutex; /* protect pgdir_list */ struct list_head pgdir_list; struct dentry *qp_debugfs; struct dentry *eq_debugfs; @@ -290,26 +265,18 @@ int 
xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr); int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, int out_size); int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, - struct xsc_buf *buf); + struct xsc_buf *buf); void xsc_buf_free(struct xsc_core_device *dev, struct xsc_buf *buf); int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, - struct xsc_register_mr_mbox_in *in, int inlen); + struct xsc_register_mr_mbox_in *in, int inlen); int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr); void xsc_reg_local_dma_mr(struct xsc_core_device *dev); int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn); int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn); int xsc_core_mad_ifc(struct xsc_core_device *xdev, void *inb, void *outb, - u16 opmod, int port); -int xsc_pagealloc_init(struct xsc_core_device *xdev); -void xsc_pagealloc_cleanup(struct xsc_core_device *xdev); -int xsc_pagealloc_start(struct xsc_core_device *xdev); -void xsc_pagealloc_stop(struct xsc_core_device *xdev); -void xsc_core_req_pages_handler(struct xsc_core_device *xdev, u16 func_id, - s16 npages); -int xsc_satisfy_startup_pages(struct xsc_core_device *xdev); -int xsc_reclaim_startup_pages(struct xsc_core_device *xdev); + u16 opmod, int port); void xsc_register_debugfs(void); void xsc_unregister_debugfs(void); int xsc_eq_init(struct xsc_core_device *dev); @@ -318,10 +285,10 @@ void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages); void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages); void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type); int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, - unsigned int *irqn); + unsigned int *irqn); void 
xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type); int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, - int nent, const char *name); + int nent, const char *name); int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq); int xsc_start_eqs(struct xsc_core_device *dev); void xsc_stop_eqs(struct xsc_core_device *dev); @@ -329,14 +296,14 @@ void xsc_stop_eqs(struct xsc_core_device *dev); int xsc_qp_debugfs_init(struct xsc_core_device *dev); void xsc_qp_debugfs_cleanup(struct xsc_core_device *dev); int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, - int size_in, void *data_out, int size_out, - u16 reg_num, int arg, int write); + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps); int xsc_debug_eq_add(struct xsc_core_device *xdev, struct xsc_eq *eq); void xsc_debug_eq_remove(struct xsc_core_device *xdev, struct xsc_eq *eq); int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, - struct xsc_query_eq_mbox_out *out, int outlen); + struct xsc_query_eq_mbox_out *out, int outlen); int xsc_eq_debugfs_init(struct xsc_core_device *dev); void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev); int xsc_cq_debugfs_init(struct xsc_core_device *dev); @@ -351,7 +318,7 @@ void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev); int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node); int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, - struct xsc_frag_buf *buf, int node); + struct xsc_frag_buf *buf, int node); void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db); void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf); diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/port.h b/drivers/net/ethernet/yunsilicon/xsc/common/port.h index 
f27cb886cce036fc2c69995d0fc45f967522d082..5edb854e0fcd279ced9803888981922d09761dc5 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/port.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/port.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -32,8 +31,8 @@ struct xsc_module_eeprom_query_params { }; int xsc_query_module_eeprom(struct xsc_core_device *dev, - u16 offset, u16 size, u8 *data); + u16 offset, u16 size, u8 *data); int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, - struct xsc_module_eeprom_query_params *params, - u8 *data); + struct xsc_module_eeprom_query_params *params, + u8 *data); #endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h index aece803310bdc1c93abf8124f21583eb679db879..d6c073dc24436a8d9a48c456de5f945a52d4ce20 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h @@ -1,15 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #ifndef XSC_QP_H #define XSC_QP_H -#include -#include -#include +#include "common/xsc_hsi.h" +#include "common/device.h" +#include "common/driver.h" enum { XSC_QP_PM_MIGRATED = 0x3, @@ -35,7 +34,7 @@ struct xsc_send_wqe_ctrl_seg { u8 has_pph:1; u8 so_type:1; __le16 so_data_size:14; - u8 rsvd:8; + u8:8; u8 so_hdr_len:8; }; struct { @@ -46,20 +45,20 @@ struct xsc_send_wqe_ctrl_seg { }; __le32 se:1; __le32 ce:1; - __le32 rsvd1:30; + __le32:30; }; struct xsc_wqe_data_seg { union { __le32 in_line:1; struct { - __le32 rsvd:1; + __le32:1; __le32 seg_len:31; __le32 mkey; __le64 va; }; struct { - __le32 rsvd1:1; + __le32:1; __le32 len:7; u8 in_line_data[15]; }; @@ -105,7 +104,7 @@ struct xsc_wqe_data_seg_2 { }; struct xsc_core_qp { - void (*event)(struct xsc_core_qp *qp, int event); + void (*event)(struct xsc_core_qp *qp, int type); int qpn; atomic_t refcount; struct completion free; @@ -117,6 +116,7 @@ struct xsc_core_qp { struct xsc_qp_trace *trace_info; u16 qp_type_internal; u16 grp_id; + struct completion delayed_release; }; struct xsc_qp_path { @@ -155,22 +155,22 @@ static inline struct xsc_core_qp *__xsc_qp_lookup(struct xsc_core_device *xdev, } int create_resource_common(struct xsc_core_device *xdev, - struct xsc_core_qp *qp); + struct xsc_core_qp *qp); void destroy_resource_common(struct xsc_core_device *xdev, - struct xsc_core_qp *qp); + struct xsc_core_qp *qp); int xsc_core_create_qp(struct xsc_core_device *xdev, - struct xsc_core_qp *qp, - struct xsc_create_qp_mbox_in *in, - int inlen); + struct xsc_core_qp *qp, + struct xsc_create_qp_mbox_in *in, + int inlen); int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, - enum xsc_qp_state new_state, - struct xsc_modify_qp_mbox_in *in, int sqd_event, - struct xsc_core_qp *qp); + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp); int xsc_core_destroy_qp(struct xsc_core_device *xdev, - struct xsc_core_qp *qp); + struct xsc_core_qp 
*qp); int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, - struct xsc_query_qp_mbox_out *out, int outlen); + struct xsc_query_qp_mbox_out *out, int outlen); void xsc_init_qp_table(struct xsc_core_device *xdev); void xsc_cleanup_qp_table(struct xsc_core_device *xdev); @@ -180,4 +180,8 @@ void xsc_debug_qp_remove(struct xsc_core_device *xdev, struct xsc_core_qp *qp); int xsc_create_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); void xsc_remove_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_init_delayed_release(void); +void xsc_stop_delayed_release(void); +void xsc_add_to_delayed_release_list(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + #endif /* XSC_QP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h new file mode 100644 index 0000000000000000000000000000000000000000..57eb829f811b1dd8b4e41f8daf2545caf3565780 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __QPTS_H__ +#define __QPTS_H__ + +struct __packed xsc_qp_trace { + u16 main_ver; + u16 sub_ver; + u32 pid; + u16 qp_type; + u16 af_type; + union { + u32 s_addr4; + u8 s_addr6[16]; + } s_addr; + union { + u32 d_addr4; + u8 d_addr6[16]; + } d_addr; + u16 s_port; + u16 d_port; + u32 affinity_idx; + u64 timestamp; + u32 lqpn; + u32 rqpn; +}; + +struct __packed qpt_update_affinity { + u32 aff_new; + u32 aff_old; +}; + +struct __packed qpt_update_sport { + u16 port_new; + u16 port_old; +}; + +struct __packed qpt_update_data { + u64 timestamp; + u32 qpn; + u32 bus; + u32 dev; + u32 fun; + union { + struct qpt_update_affinity affinity; + struct qpt_update_sport sport; + } update; +}; + +struct __packed xsc_qpt_update_msg { + u16 main_ver; + u16 sub_ver; + u32 type; //0:UPDATE_TYPE_SPORT; 1:UPDATE_TYPE_AFFINITY + struct qpt_update_data data; +}; + +enum { + YS_QPTRACE_UPDATE_TYPE_SPORT = 0, + YS_QPTRACE_UPDATE_TYPE_AFFINITY, +}; + +#define YS_QPTRACE_VER_MAJOR 2 +#define YS_QPTRACE_VER_MINOR 0 + +int qpts_init(void); +void qpts_fini(void); +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..fa2abe448608e3aded6d3dcbb7f0095fa423bee1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef RES_OBJ_H +#define RES_OBJ_H + +#include +#include +#include "common/xsc_core.h" + +struct xsc_res_obj { + struct list_head node; + struct xsc_bdf_file *file; + void (*release_method)(void *obj); + char *data; + unsigned int datalen; +}; + +struct xsc_pd_obj { + struct xsc_res_obj obj; + unsigned int pdn; +}; + +struct xsc_mr_obj { + struct xsc_res_obj obj; + unsigned int mkey; +}; + +struct xsc_cq_obj { + struct xsc_res_obj obj; + unsigned int cqn; +}; + +struct xsc_qp_obj { + struct xsc_res_obj obj; + unsigned int qpn; +}; + +struct xsc_pct_obj { + struct xsc_res_obj obj; + unsigned int pct_idx; +}; + +struct xsc_wct_obj { + struct xsc_res_obj obj; + unsigned int wct_idx; +}; + +struct xsc_em_obj { + struct xsc_res_obj obj; + unsigned int em_idx[54]; +}; + +struct xsc_flow_pct_v4_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v4_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +enum RES_OBJ_TYPE { + RES_OBJ_PD, + RES_OBJ_MR, + RES_OBJ_CQ, + RES_OBJ_QP, + RES_OBJ_PCT, + RES_OBJ_WCT, + RES_OBJ_EM, + RES_OBJ_MAX +}; + +static inline unsigned long xsc_idx_to_key(unsigned int obj_type, unsigned int idx) +{ + return ((unsigned long)obj_type << 32) | idx; +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, unsigned int pdn, + char *data, unsigned int datalen); +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn); + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, unsigned int mkey, + char *data, unsigned int datalen); +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey); + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen); +void xsc_destroy_cq_obj(struct xsc_bdf_file *file, unsigned int 
cqn); + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen); +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn); + +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen); +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority); + +void xsc_close_bdf_file(struct xsc_bdf_file *file); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/version.h b/drivers/net/ethernet/yunsilicon/xsc/common/version.h index 4c3b27df3864d6c8d13ce879db9497821e2321bd..39c7d25927542e409b35e9737d351e0e05776618 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/version.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/version.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -8,4 +7,4 @@ #define MAJOR_VERSION 1 #define MINOR_VERSION 0 #define HOTFIX_NUM 0 -#define BUILD_VERSION 28 +#define BUILD_VERSION 375 diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h index a5cc5a9ed40f93adf7ae2b9279a5b81507526d9e..68324d03866ac5836e2e248678f7732ed3449b87 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h @@ -1,19 +1,19 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #ifndef XSC_VPORT_H #define XSC_VPORT_H -#include +#include "common/xsc_core.h" #include +#include "common/xsc_fs.h" #define XSC_VPORT_PF_PLACEHOLDER (1u) #define XSC_VPORT_UPLINK_PLACEHOLDER (1u) #define XSC_VPORT_ECPF_PLACEHOLDER(dev) (xsc_ecpf_vport_exists(dev) || \ - xsc_core_is_ecpf_esw_manager(dev)) + xsc_core_is_ecpf_esw_manager(dev)) #define XSC_SPECIAL_VPORTS(dev) (XSC_VPORT_PF_PLACEHOLDER + \ XSC_VPORT_UPLINK_PLACEHOLDER + \ @@ -43,77 +43,76 @@ enum { u8 xsc_query_vport_state(struct xsc_core_device *dev, u8 opmod, u16 vport); int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u8 opmod, - u16 vport, u8 other_vport, u8 state); + u16 vport, u8 other_vport, u8 state); int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr); + u16 vport, u8 *addr); int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr); + u16 vport, u8 *addr); int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, - u16 vport, u8 *min_inline); + u16 vport, u8 *min_inline); void xsc_query_min_inline(struct xsc_core_device *dev, u8 *min_inline); int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, - u16 vport, u8 min_inline); + u16 vport, u8 min_inline); int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr); + u16 vport, u8 *addr, bool perm_mac); int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr); + u16 vport, u8 *addr, bool perm_mac); int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu); int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu); int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, - u64 *system_image_guid); + u64 *system_image_guid); int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, - u64 *node_guid); + u64 *node_guid); int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, - u16 vport, u64 node_guid); + u16 vport, u64 
node_guid); int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, - u16 vport, u64 node_guid); + u16 vport, u64 node_guid); int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, - u16 *qkey_viol_cntr); + u16 *qkey_viol_cntr); int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, - u8 port_num, u16 vf_num, u16 gid_index, - union ib_gid *gid); + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, - u8 port_num, u16 vf_num, u16 pkey_index, - u16 *pkey); + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); int xsc_query_hca_vport_context(struct xsc_core_device *dev, - u8 other_vport, u8 port_num, - u16 vf_num, - struct xsc_hca_vport_context *rep); + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep); int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, - u64 *node_guid); + u64 *node_guid); int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, - u16 vport, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, enum xsc_list_type list_type, u8 addr_list[][ETH_ALEN], - int *list_size); -int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, - enum xsc_list_type list_type, - u8 addr_list[][ETH_ALEN], - int list_size); + int list_size); int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, - u16 vport, - int *promisc_uc, - int *promisc_mc, - int *promisc_all); + u16 vport, + int *promisc_uc, + int *promisc_mc, + int *promisc_all); int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, - int promisc_uc, - int promisc_mc, - int promisc_all); + int promisc_uc, + int promisc_mc, + int promisc_all); int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, - unsigned long *vlans); + unsigned long *vlans); int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, - 
u16 vlans[], - int list_size); + u16 vid, bool add); int xsc_query_vport_down_stats(struct xsc_core_device *dev, u16 vport, - u8 other_vport, u64 *rx_discard_vport_down, - u64 *tx_discard_vport_down); + u8 other_vport, u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, - int vf, u8 port_num, void *out, - size_t out_sz); + int vf, u8 port_num, void *out, + size_t out_sz); int xsc_modify_hca_vport_context(struct xsc_core_device *dev, - u8 other_vport, u8 port_num, - int vf, - struct xsc_hca_vport_context *req); + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req); //int xsc_nic_vport_enable_roce(struct xsc_core_device *dev); //int xsc_nic_vport_disable_roce(struct xsc_core_device *dev); diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..c89462a5ea5aa2325b1226a730da882cdcc26d98 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +/* generated time: + * Mon Jan 22 11:36:34 CST 2024 + */ + +#ifndef XSC_HW_H +#define XSC_HW_H + +//hif_irq_csr_defines.h +#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100050 + +//hif_cpm_csr_defines.h +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 + +//mmc_csr_defines.h +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 65536 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 + +//clsf_dma_csr_defines.h +#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010038 +#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa6010090 +#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa6010080 +#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa6010094 +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010010 +#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 +#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010014 +#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010018 + +//hif_tbl_csr_defines.h +#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 +#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 +#define 
HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 +#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c +#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c +#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 +#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 +#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 +#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 + +//hif_cmdqm_csr_defines.h +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1023000 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1024000 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa1027000 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1028000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1021000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1022000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa1025000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa1026000 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa102a000 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1029000 + +//PSV use +//hif_irq_csr_defines.h +#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa1100094 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa11000f4 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000d0 +#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa11000cc +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000d4 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000d8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000dc +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa11000e0 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000c8 + +#endif /* XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h 
b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h index 56bafdd62d032796a550c9111a14799c8a9c37f7..7ec85df423265fc5307c927e2fa30b58e4319026 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h @@ -1,13 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #ifndef XSC_CMD_H #define XSC_CMD_H -#define CMDQ_VERSION 0xe +#define CMDQ_VERSION 0x19 #define QOS_PRIO_MAX 7 #define QOS_DSCP_MAX 63 @@ -20,6 +19,33 @@ #define MAX_PKT_LEN 9800 #define XSC_RTT_CFG_QPN_MAX 32 +#define XSC_PCIE_LAT_INIT_INTERVAL_MIN 200 +#define XSC_PCIE_LAT_CFG_INTERVAL_MAX 8 +#define XSC_PCIE_LAT_CFG_HISTOGRAM_MAX 9 +#define XSC_PCIE_LAT_EN_DISABLE 0 +#define XSC_PCIE_LAT_EN_ENABLE 1 +#define XSC_PCIE_LAT_INTERVAL_MIN 100 +#define XSC_PCIE_LAT_INTERVAL_MAX 30000 +#define XSC_PCIE_LAT_PERIOD_MIN 1 +#define XSC_PCIE_LAT_PERIOD_MAX 20 +#define DPU_PORT_WGHT_CFG_MAX 1 + +enum { + DPU_PORT_WGHT_TARGET_HOST, + DPU_PORT_WGHT_TARGET_SOC, + DPU_PORT_WGHT_TARGET_NUM, +}; + +enum { + DPU_PRIO_WGHT_TARGET_HOST2SOC, + DPU_PRIO_WGHT_TARGET_SOC2HOST, + DPU_PRIO_WGHT_TARGET_HOSTSOC2LAG, + DPU_PRIO_WGHT_TARGET_NUM, +}; + +#define XSC_AP_FEAT_UDP_SPORT_MIN 1024 +#define XSC_AP_FEAT_UDP_SPORT_MAX 65535 + enum { XSC_CMD_OP_QUERY_HCA_CAP = 0x100, XSC_CMD_OP_QUERY_ADAPTER = 0x101, @@ -27,6 +53,7 @@ enum { XSC_CMD_OP_TEARDOWN_HCA = 0x103, XSC_CMD_OP_ENABLE_HCA = 0x104, XSC_CMD_OP_DISABLE_HCA = 0x105, + XSC_CMD_OP_MODIFY_HCA = 0x106, XSC_CMD_OP_QUERY_PAGES = 0x107, XSC_CMD_OP_MANAGE_PAGES = 0x108, XSC_CMD_OP_SET_HCA_CAP = 0x109, @@ -34,6 +61,8 @@ enum { XSC_CMD_OP_QUERY_MSIX_TBL_INFO = 0x10b, XSC_CMD_OP_FUNCTION_RESET = 0x10c, XSC_CMD_OP_DUMMY = 0x10d, + XSC_CMD_OP_SET_DEBUG_INFO = 0x10e, + XSC_CMD_OP_QUERY_PSV_FUNCID = 0x10f, XSC_CMD_OP_CREATE_MKEY = 0x200, 
XSC_CMD_OP_QUERY_MKEY = 0x201, @@ -41,6 +70,8 @@ enum { XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, XSC_CMD_OP_REG_MR = 0x204, XSC_CMD_OP_DEREG_MR = 0x205, + XSC_CMD_OP_SET_MPT = 0x206, + XSC_CMD_OP_SET_MTT = 0x207, XSC_CMD_OP_CREATE_EQ = 0x301, XSC_CMD_OP_DESTROY_EQ = 0x302, @@ -77,6 +108,7 @@ enum { XSC_CMD_OP_CREATE_MULTI_QP = 0x515, XSC_CMD_OP_ALLOC_MULTI_VIRTQ = 0x516, XSC_CMD_OP_RELEASE_MULTI_VIRTQ = 0x517, + XSC_CMD_OP_QUERY_QP_FLUSH_STATUS = 0x518, XSC_CMD_OP_CREATE_PSV = 0x600, XSC_CMD_OP_DESTROY_PSV = 0x601, @@ -120,7 +152,8 @@ enum { XSC_CMD_OP_QUERY_VPORT_COUNTER = 0x828, XSC_CMD_OP_QUERY_PRIO_STATS = 0x829, XSC_CMD_OP_QUERY_PHYPORT_STATE = 0x830, - XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831, + XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831, + XSC_CMD_OP_QUERY_LINK_INFO = 0x832, XSC_CMD_OP_LAG_CREATE = 0x840, XSC_CMD_OP_LAG_MODIFY = 0x841, @@ -147,6 +180,10 @@ enum { XSC_CMD_OP_IOCTL_GET_SP = 0x100d, XSC_CMD_OP_IOCTL_SET_WEIGHT = 0x100e, XSC_CMD_OP_IOCTL_GET_WEIGHT = 0x100f, + XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT = 0x1010, + XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT = 0x1011, + XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT = 0x1012, + XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT = 0x1013, XSC_CMD_OP_IOCTL_SET_ENABLE_RP = 0x1030, XSC_CMD_OP_IOCTL_SET_ENABLE_NP = 0x1031, @@ -167,8 +204,9 @@ enum { XSC_CMD_OP_IOCTL_SET_CNP_PCP = 0x1040, XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA = 0x1041, XSC_CMD_OP_IOCTL_GET_CC_CFG = 0x1042, - XSC_CMD_OP_IOCTL_GET_CC_STAT = 0x104a, + XSC_CMD_OP_IOCTL_GET_CC_STAT = 0x104b, XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE = 0x1052, + XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR = 0x1053, XSC_CMD_OP_IOCTL_SET_HWC = 0x1060, XSC_CMD_OP_IOCTL_GET_HWC = 0x1061, @@ -190,6 +228,11 @@ enum { XSC_CMD_OP_SET_LED_STATUS = 0X1228, + XSC_CMD_OP_AP_FEAT = 0x1400, + XSC_CMD_OP_PCIE_LAT_FEAT = 0x1401, + + XSC_CMD_OP_USER_EMU_CMD = 0x8000, + XSC_CMD_OP_MAX }; @@ -216,8 +259,8 @@ enum xsc_eth_vf_num_sel { }; enum { - XSC_CMD_RESP_LINKSPEED_MODE_25G = 0x0, - XSC_CMD_RESP_LINKSPEED_MODE_100G = 0x1 + 
XSC_CMD_RESP_LINKSPEED_MODE_25G = 25000, + XSC_CMD_RESP_LINKSPEED_MODE_100G = 100000 }; enum xsc_dma_direct { @@ -228,6 +271,15 @@ enum xsc_dma_direct { DMA_DIR_MAX, }; +/* hw feature bitmap, 32bit */ +enum xsc_hw_feature_flag { + XSC_HW_RDMA_SUPPORT = 0x1, + XSC_HW_SECOND_FEATURE = 0x2, + XSC_HW_THIRD_FEATURE = 0x4, + + XSC_HW_LAST_FEATURE = 0x80000000, +}; + struct xsc_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -298,7 +350,8 @@ struct xsc_create_qp_request { __be16 cqn_send; __be16 cqn_recv; __be16 glb_funcid; - __be16 rsvd; + /*rsvd,rename logic_port used to transfer logical_port to fw*/ + __be16 logic_port; __be64 pas[0]; }; @@ -306,6 +359,7 @@ struct xsc_create_qp_mbox_in { struct xsc_inbox_hdr hdr; struct xsc_create_qp_request req; }; + struct xsc_create_qp_mbox_out { struct xsc_outbox_hdr hdr; __be32 qpn; @@ -316,7 +370,6 @@ struct xsc_destroy_qp_mbox_in { struct xsc_inbox_hdr hdr; __be32 qpn; u8 rsvd[4]; - }; struct xsc_destroy_qp_mbox_out { @@ -324,6 +377,15 @@ struct xsc_destroy_qp_mbox_out { u8 rsvd[8]; }; +struct xsc_query_qp_flush_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; +}; + +struct xsc_query_qp_flush_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + struct xsc_qp_context { __be32 remote_qpn; __be32 cqn_send; @@ -372,6 +434,7 @@ struct xsc_modify_qp_mbox_in { struct xsc_inbox_hdr hdr; __be32 qpn; struct xsc_qp_context ctx; + u8 no_need_wait; }; struct xsc_modify_qp_mbox_out { @@ -463,6 +526,7 @@ struct xsc_destroy_eq_mbox_in { struct xsc_inbox_hdr hdr; __be32 eqn; u8 rsvd[4]; + }; struct xsc_destroy_eq_mbox_out { @@ -490,6 +554,7 @@ struct xsc_dealloc_pd_mbox_in { struct xsc_inbox_hdr hdr; __be32 pdn; u8 rsvd[4]; + }; struct xsc_dealloc_pd_mbox_out { @@ -533,6 +598,45 @@ struct xsc_unregister_mr_mbox_out { u8 rsvd[8]; }; +struct xsc_mpt_item { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd[5]; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; +}; + +struct xsc_set_mpt_mbox_in { + struct xsc_inbox_hdr 
hdr; + struct xsc_mpt_item mpt_item; +}; + +struct xsc_set_mpt_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mtt_base; + u8 rsvd[4]; +}; + +struct xsc_mtt_setting { + __be32 mtt_base; + __be32 pa_num; + __be64 pas[0]; +}; + +struct xsc_set_mtt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mtt_setting mtt_setting; +}; + +struct xsc_set_mtt_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + struct xsc_create_mkey_mbox_in { struct xsc_inbox_hdr hdr; u8 rsvd[4]; @@ -700,12 +804,16 @@ struct xsc_hca_cap { u8 log_max_xrcd; u8 rsvd26[40]; __be32 uar_page_sz; - u8 rsvd27[28]; + u8 rsvd27[8]; + __be32 hw_feature_flag;/*enum xsc_hw_feature_flag*/ + __be16 funcid[8]; u8 log_msx_atomic_size_qp; u8 rsvd28[2]; u8 log_msx_atomic_size_dc; u8 board_sn[XSC_BOARD_SN_LEN]; - u8 rsvd29[12]; + u8 max_tc; + u8 rsvd29[10]; + u8 nif_port_num; __be32 hca_core_clock; __be32 max_rwq_indirection_tables;/*rss_caps*/ __be32 max_rwq_indirection_table_size;/*rss_caps*/ @@ -737,55 +845,53 @@ struct xsc_cmd_query_hca_cap_mbox_out { struct xsc_hca_cap hca_cap; }; -struct xsc_cmd_init_hca_mbox_in { - struct xsc_inbox_hdr hdr; - u8 rsvd0[2]; - __be16 profile; - u8 rsvd1[4]; -}; - -struct xsc_cmd_init_hca_mbox_out { - struct xsc_outbox_hdr hdr; - u8 rsvd[8]; -}; - -struct xsc_cmd_teardown_hca_mbox_in { +struct xsc_cmd_enable_hca_mbox_in { struct xsc_inbox_hdr hdr; - u8 rsvd0[2]; - __be16 profile; - u8 rsvd1[4]; + u8 pf; + u8 pcie; + u8 pf_id; + u8 rsvd0; + __be16 vf_num; + __be16 max_msix_vec; + __be16 cpu_num; + u8 pp_bypass; + u8 esw_mode; }; -struct xsc_cmd_teardown_hca_mbox_out { +struct xsc_cmd_enable_hca_mbox_out { struct xsc_outbox_hdr hdr; - u8 rsvd[8]; + u8 status; + u8 rsvd0[3]; }; -struct xsc_cmd_enable_hca_mbox_in { +struct xsc_cmd_disable_hca_mbox_in { struct xsc_inbox_hdr hdr; u8 pf; u8 pcie; u8 pf_id; - __be16 vf_id; - u8 rsvd0[3]; + u8 rsvd0; + __be16 vf_num; + u8 pp_bypass; + u8 esw_mode; }; -struct xsc_cmd_enable_hca_mbox_out { +struct xsc_cmd_disable_hca_mbox_out { 
struct xsc_outbox_hdr hdr; u8 status; u8 rsvd0[3]; }; -struct xsc_cmd_disable_hca_mbox_in { +struct xsc_cmd_modify_hca_mbox_in { struct xsc_inbox_hdr hdr; u8 pf; u8 pcie; u8 pf_id; - __be16 vf_id; + u8 pp_bypass; + u8 esw_mode; u8 rsvd0[3]; }; -struct xsc_cmd_disable_hca_mbox_out { +struct xsc_cmd_modify_hca_mbox_out { struct xsc_outbox_hdr hdr; u8 status; u8 rsvd0[3]; @@ -804,36 +910,38 @@ struct xsc_query_special_ctxs_mbox_out { /* vport mbox */ struct xsc_nic_vport_context { - u32 min_wqe_inline_mode:3; - u32 disable_mc_local_lb:1; - u32 disable_uc_local_lb:1; - u32 roce_en:1; - - u32 arm_change_event:1; - u32 event_on_mtu:1; - u32 event_on_promisc_change:1; - u32 event_on_vlan_change:1; - u32 event_on_mc_address_change:1; - u32 event_on_uc_address_change:1; - u32 affiliation_criteria:4; - u32 affiliated_vhca_id; - - u16 mtu; - - u64 system_image_guid; - u64 port_guid; - u64 node_guid; - - u32 qkey_violation_counter; - - u16 promisc_uc:1; - u16 promisc_mc:1; - u16 promisc_all:1; - u16 allowed_list_type:3; - u16 allowed_list_size:10; + __be32 min_wqe_inline_mode:3; + __be32 disable_mc_local_lb:1; + __be32 disable_uc_local_lb:1; + __be32 roce_en:1; + + __be32 arm_change_event:1; + __be32 event_on_mtu:1; + __be32 event_on_promisc_change:1; + __be32 event_on_vlan_change:1; + __be32 event_on_mc_address_change:1; + __be32 event_on_uc_address_change:1; + __be32 affiliation_criteria:4; + __be32 affiliated_vhca_id; + + __be16 mtu; + + __be64 system_image_guid; + __be64 port_guid; + __be64 node_guid; + + __be32 qkey_violation_counter; + + __be16 promisc_uc:1; + __be16 promisc_mc:1; + __be16 promisc_all:1; + __be16 vlan_allowed:1; + __be16 allowed_list_type:3; + __be16 allowed_list_size:10; u8 permanent_address[6]; - u64 current_uc_mac_address[0]; + u8 current_address[6]; + u8 current_uc_mac_address[0][2]; }; enum { @@ -877,26 +985,29 @@ struct xsc_modify_nic_vport_context_out { }; struct xsc_modify_nic_vport_field_select { - u32 affiliation:1; - u32 disable_uc_local_lb:1; 
- u32 disable_mc_local_lb:1; - u32 node_guid:1; - u32 port_guid:1; - u32 min_inline:1; - u32 mtu:1; - u32 change_event:1; - u32 promisc:1; - u32 permanent_address:1; - u32 addresses_list:1; - u32 roce_en:1; - u32 rsvd:20; + __be32 affiliation:1; + __be32 disable_uc_local_lb:1; + __be32 disable_mc_local_lb:1; + __be32 node_guid:1; + __be32 port_guid:1; + __be32 min_inline:1; + __be32 mtu:1; + __be32 change_event:1; + __be32 promisc:1; + __be32 permanent_address:1; + __be32 current_address:1; + __be32 addresses_list:1; + __be32 roce_en:1; + __be32 rsvd:19; }; struct xsc_modify_nic_vport_context_in { struct xsc_inbox_hdr hdr; - u32 other_vport:1; - u32 vport_number:16; - u32 rsvd:15; + __be32 other_vport:1; + __be32 vport_number:16; + __be32 rsvd:15; + __be16 caps; + __be16 caps_mask; struct xsc_modify_nic_vport_field_select field_select; struct xsc_nic_vport_context nic_vport_ctx; @@ -1093,7 +1204,8 @@ struct xsc_create_lag_request { u8 lag_sel_mode; u8 remap_port1; u8 remap_port2; - u8 rsvd[2]; + u8 kernel_bond; + u8 rsvd[5]; }; struct xsc_modify_lag_request { @@ -1108,7 +1220,8 @@ struct xsc_modify_lag_request { struct xsc_destroy_lag_request { __be16 lag_id; - u8 rsvd[2]; + u8 kernel_bond; + u8 rsvd[5]; }; struct xsc_set_lag_qos_request { @@ -1116,8 +1229,7 @@ struct xsc_set_lag_qos_request { u8 member_bitmap; u8 lag_del; u8 pcie_no; - u8 esw_mode; - u8 resv[2]; + u8 resv[3]; }; struct xsc_create_lag_mbox_in { @@ -1289,7 +1401,20 @@ struct xsc_event_resp { struct xsc_event_linkstatus_resp { u8 linkstatus; /*0:down, 1:up*/ - u8 linkspeed; /*0:25G, 1:100G*/ +}; + +struct xsc_event_linkinfo_resp { + u8 linkstatus; /*0:down, 1:up*/ + u8 port; + u8 duplex; + u8 autoneg; + u32 linkspeed; + u64 supported; + u64 advertising; + u64 supported_fec; /* reserved, not support currently */ + u64 advertised_fec; /* reserved, not support currently */ + u64 supported_speed[2]; + u64 advertising_speed[2]; }; struct xsc_event_query_type_mbox_in { @@ -1312,6 +1437,15 @@ struct 
xsc_event_query_linkstatus_mbox_out { struct xsc_event_linkstatus_resp ctx; }; +struct xsc_event_query_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_event_query_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkinfo_resp ctx; +}; + struct xsc_event_set_led_status_mbox_in { struct xsc_inbox_hdr hdr; u8 port_id; @@ -1377,6 +1511,27 @@ struct xsc_weight_get { u8 rsvd[7]; }; +struct xsc_dpu_port_weight_set { + u8 target; + u8 weight[DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsv[5]; +}; + +struct xsc_dpu_port_weight_get { + u8 weight[DPU_PORT_WGHT_TARGET_NUM][DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsvd[4]; +}; + +struct xsc_dpu_prio_weight_set { + u8 target; + u8 weight[QOS_PRIO_MAX + 1]; + u8 rsv[7]; +}; + +struct xsc_dpu_prio_weight_get { + u8 weight[DPU_PRIO_WGHT_TARGET_NUM][QOS_PRIO_MAX + 1]; +}; + struct xsc_cc_mbox_in { struct xsc_inbox_hdr hdr; u8 data[0]; @@ -1519,6 +1674,13 @@ struct xsc_cc_cmd_clamp_tgt_rate { u32 section; }; +struct xsc_cc_cmd_max_hai_factor { + u16 cmd; + u16 len; + u32 max_hai_factor; + u32 section; +}; + struct xsc_cc_cmd_get_cfg { u16 cmd; u16 len; @@ -1541,6 +1703,7 @@ struct xsc_cc_cmd_get_cfg { u32 cnp_pcp; u32 evt_period_alpha; u32 clamp_tgt_rate; + u32 max_hai_factor; u32 section; }; @@ -1560,6 +1723,7 @@ struct xsc_cc_cmd_stat { struct xsc_set_mtu_mbox_in { struct xsc_inbox_hdr hdr; __be16 mtu; + __be16 rx_buf_sz_min; u8 mac_port; u8 rsvd; }; @@ -1576,21 +1740,22 @@ struct xsc_hwc_mbox_out { struct hwc_set_t { u8 type; - u8 model; u8 s_wqe_mode; u8 r_wqe_mode; u8 ack_timeout; u8 group_mode; u8 lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; u8 retry_cnt_th; u8 adapt_to_other; - u8 rsvd[1]; + u8 alloc_qp_id_mode; u16 max_vf_nums; + u8 eth_pkt_offset; + u8 rdma_pkt_offset; + u8 tso_eth_pkt_offset; }; struct hwc_get_t { - u8 cur_model; - u8 next_model; u8 cur_s_wqe_mode; u8 next_s_wqe_mode; u8 cur_r_wqe_mode; @@ -1601,12 +1766,21 @@ struct hwc_get_t { u8 next_group_mode; u8 
cur_lossless_prio[XSC_MAX_MAC_NUM]; u8 next_lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; u8 cur_retry_cnt_th; u8 next_retry_cnt_th; u8 cur_adapt_to_other; u8 next_adapt_to_other; u16 cur_max_vf_nums; u16 next_max_vf_nums; + u8 cur_eth_pkt_offset; + u8 next_eth_pkt_offset; + u8 cur_rdma_pkt_offset; + u8 next_rdma_pkt_offset; + u8 cur_tso_eth_pkt_offset; + u8 next_tso_eth_pkt_offset; + u8 cur_alloc_qp_id_mode; + u8 next_alloc_qp_id_mode; }; struct xsc_set_mtu_mbox_out { @@ -1636,11 +1810,9 @@ struct xsc_query_pause_cnt_mbox_out { }; enum { - XSC_TBM_CAP_IPAT_BYPASS = 0, - XSC_TBM_CAP_PCT_BYPASS, - XSC_TBM_CAP_HASH_PPH, + XSC_TBM_CAP_HASH_PPH = 0, XSC_TBM_CAP_RSS, - XSC_TBM_CAP_BC_BYPASS, + XSC_TBM_CAP_PP_BYPASS, }; struct xsc_nic_attr { @@ -1664,13 +1836,8 @@ struct xsc_cmd_enable_nic_hca_mbox_in { struct xsc_rss_attr rss; }; -struct xsc_enable_nic_hca_resp { - __be16 bomt_idx; -}; - struct xsc_cmd_enable_nic_hca_mbox_out { struct xsc_outbox_hdr hdr; - struct xsc_enable_nic_hca_resp res; u8 status; u8 rsvd0; }; @@ -1680,14 +1847,9 @@ struct xsc_nic_dis_attr { __be16 caps; }; -struct xsc_broadcast_dis_attr { - __be16 bomt_idx; -}; - struct xsc_cmd_disable_nic_hca_mbox_in { struct xsc_inbox_hdr hdr; struct xsc_nic_dis_attr nic; - struct xsc_broadcast_dis_attr bc; }; struct xsc_cmd_disable_nic_hca_mbox_out { @@ -1737,6 +1899,36 @@ struct xsc_function_reset_mbox_out { u8 rsvd[8]; }; +enum { + XSC_PCIE_LAT_FEAT_SET_EN = 0, + XSC_PCIE_LAT_FEAT_GET_EN, + XSC_PCIE_LAT_FEAT_SET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_HISTOGRAM, + XSC_PCIE_LAT_FEAT_GET_PEAK, + XSC_PCIE_LAT_FEAT_HW, + XSC_PCIE_LAT_FEAT_HW_INIT, +}; + +struct xsc_pcie_lat { + u8 pcie_lat_enable; + u32 pcie_lat_interval[XSC_PCIE_LAT_CFG_INTERVAL_MAX]; + u32 pcie_lat_histogram[XSC_PCIE_LAT_CFG_HISTOGRAM_MAX]; + u32 pcie_lat_peak; +}; + +struct xsc_pcie_lat_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + 
+struct xsc_pcie_lat_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + struct xsc_reg_mcia { u8 module; u8 status; @@ -1820,4 +2012,42 @@ struct xsc_rtt_stats_mbox_out { struct rtt_stats stats; }; +enum { + XSC_AP_FEAT_SET_UDP_SPORT = 0, +}; + +struct xsc_ap_feat_set_udp_sport { + u32 qpn; + u32 udp_sport; +}; + +struct xsc_ap { + struct xsc_ap_feat_set_udp_sport set_udp_sport; +}; + +struct xsc_ap_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_ap_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_set_debug_info_mbox_in { + struct xsc_inbox_hdr hdr; + u8 set_field; + u8 log_level; + u8 cmd_verbose; + u8 rsvd[5]; +}; + +struct xsc_set_debug_info_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + #endif /* XSC_CMD_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h index f4c89d502c823c03b8112940398644a46967410f..57fc4467e1e68f5529e3ef00d5fb861d92008bea 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -21,31 +20,46 @@ #include #include #include -#include #include #include #include #include -#ifdef USE_VIRTIO -#include -#include -#include -#endif /* USE_VIRTIO */ - -#include -#include -#include -#include -#include -#include -#include +#include + +#include "common/xsc_macro.h" +#include "common/xsc_cmd.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_auto_hw.h" +#include "common/driver.h" +#include "common/xsc_reg.h" +#include "common/xsc_eswitch.h" extern uint xsc_debug_mask; +extern unsigned int xsc_log_level; #ifndef mmiowb #define mmiowb() #endif +#define XSC_PCI_VENDOR_ID_OBSOLETE 0x1172 +#define XSC_PCI_VENDOR_ID 0x1f67 +#define XSC_PF1_DEVICE_ID_OBSOLETE 0x0001 +#define XSC_PF1_VF_DEVICE_ID_OBSOLETE 0x0002 + +#define XSC_MC_PF_DEV_ID 0x1011 +#define XSC_MC_VF_DEV_ID 0x1012 + +#define XSC_MF_HOST_PF_DEV_ID 0x1051 +#define XSC_MF_HOST_VF_DEV_ID 0x1052 +#define XSC_MF_SOC_PF_DEV_ID 0x1053 + +#define XSC_MS_PF_DEV_ID 0x1111 +#define XSC_MS_VF_DEV_ID 0x1112 + +#define XSC_MV_HOST_PF_DEV_ID 0x1151 +#define XSC_MV_HOST_VF_DEV_ID 0x1152 +#define XSC_MV_SOC_PF_DEV_ID 0x1153 + #define REG_ADDR(dev, offset) \ (xsc_core_is_pf(dev) ? ((dev->bar2) + ((offset) - 0xA0000000)) : ((dev->bar2) + (offset))) @@ -55,10 +69,27 @@ extern uint xsc_debug_mask; #define QPM_PAM_TBL_INDEX_SHIFT 2 #define QPM_PAM_PAGE_SHIFT 12 +enum { + XSC_LOG_LEVEL_DBG = 0, + XSC_LOG_LEVEL_INFO = 1, + XSC_LOG_LEVEL_WARN = 2, + XSC_LOG_LEVEL_ERR = 3, +}; + +#ifndef dev_fmt +#define dev_fmt(fmt) fmt +#endif + +#define xsc_dev_log(condition, level, dev, fmt, ...) \ +do { \ + if (condition) \ + dev_printk(level, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +} while (0) + #define xsc_core_dbg(__dev, format, ...) 
\ - dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_DBG, KERN_DEBUG, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) #define xsc_core_dbg_once(__dev, format, ...) \ dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ @@ -72,9 +103,9 @@ do { \ } while (0) #define xsc_core_err(__dev, format, ...) \ - dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_ERR, KERN_ERR, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) #define xsc_core_err_rl(__dev, format, ...) \ dev_err_ratelimited(&(__dev)->pdev->dev, \ @@ -83,12 +114,20 @@ do { \ ##__VA_ARGS__) #define xsc_core_warn(__dev, format, ...) \ - dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_WARN, KERN_WARNING, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) #define xsc_core_info(__dev, format, ...) \ - dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_INFO, KERN_INFO, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_pr_debug(format, ...) 
\ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug(format, ##__VA_ARGS__); \ +} while (0) #define assert(__dev, expr) \ do { \ @@ -101,6 +140,12 @@ do { \ #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) +#define XSC_PCIE_NO_HOST 0x0 +#define XSC_PCIE_NO_SOC 0x1 +#define XSC_PCIE_NO_UNSET 0xFF + +extern u8 g_xsc_pcie_no; + enum xsc_dev_event { XSC_DEV_EVENT_SYS_ERROR, XSC_DEV_EVENT_PORT_UP, @@ -151,7 +196,7 @@ static DEFINE_MUTEX(xsc_intf_mutex); #define GROUP_REFER_CNT_SIZE 1024 struct qp_group_refer { - spinlock_t lock; + spinlock_t lock; /* protect refer_cnt[] */ u16 refer_cnt[GROUP_REFER_CNT_SIZE]; }; @@ -161,12 +206,14 @@ struct xsc_priv_device { dev_t devno; struct cdev cdev; struct list_head mem_list; - spinlock_t mem_lock; + spinlock_t mem_lock; /* protect mem_list */ + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ }; struct xsc_dpdk_mem { struct list_head mem_list; - spinlock_t mem_lock; + spinlock_t mem_lock; /* protect mem_list */ }; enum xsc_pci_status { @@ -195,8 +242,10 @@ enum { XSC_INTERFACE_ATTACHED, }; +#ifndef COSIM #define CONFIG_XSC_SRIOV 1 -#define CONFIG_XSC_ESWITCH 1 +#endif +//#define CONFIG_XSC_ESWITCH 1 enum xsc_coredev_type { XSC_COREDEV_PF, @@ -220,12 +269,6 @@ enum { XSC_CAP_PORT_TYPE_ETH = 0x1, }; -enum xsc_list_type { - XSC_NVPRT_LIST_TYPE_UC = 0x0, - XSC_NVPRT_LIST_TYPE_MC = 0x1, - XSC_NVPRT_LIST_TYPE_VLAN = 0x2, -}; - enum xsc_inline_modes { XSC_INLINE_MODE_NONE, XSC_INLINE_MODE_L2, @@ -268,7 +311,7 @@ struct xsc_core_sriov { int num_vfs; u16 max_vfs; u16 vf_bdf_base; - bool probe_vf; + u8 probe_vf; struct xsc_vf_context *vfs_ctx; struct kobject *config; struct kobject *groups_config; @@ -306,9 +349,9 @@ struct xsc_vport_info { int link_state; u32 min_rate; u32 max_rate; - bool spoofchk; - bool trusted; - bool roce; + u8 spoofchk; + u8 trusted; + u8 roce; /* the admin approved vlan list */ DECLARE_BITMAP(vlan_trunk_8021q_bitmap, VLAN_N_VID); u32 group; @@ -342,7 +385,7 @@ 
struct xsc_vport { struct xsc_vport_info info; struct { - bool enabled; + u8 enabled; u32 esw_tsar_ix; u32 bw_share; u32 min_rate; @@ -350,7 +393,7 @@ struct xsc_vport { // struct xsc_vgroup *group; } qos; - bool enabled; + u8 enabled; enum xsc_eswitch_vport_event enabled_events; u16 match_id; u32 bond_metadata; @@ -381,14 +424,14 @@ struct xsc_eswitch { }; struct xsc_core_health { - bool sick; + u8 sick; }; struct xsc_priv { char name[XSC_MAX_NAME_LEN]; struct list_head dev_list; struct list_head ctx_list; - spinlock_t ctx_lock; + spinlock_t ctx_lock; /* protect ctx_list */ int numa_node; struct xsc_core_sriov sriov; struct xsc_eswitch *eswitch; @@ -401,6 +444,23 @@ struct xsc_port_ctrl { dev_t devid; struct cdev cdev; struct device *device; + struct list_head file_list; + spinlock_t file_lock; /* protect file_list */ +}; + +struct xsc_port_ctrl_file { + struct list_head file_node; + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ + struct xsc_bdf_file *root_bdf; + struct xsc_port_ctrl *ctrl; +}; + +struct xsc_bdf_file { + unsigned long key; + struct radix_tree_root obj_tree; /* protect obj_tree */ + spinlock_t obj_lock; + struct xsc_core_device *xdev; }; struct xsc_port_caps { @@ -487,6 +547,10 @@ struct xsc_caps { u16 raweth_qp_id_end; u32 qp_rate_limit_min; u32 qp_rate_limit_max; + u32 hw_feature_flag; + u16 funcid[8]; + u16 funcid_valid; + u8 nif_port_num; }; struct cache_ent { @@ -576,8 +640,6 @@ struct xsc_rsp_layout { struct xsc_cmd_work_ent { struct xsc_cmd_msg *in; struct xsc_rsp_msg *out; - xsc_cmd_cbk_t callback; - void *context; int idx; struct completion done; struct xsc_cmd *cmd; @@ -585,7 +647,6 @@ struct xsc_cmd_work_ent { struct xsc_cmd_layout *lay; struct xsc_rsp_layout *rsp_lay; int ret; - int page_queue; u8 status; u8 token; struct timespec64 ts1; @@ -647,21 +708,15 @@ struct xsc_cmd { int events; u32 __iomem *vector; - /* protect command queue allocations - */ - spinlock_t alloc_lock; - - /* protect token 
allocations - */ - spinlock_t token_lock; - spinlock_t doorbell_lock; + spinlock_t alloc_lock; /* protect command queue allocations */ + spinlock_t token_lock; /* protect token allocations */ + spinlock_t doorbell_lock; /* protect cmdq req pid doorbell */ u8 token; unsigned long bitmask; char wq_name[XSC_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; struct task_struct *cq_task; struct semaphore sem; - struct semaphore pages_sem; int mode; struct xsc_cmd_work_ent *ent_arr[XSC_MAX_COMMANDS]; struct pci_pool *pool; @@ -673,18 +728,8 @@ struct xsc_cmd { u8 ownerbit_learned; }; -struct xsc_profile { - u64 mask; - u32 log_max_qp; - int cmdif_csum; - struct { - int size; - int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; -}; - struct xsc_lock { - spinlock_t lock; + spinlock_t lock; /* xsc spin lock */ }; struct xsc_reg_addr { @@ -709,25 +754,19 @@ struct xsc_core_device { struct xsc_priv priv; struct xsc_port_ctrl port_ctrl; struct xsc_dev_resource *dev_res; -#ifdef USE_VIRTIO - struct device virtio_dev; -#endif /* USE_VIRTIO */ void *xsc_ib_dev; void *netdev; void *eth_priv; void *ovs_priv; -#ifdef USE_VIRTIO - void __iomem *bar0; -#endif /* USE_VIRTIO */ void __iomem *bar2; int bar_num; - u16 bus_id; - u16 dev_id; + u16 bus_num; + u16 dev_num; u16 func_id; + u16 device_id; u8 pf; - u8 pcie; u8 mac_port; /* mac physic port */ u8 pcie_port; /* pcie physic port */ u8 pf_id; @@ -739,29 +778,27 @@ struct xsc_core_device { u16 gsi_qpn; /* logic qpn for gsi*/ u16 bomt_idx; - bool vf_pp_init; u16 msix_vec_base; - struct mutex pci_status_mutex; + struct mutex pci_status_mutex; /* protect pci_status */ enum xsc_pci_status pci_status; u32 board_id; char board_sn[XSC_BOARD_SN_LEN]; - struct mutex intf_state_mutex; + struct mutex intf_state_mutex; /* protect intf_state */ unsigned long intf_state; enum xsc_coredev_type coredev_type; struct xsc_caps caps; atomic_t num_qps; struct xsc_cmd cmd; struct xsc_lock reg_access_lock; - bool rdma_ready; - struct xsc_profile *profile; + u8 
rdma_ready; void *counters_priv; struct xsc_priv_device priv_device; struct xsc_dpdk_mem dpdk_mem; void (*event)(struct xsc_core_device *dev, - enum xsc_dev_event event, unsigned long param); + enum xsc_dev_event event, unsigned long param); void (*event_handler)(void *adapter); @@ -775,6 +812,8 @@ struct xsc_core_device { cpumask_var_t xps_cpumask; void *rtt_priv; + void *ap_priv; + void *pcie_lat; }; struct xsc_feature_flag { @@ -797,7 +836,7 @@ struct xsc_interface { int (*attach)(struct xsc_core_device *dev, void *context); void (*detach)(struct xsc_core_device *dev, void *context); void (*event)(struct xsc_core_device *dev, void *context, - enum xsc_dev_event event, unsigned long param); + enum xsc_dev_event event, unsigned long param); void *(*get_dev)(void *context); }; @@ -828,16 +867,16 @@ int xsc_register_interface(struct xsc_interface *intf); void xsc_unregister_interface(struct xsc_interface *intf); void xsc_reload_interface(struct xsc_core_device *dev, int protocol); void xsc_reload_interfaces(struct xsc_core_device *dev, - int protocol1, int protocol2, - bool valid1, bool valid2); + int protocol1, int protocol2, + bool valid1, bool valid2); struct xsc_core_device *xsc_get_next_phys_dev(struct xsc_core_device *dev); void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol); void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol); int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, - int out_size, int func_id); + int out_size, int func_id); int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, - void *out, int out_size); + void *out, int out_size); int xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out); int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out); int xsc_reg_mr(struct xsc_core_device *dev, void *in, void *out); @@ -857,22 +896,30 @@ int xsc_create_res(struct xsc_core_device *dev); void xsc_destroy_res(struct 
xsc_core_device *dev); int xsc_counters_init(struct ib_device *ib_dev, - struct xsc_core_device *dev); + struct xsc_core_device *dev); void xsc_counters_fini(struct ib_device *ib_dev, - struct xsc_core_device *dev); + struct xsc_core_device *dev); int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev); void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev); +int xsc_eth_sysfs_create(struct net_device *netdev, struct xsc_core_device *dev); +void xsc_eth_sysfs_remove(struct net_device *netdev, struct xsc_core_device *dev); int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev); +int xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev); + +#ifdef RUN_WITH_PSV +int xsc_cmd_query_psv_funcid(struct xsc_core_device *dev, + struct xsc_caps *caps); +#endif int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, - struct xsc_caps *caps); -int xsc_cmd_init_hca(struct xsc_core_device *dev); -int xsc_cmd_teardown_hca(struct xsc_core_device *dev); -int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_idx); -int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_idx); + struct xsc_caps *caps); +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix); +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num); +int xsc_cmd_modify_hca(struct xsc_core_device *dev); int xsc_get_board_id(struct xsc_core_device *dev); int xsc_irq_eq_create(struct xsc_core_device *dev); @@ -888,20 +935,20 @@ void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev); int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, - u32 group_id, struct kobject *group_kobj); + u32 group_id, struct kobject 
*group_kobj); void xsc_destroy_vf_group_sysfs(struct xsc_core_device *dev, - struct kobject *group_kobj); + struct kobject *group_kobj); void xsc_pci_get_vf_info(struct xsc_core_device *dev, - struct xsc_vf_info *info); -int xsc_get_pcie_no(void); + struct xsc_vf_info *info); u32 xsc_eth_pcie_read32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, - u32 eth_ip_inter_addr); + u32 eth_ip_inter_addr); void xsc_eth_pcie_write32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, - u32 eth_ip_inter_addr, u32 val); + u32 eth_ip_inter_addr, u32 val); struct cpumask *xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector); void mask_cpu_by_node(int node, struct cpumask *dstp); +int xsc_get_link_speed(struct xsc_core_device *dev); -#define XSC_ESWITCH_MANAGER(dev) (dev->caps.eswitch_manager) +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) static inline bool xsc_sriov_is_enabled(struct xsc_core_device *dev) { @@ -933,7 +980,7 @@ static inline bool xsc_core_is_ecpf(struct xsc_core_device *dev) return dev->caps.embedded_cpu; } -#define XSC_ESWITCH_MANAGER(dev) (dev->caps.eswitch_manager) +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) #define ESW_ALLOWED(esw) ((esw) && XSC_ESWITCH_MANAGER((esw)->dev)) static inline bool @@ -995,6 +1042,7 @@ static inline void acquire_ia_lock(struct xsc_core_device *xdev, int *iae_idx) else *iae_idx = -1; } + #define ACQUIRE_IA_LOCK(bp, iae_idx) \ do { \ int idx; \ @@ -1068,7 +1116,7 @@ static inline void wait_for_complete(struct xsc_core_device *xdev, int iae_idx) } static inline void ia_write_reg_mr(struct xsc_core_device *xdev, u32 reg, - u32 *ptr, int n, int idx) + u32 *ptr, int n, int idx) { ia_write_data(xdev, ptr, n, idx); ia_write_reg_addr(xdev, reg, idx); @@ -1129,4 +1177,583 @@ static inline void reg_write32(struct xsc_core_device *dev, u32 offset, u32 val) #define REG_RD32(dev, offset) reg_read32(dev, offset) #define REG_WR32(dev, offset, val) reg_write32(dev, offset, val) 
+static inline unsigned long bdf_to_key(unsigned int domain, unsigned int bus, unsigned int devfn) +{ + return ((unsigned long)domain << 32) | ((bus & 0xff) << 16) | (devfn & 0xff); +} + +enum xsc_port_type_encode { + XSC_PHY_PORT_MAC_0 = 0x0, + XSC_PHY_PORT_MAC_1 = 0x1, + XSC_PHY_PORT_MAC_2 = 0x2, + XSC_PHY_PORT_MAC_3 = 0x3, + XSC_PHY_PORT_MAC_4 = 0x4, + XSC_PHY_PORT_MAC_5 = 0x5, + XSC_PHY_PORT_MAC_6 = 0x6, + XSC_PHY_PORT_MAC_7 = 0x7, + XSC_PHY_PORT_PCIE_0 = 0x8, + XSC_PHY_PORT_PCIE_1 = 0x9, + + XSC_LAG_PORT_START = 15, + XSC_LAG_PORT_END = 62, + + XSC_PORT_FUNC_ID_START = 63, + XSC_PORT_FUNC_ID_END = 1214, +}; + +#define XSC_PHY_PORT_MAC_NUM 8 + +#define XSC_PHY_PORT_MAC_N(mac_id) \ + (XSC_PHY_PORT_MAC_0 + (mac_id)) +#define XSC_PHY_PORT_PCIE_N(pcie_id) \ + (XSC_PHY_PORT_PCIE_0 + (pcie_id)) +#define XSC_PHY_PORT_TO_PCIE0_PF_ID(pcie_port) \ + ((pcie_port) - XSC_PHY_PORT_PCIE_0) +#define XSC_PHY_PORT_TO_PCIE1_PF_ID(pcie_port) \ + ((pcie_port) - XSC_PHY_PORT_PCIE_1 - 1) + +#define U16_DIV2(a) ((u16)((u16)(a) >> 1)) + +#define U16_TOTAL(start, end) ((u16)(1 + (end) - (start))) + +#define U16_HALF(start, end) (U16_DIV2(U16_TOTAL(start, end))) + +//use single unsigned integer overflow reduces instructions and branches +//notice: 'min' and 'max' cannot be a function or statement, to avoid possible side-effect +#define U16_JUDGE_RANGE(min, max, x) \ + (((min) < (max)) && ((u16)((x) - (min)) <= (u16)((max) - (min)))) + +//notice: 'min' and 'max' cannot be a function or statement, to avoid possible side-effect +#define U16_JUDGE_RANGE_BOTTOM_HALF(min, max, x) \ + (((min) < (max)) && \ + ((u16)((x) - (min)) <= U16_DIV2((u16)((max) - (min))))) + +//notice: 'min' and 'max' cannot be a function or statement, to avoid possible side-effect +#define U16_JUDGE_RANGE_TOP_HALF(min, max, x) \ + (((min) < (max)) && \ + ((u16)((x) - U16_HALF((min), (max))) <= U16_DIV2((u16)((max) - (min))))) + +//notice: 'b' cannot be a function or statement, to avoid possible side-effect +#define 
U16_THREE_EQUAL(a, b, c) \ + (((a) == (b)) && ((b) == (c))) + +static inline bool check_caps_funcid_valid(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + return true; +} + +/* Comment... + * accordence to xsc_core.h funcid[n] order must be: + * 0: pcie0_vf_begin + * 1: pcie0_vf_end + * 2: pcie0_pf_begin + * 3: pcie0_pf_end + * 4: pcie1_vf_begin + * 5: pcie1_vf_end + * 6: pcie1_pf_begin + * 7: pcie1_pf_end + */ +static inline u16 get_pcie0_vf_begin(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[0]; +} + +static inline u16 get_pcie0_vf_end(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[1]; +} + +static inline u16 get_pcie0_pf_begin(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[2]; +} + +static inline u16 get_pcie0_pf_end(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[3]; +} + +static inline u16 get_pcie1_vf_begin(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[4]; +} + +static inline u16 get_pcie1_vf_end(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[5]; +} + +static inline u16 get_pcie1_pf_begin(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[6]; +} + +static inline u16 get_pcie1_pf_end(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return caps->funcid[7]; +} + +static inline u16 get_xsc_funcid_end(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return (caps->funcid[7] + 1); +} + +static inline u16 get_pcie0_vf_num(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return U16_TOTAL(caps->funcid[0], caps->funcid[1]); +} + +static inline u16 get_pcie0_pf_num(struct xsc_caps 
*caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return U16_TOTAL(caps->funcid[2], caps->funcid[3]); +} + +static inline u16 get_pcie1_vf_num(struct xsc_caps *caps) +{ + return 0;//pcie1 has no vf +} + +static inline u16 get_pcie1_pf_num(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + return U16_TOTAL(caps->funcid[6], caps->funcid[7]); +} + +static inline u16 get_pcie0_pf0_vf_num(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 1) + return U16_TOTAL(caps->funcid[0], caps->funcid[1]); + else + return U16_HALF(caps->funcid[0], caps->funcid[1]); +} + +static inline u16 get_pcie0_pf1_vf_num(struct xsc_caps *caps) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 1) + return 0; + else + return U16_HALF(caps->funcid[0], caps->funcid[1]); +} + +static inline u16 get_pcie1_pf0_vf_num(struct xsc_caps *caps) +{ + return 0;//pcie1 has no vf +} + +static inline u16 get_pcie1_pf1_vf_num(struct xsc_caps *caps) +{ + return 0;//pcie1 has no vf +} + +static inline u16 +vf_index_to_pcie0_funcid(struct xsc_caps *caps, u16 vf_index, u16 belong_pf) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + //notice: not check vf_index out of range + return (belong_pf == 0) + ? 
(vf_index + caps->funcid[0]) + : (vf_index + U16_HALF(caps->funcid[0], caps->funcid[1])); +} + +static inline u16 +vf_index_to_pcie1_funcid(struct xsc_caps *caps, u16 vf_index, u16 belong_pf) +{ + return 0;//pcie1 has no vf +} + +static inline u16 +pf_index_to_pcie0_funcid(struct xsc_caps *caps, u16 pf_index) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + //notice: not check pf_index out of range + return pf_index + caps->funcid[2]; +} + +static inline u16 +pf_index_to_pcie1_funcid(struct xsc_caps *caps, u16 pf_index) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + //notice: not check pf_index out of range + return pf_index + caps->funcid[6]; +} + +static inline u16 +vf_index_to_pcie0_xscport(struct xsc_caps *caps, u16 vf_index, u16 belong_pf) +{ + //notice: not check vf_index out of range + return (belong_pf == 0) + ? (XSC_PORT_FUNC_ID_START + vf_index + caps->funcid[0]) + : (XSC_PORT_FUNC_ID_START + vf_index + + U16_HALF(caps->funcid[0], caps->funcid[1])); +} + +static inline u16 +vf_index_to_pcie1_xscport(struct xsc_caps *caps, u16 vf_index, u16 belong_pf) +{ + return 0;//pcie1 has no vf +} + +static inline u16 +pf_index_to_pcie0_xscport(struct xsc_caps *caps, u16 pf_index) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + //notice: not check pf_index out of range + return XSC_PORT_FUNC_ID_START + pf_index + caps->funcid[2]; +} + +static inline u16 +pf_index_to_pcie1_xscport(struct xsc_caps *caps, u16 pf_index) +{ + if (!caps || caps->funcid_valid == 0) + return 0; + //notice: not check pf_index out of range + return XSC_PORT_FUNC_ID_START + pf_index + caps->funcid[6]; +} + +static inline bool +check_is_vf(struct xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) + return true; + + if (U16_THREE_EQUAL(caps->funcid[0], caps->funcid[1], func_id)) + return true; + //pcie1 has no vf + + return false; +} + +static inline bool 
+check_is_pf(struct xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_JUDGE_RANGE(caps->funcid[2], caps->funcid[3], func_id)) + return true; + if (U16_JUDGE_RANGE(caps->funcid[6], caps->funcid[7], func_id)) + return true; + + if (U16_THREE_EQUAL(caps->funcid[2], caps->funcid[3], func_id)) + return true; + if (U16_THREE_EQUAL(caps->funcid[6], caps->funcid[7], func_id)) + return true; + + return false; +} + +static inline bool +check_is_pcie0_pf(struct xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_JUDGE_RANGE(caps->funcid[2], caps->funcid[3], func_id)) + return true; + + if (U16_THREE_EQUAL(caps->funcid[2], caps->funcid[3], func_id)) + return true; + + return false; +} + +static inline bool +check_is_pcie1_pf(struct xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_JUDGE_RANGE(caps->funcid[6], caps->funcid[7], func_id)) + return true; + + if (U16_THREE_EQUAL(caps->funcid[6], caps->funcid[7], func_id)) + return true; + + return false; +} + +static inline bool +check_is_pcie0_vf(struct xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) + return true; + + if (U16_THREE_EQUAL(caps->funcid[0], caps->funcid[1], func_id)) + return true; + + return false; +} + +static inline bool +check_is_pcie1_vf(struct xsc_caps *caps, u16 func_id) +{ + return false;//pcie1 has no vf +} + +static inline bool +check_is_pcie0_pf0_vf(struct xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_JUDGE_RANGE_BOTTOM_HALF(caps->funcid[0], caps->funcid[1], func_id)) + return true; + + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 1) { + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) + return true; + } + + return false; +} + +static inline bool +check_is_pcie0_pf1_vf(struct 
xsc_caps *caps, u16 func_id) +{ + if (!caps || caps->funcid_valid == 0) + return false; + + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 2) { + if (U16_JUDGE_RANGE_TOP_HALF(caps->funcid[0], caps->funcid[1], func_id)) + return true; + } + + return false; +} + +static inline bool +pf_funcid_to_pf_index(struct xsc_caps *caps, u16 func_id, u8 *pf_id) +{ + if (!caps || caps->funcid_valid == 0 || !pf_id) + return false; + + *pf_id = 0xff; + if (U16_THREE_EQUAL(caps->funcid[2], caps->funcid[3], func_id)) { + *pf_id = 0; + return true; + } + if (U16_THREE_EQUAL(caps->funcid[6], caps->funcid[7], func_id)) { + *pf_id = 0; + return true; + } + + if (U16_JUDGE_RANGE(caps->funcid[2], caps->funcid[3], func_id)) + *pf_id = func_id - caps->funcid[2]; + else if (U16_JUDGE_RANGE(caps->funcid[6], caps->funcid[7], func_id)) + *pf_id = func_id - caps->funcid[6]; + else + return false; + + return true; +} + +static inline bool +vf_funcid_to_vf_index(struct xsc_caps *caps, u16 func_id, u16 *vf_id) +{ + if (!caps || caps->funcid_valid == 0 || !vf_id) + return false; + + *vf_id = 0xffff; + //vf_num = 1 is impossible + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 1) { + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) { + *vf_id = func_id - caps->funcid[0]; + return true; + } + } + //pcie1 has no vf + if (U16_JUDGE_RANGE_BOTTOM_HALF(caps->funcid[0], caps->funcid[1], func_id)) + *vf_id = func_id - caps->funcid[0]; + else if (U16_JUDGE_RANGE_TOP_HALF(caps->funcid[0], caps->funcid[1], func_id)) + *vf_id = func_id - U16_HALF(caps->funcid[0], caps->funcid[1]); + else + return false; + + return true; +} + +static inline bool +funcid_to_pf_index(struct xsc_caps *caps, u16 func_id, u8 *pf_id) +{ + if (!caps || caps->funcid_valid == 0 || !pf_id) + return false; + + *pf_id = 0xff; + //vf_num = 1 is impossible + if (U16_THREE_EQUAL(caps->funcid[2], caps->funcid[3], func_id)) { + *pf_id = 0; + return true; + } + if (U16_THREE_EQUAL(caps->funcid[6], caps->funcid[7], func_id)) 
{ + *pf_id = 0; + return true; + } + + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 1) { + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) { + *pf_id = 0; + return true; + } + } + //pcie1 has no vf + if (U16_JUDGE_RANGE(caps->funcid[2], caps->funcid[3], func_id)) + *pf_id = func_id - caps->funcid[2]; + else if (U16_JUDGE_RANGE(caps->funcid[6], caps->funcid[7], func_id)) + *pf_id = func_id - caps->funcid[6]; + else if (U16_JUDGE_RANGE_BOTTOM_HALF(caps->funcid[0], caps->funcid[1], func_id)) + *pf_id = 0; + else if (U16_JUDGE_RANGE_TOP_HALF(caps->funcid[0], caps->funcid[1], func_id)) + *pf_id = 1; + else + return false; + + return true; +} + +static inline bool +funcid_to_pcie_no(struct xsc_caps *caps, u16 func_id, u8 *pcie_no) +{ + if (!caps || caps->funcid_valid == 0 || !pcie_no) + return false; + + *pcie_no = 0xff; + //vf_num = 1 is impossible + if (U16_THREE_EQUAL(caps->funcid[2], caps->funcid[3], func_id)) { + *pcie_no = 0; + return true; + } + if (U16_THREE_EQUAL(caps->funcid[6], caps->funcid[7], func_id)) { + *pcie_no = 1; + return true; + } + //pcie1 has no vf + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) + *pcie_no = 0; + else if (U16_JUDGE_RANGE(caps->funcid[2], caps->funcid[3], func_id)) + *pcie_no = 0; + else if (U16_JUDGE_RANGE(caps->funcid[6], caps->funcid[7], func_id)) + *pcie_no = 1; + else + return false; + + return true; +} + +static inline bool +funcid_to_pf_vf_index(struct xsc_caps *caps, u16 func_id, + u8 *is_pf, u8 *pf_id, u8 *pcie_no, u16 *vf_id) +{ + if (!caps || caps->funcid_valid == 0 || + !is_pf || !pf_id || !pcie_no || !vf_id) + return false; + + *is_pf = 0xff; + *pf_id = 0xff; + *pcie_no = 0xff; + *vf_id = 0xffff; + //vf_num = 1 is impossible + if (U16_THREE_EQUAL(caps->funcid[2], caps->funcid[3], func_id)) { + *is_pf = 1; + *pf_id = 0; + *pcie_no = 0; + *vf_id = 0xffff; + return true; + } + if (U16_THREE_EQUAL(caps->funcid[6], caps->funcid[7], func_id)) { + *is_pf = 1; + *pf_id = 1; + *pcie_no = 
1; + *vf_id = 0xffff; + return true; + } + + if (U16_TOTAL(caps->funcid[2], caps->funcid[3]) == 1) { + if (U16_JUDGE_RANGE(caps->funcid[0], caps->funcid[1], func_id)) { + *is_pf = 0; + *pf_id = 0; + *pcie_no = 0; + *vf_id = func_id - caps->funcid[0]; + return true; + } + } + //pcie1 has no vf + if (U16_JUDGE_RANGE(caps->funcid[2], caps->funcid[3], func_id)) { + *is_pf = 1; + *pf_id = func_id - caps->funcid[2]; + *pcie_no = 0; + *vf_id = 0xffff; + } else if (U16_JUDGE_RANGE(caps->funcid[6], caps->funcid[7], func_id)) { + *is_pf = 1; + *pf_id = func_id - caps->funcid[6]; + *pcie_no = 1; + *vf_id = 0xffff; + } else if (U16_JUDGE_RANGE_BOTTOM_HALF(caps->funcid[0], caps->funcid[1], func_id)) { + *is_pf = 0; + *pf_id = 0; + *vf_id = func_id - caps->funcid[0]; + *pcie_no = 0; + } else if (U16_JUDGE_RANGE_TOP_HALF(caps->funcid[0], caps->funcid[1], func_id)) { + *is_pf = 0; + *pf_id = 1; + *vf_id = func_id - U16_HALF(caps->funcid[0], caps->funcid[1]); + *pcie_no = 0; + } else { + return false; + } + return true; +} + +static inline bool +is_support_rdma(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag | XSC_HW_RDMA_SUPPORT) + return true; + + return false; +} + #endif /* XSC_CORE_H */ + diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h index f2ab499e6f4a0429e7551e5407a8b53b51d236d7..a9addcd55c0a698978d364820e6ca18b423c747c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h new file mode 100644 index 0000000000000000000000000000000000000000..ef037ce3bf012d574ed383ad9faffcf14471922e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_FS_H +#define XSC_FS_H + +#include +#include + +enum xsc_list_type { + XSC_NVPRT_LIST_TYPE_UC = 0x0, + XSC_NVPRT_LIST_TYPE_MC = 0x1, + XSC_NVPRT_LIST_TYPE_VLAN = 0x2, +}; + +int xsc_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int xsc_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h index d2ab84a0c0e67d22b35d0852e324972d1f83e14c..df1ce571c82bdae616d96b2cdeabc1a708856620 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -11,76 +10,20 @@ #include #include -#include -#include -#include -#include -#include - -#define MSIX_SUPPORT -#define XSC_BQL_SUPPORT -#define XSC_RSS_SUPPORT +#include "common/xsc_macro.h" #ifdef MSIX_SUPPORT #else #define NEED_CREATE_RX_THREAD #endif -/*define product macro*/ -#if ((CHIP_VERSION_L == 0xa) || \ - (CHIP_VERSION_L == 0xa10)) -#define XSC_CHIP_ANDES -#elif ((CHIP_VERSION_L == 0xb) || \ - (CHIP_VERSION_L == 0xb20) || \ - (CHIP_VERSION_L == 0xb11) || \ - (CHIP_VERSION_L == 0xb13)) -#define XSC_CHIP_BERYL -#if (CHIP_VERSION_L == 0xb20) -#define XSC_CHIP_BERYL_100 -#endif -#if (CHIP_VERSION_L == 0xb11) -#define XSC_CHIP_BERYL_50 -#endif -#if (CHIP_VERSION_L == 0xb13) -#define XSC_CHIP_BERYL_50S -#endif -#if (CHIP_VERSION_L == 0xb12) -#define XSC_CHIP_BERYL_50R -#endif -#elif ((CHIP_VERSION_L == 0xc) || \ - (CHIP_VERSION_L == 0xc11) || \ - (CHIP_VERSION_L == 0xc12) || \ - (CHIP_VERSION_L == 0xc13)) -#define XSC_CHIP_CRYSTAL -#if (CHIP_VERSION_L == 0xc13) -#define XSC_CHIP_CRYSTAL_A -#endif -#if (CHIP_VERSION_L == 0xc12) -#define XSC_CHIP_CRYSTAL_B -#endif -#if (CHIP_VERSION_L == 0xc11) -#define XSC_CHIP_CRYSTAL_C -#endif -#endif - -#if (CHIP_HOTFIX_NUM > 0x22) -#if ((CHIP_VERSION_L == 0xb13) || \ - (CHIP_VERSION_L == 0xc11) || \ - (CHIP_VERSION_L == 0xc12) || \ - (CHIP_VERSION_L == 0xc13)) -#define XSC_CHIP_RDMA_UNSUPPORTED 1 -#endif -#endif - #ifndef RUN_WITH_PSV -#if (CHIP_HOTFIX_NUM >= 0x23) #define XSC_MSIX_BAR_EMUL #endif -#endif #define PAGE_SHIFT_4K 12 #define PAGE_SIZE_4K (_AC(1, UL) << PAGE_SHIFT_4K) -#define PAGE_MASK_4K (~(PAGE_SIZE_4K-1)) +#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) #ifndef EQ_NUM_MAX #define EQ_NUM_MAX 1024 @@ -89,16 +32,6 @@ #define EQ_SIZE_MAX 1024 #endif -#if defined XSC_CHIP_ANDES -#define XSC_RSS_INDIR_TBL_N 1032 /*8PFs+1024VFs*/ -#elif defined XSC_CHIP_BERYL -#define XSC_RSS_INDIR_TBL_N 514 /*2PFs+512VFs*/ -#elif defined XSC_CHIP_BERYL_100 -#define XSC_RSS_INDIR_TBL_N 514 /*2PFs+512VFs*/ -#elif defined 
XSC_CHIP_CRYSTAL -#define XSC_RSS_INDIR_TBL_N 514 /*2PFs+512VFs*/ -#endif - #define XSC_RSS_INDIR_TBL_S 256 #define XSC_MAX_TSO_PAYLOAD 0x10000/*64kb*/ @@ -133,6 +66,7 @@ #define XSC_MAX_RX_FRAGS 4 #define XSC_RX_FRAG_SZ_ORDER 0 #define XSC_RX_FRAG_SZ (PAGE_SIZE << XSC_RX_FRAG_SZ_ORDER) +#define DEFAULT_FRAG_SIZE (2048) /* message opcode */ enum { @@ -214,6 +148,7 @@ enum { XSC_QUEUE_TYPE_VIRTIO_BLK = 4, XSC_QUEUE_TYPE_RAW_TPE = 5, XSC_QUEUE_TYPE_RAW_TSO = 6, + XSC_QUEUE_TYPE_RAW_TX = 7, XSC_QUEUE_TYPE_INVALID = 0xFF, }; @@ -241,6 +176,8 @@ enum { XSC_BASE_WQE_SHIFT = 4, XSC_SEND_SEG_NUM = 4, XSC_SEND_WQE_SHIFT = 6, + XSC_CTRL_SEG_NUM = 1, + XSC_RADDR_SEG_NUM = 1, }; enum { @@ -254,8 +191,8 @@ enum { }; /* Descriptors that are allocated by SW and accessed by HW, 32-byte aligned + * this is to keep descriptor structures packed */ -/* this is to keep descriptor structures packed */ struct regpair { __le32 lo; __le32 hi; @@ -415,8 +352,8 @@ enum xsc_tbm_pct_inport { /*for beryl tcam table .end*/ /* Size of WQE */ -#define XSC_SEND_WQE_SIZE (1 << XSC_SEND_WQE_SHIFT) -#define XSC_RECV_WQE_SIZE (1 << XSC_RECV_WQE_SHIFT) +#define XSC_SEND_WQE_SIZE BIT(XSC_SEND_WQE_SHIFT) +#define XSC_RECV_WQE_SIZE BIT(XSC_RECV_WQE_SHIFT) union xsc_db_data { struct { @@ -438,216 +375,10 @@ union xsc_db_data { __le32 raw_data; }; -enum xsc_port_type_encode { - XSC_PHY_PORT_MAC_0 = 0x0, - XSC_PHY_PORT_MAC_1 = 0x1, - XSC_PHY_PORT_MAC_2 = 0x2, - XSC_PHY_PORT_MAC_3 = 0x3, - XSC_PHY_PORT_MAC_4 = 0x4, - XSC_PHY_PORT_MAC_5 = 0x5, - XSC_PHY_PORT_MAC_6 = 0x6, - XSC_PHY_PORT_MAC_7 = 0x7, - XSC_PHY_PORT_PCIE_0 = 0x8, - XSC_PHY_PORT_PCIE_1 = 0x9, - - XSC_LAG_PORT_START = 15, - XSC_LAG_PORT_END = 62, - - XSC_PORT_FUNC_ID_START = 63, - XSC_PORT_FUNC_ID_END = 1214, -}; - -#define XSC_PHY_PORT_MAC_NUM 8 - -#ifndef XSC_CHIP_ANDES -#ifdef XSC_CHIP_BERYL_50S -#define XSC_PCIE0_VF_LOGIC_PORT_NUM 512 -#else -#define XSC_PCIE0_VF_LOGIC_PORT_NUM 256 -#endif -#define XSC_PCIE1_VF_LOGIC_PORT_NUM 0 -#define 
XSC_PCIE0_PF_LOGIC_PORT_NUM 2 -#define XSC_PCIE1_PF_LOGIC_PORT_NUM 8 - -#ifndef PCIE0_PF1_VF_NUM -#define PCIE0_PF1_VF_NUM 0 -#endif - -enum xsc_port_func_id_encode { - XSC_PCIE0_VF0_FUNC_ID = 0, - XSC_PCIE0_PF0_VF0_FUNC_ID = XSC_PCIE0_VF0_FUNC_ID, -#if (CHIP_HOTFIX_NUM >= 0x23) - XSC_PCIE0_PF1_VF0_FUNC_ID = (XSC_PCIE0_PF0_VF0_FUNC_ID + - PCIE0_PF0_VF_NUM), - XSC_PCIE0_VF_FUNC_ID_END = (XSC_PCIE0_PF1_VF0_FUNC_ID + - PCIE0_PF1_VF_NUM - 1), -#else - XSC_PCIE0_PF1_VF0_FUNC_ID = (XSC_PCIE0_PF0_VF0_FUNC_ID + - XSC_PCIE0_VF_LOGIC_PORT_NUM), - XSC_PCIE0_VF_FUNC_ID_END = (XSC_PCIE0_PF0_VF0_FUNC_ID + - 2*XSC_PCIE0_VF_LOGIC_PORT_NUM - 1),//511 -#endif - XSC_PCIE1_VF0_FUNC_ID = XSC_PCIE0_VF_FUNC_ID_END, - XSC_PCIE1_PF0_VF0_FUNC_ID = XSC_PCIE0_VF_FUNC_ID_END, - XSC_PCIE1_PF1_VF0_FUNC_ID = XSC_PCIE0_VF_FUNC_ID_END, - XSC_PCIE1_VF_FUNC_ID_END = XSC_PCIE0_VF_FUNC_ID_END, - -#if (CHIP_HOTFIX_NUM >= 0x23) - XSC_PCIE0_PF0_FUNC_ID = (XSC_PCIE0_VF_FUNC_ID_END + 1), - XSC_PCIE0_PF_FUNC_ID_END = (XSC_PCIE0_PF0_FUNC_ID + PCIE0_PF_NUM - 1), - XSC_PCIE1_PF0_FUNC_ID = (XSC_PCIE0_PF_FUNC_ID_END + 1), - XSC_PCIE1_PF_FUNC_ID_END = (XSC_PCIE1_PF0_FUNC_ID + PCIE1_PF_NUM - 1),//521 -#else - XSC_PCIE0_PF0_FUNC_ID = (XSC_PCIE1_PF0_VF0_FUNC_ID + 1),//512 - XSC_PCIE0_PF_FUNC_ID_END = (XSC_PCIE0_PF0_FUNC_ID + - XSC_PCIE0_PF_LOGIC_PORT_NUM - 1),//513 - XSC_PCIE1_PF0_FUNC_ID = (XSC_PCIE0_PF0_FUNC_ID + - XSC_PCIE0_PF_LOGIC_PORT_NUM),//514 - XSC_PCIE1_PF_FUNC_ID_END = (XSC_PCIE1_PF0_FUNC_ID + - XSC_PCIE1_PF_LOGIC_PORT_NUM - 1),//521 -#endif - XSC_FUNC_ID_END, -}; -#else -#define XSC_PCIE0_VF_LOGIC_PORT_NUM 512 -#define XSC_PCIE1_VF_LOGIC_PORT_NUM 0 -#define XSC_PCIE0_PF_LOGIC_PORT_NUM 2 -#define XSC_PCIE1_PF_LOGIC_PORT_NUM 8 -enum xsc_port_func_id_encode { - XSC_PCIE0_VF0_FUNC_ID = 0, - XSC_PCIE0_PF0_VF0_FUNC_ID = XSC_PCIE0_VF0_FUNC_ID, - XSC_PCIE0_PF1_VF0_FUNC_ID = (XSC_PCIE0_PF0_VF0_FUNC_ID + - XSC_PCIE0_VF_LOGIC_PORT_NUM),//512 - XSC_PCIE0_VF_FUNC_ID_END = (XSC_PCIE0_VF0_FUNC_ID + - 
2*XSC_PCIE0_VF_LOGIC_PORT_NUM - 1),//1023 - - XSC_PCIE1_VF0_FUNC_ID = XSC_PCIE0_VF_FUNC_ID_END, - XSC_PCIE1_PF0_VF0_FUNC_ID = XSC_PCIE0_VF_FUNC_ID_END, - XSC_PCIE1_PF1_VF0_FUNC_ID = XSC_PCIE0_VF_FUNC_ID_END, - XSC_PCIE1_VF_FUNC_ID_END = XSC_PCIE0_VF_FUNC_ID_END, - - XSC_PCIE0_PF0_FUNC_ID = (XSC_PCIE0_VF0_FUNC_ID + - 2*XSC_PCIE0_VF_LOGIC_PORT_NUM),//1024 - XSC_PCIE0_PF_FUNC_ID_END = (XSC_PCIE0_PF0_FUNC_ID + - XSC_PCIE0_PF_LOGIC_PORT_NUM - 1),//1025 - - XSC_PCIE1_PF0_FUNC_ID = 1032,//from program manual - XSC_PCIE1_PF_FUNC_ID_END = (XSC_PCIE1_PF0_FUNC_ID + - XSC_PCIE1_PF_LOGIC_PORT_NUM - 1),//1039 - XSC_FUNC_ID_END, -}; -#endif - -#define XSC_PHY_PORT_MAC_N(mac_id) \ - (XSC_PHY_PORT_MAC_0 + mac_id) -#define XSC_PHY_PORT_PCIE_N(pcie_id) \ - (XSC_PHY_PORT_PCIE_0 + pcie_id) -#define XSC_PHY_PORT_TO_PCIE0_PF_ID(pcie_port) \ - (pcie_port - XSC_PHY_PORT_PCIE_0) -#define XSC_PHY_PORT_TO_PCIE1_PF_ID(pcie_port) \ - (pcie_port - XSC_PHY_PORT_PCIE_1 - 1) - -#define XSC_GLB_FUNC_TO_PCIE0_PF_ID(glb_func) \ - (glb_func - XSC_PCIE0_PF0_FUNC_ID) -#define XSC_GLB_FUNC_TO_PCIE1_PF_ID(glb_func) \ - (glb_func - XSC_PCIE1_PF0_FUNC_ID) - -#define XSC_PCIE0_PF0_VF_N_FUNC_ID(vf_id) \ - (XSC_PCIE0_PF0_VF0_FUNC_ID + vf_id) -#define XSC_PCIE0_PF1_VF_N_FUNC_ID(vf_id) \ - (XSC_PCIE0_PF1_VF0_FUNC_ID + vf_id) - -#define XSC_PCIE1_PF0_VF_N_FUNC_ID(vf_id) \ - (XSC_PCIE1_PF0_VF0_FUNC_ID + vf_id) -#define XSC_PCIE1_PF1_VF_N_FUNC_ID(vf_id) \ - (XSC_PCIE1_PF1_VF0_FUNC_ID + vf_id) - -#define XSC_PCIE0_PF_N_FUNC_ID(pf_id) \ - (XSC_PCIE0_PF0_FUNC_ID + pf_id) -#define XSC_PCIE1_PF_N_FUNC_ID(pf_id) \ - (XSC_PCIE1_PF0_FUNC_ID + pf_id) - -#define XSC_PCIE0_PF_N_LOGIC_PORT(pf_id) \ - (XSC_PORT_FUNC_ID_START + XSC_PCIE0_PF0_FUNC_ID + pf_id) -#define XSC_PCIE1_PF_N_LOGIC_PORT(pf_id) \ - (XSC_PORT_FUNC_ID_START + XSC_PCIE1_PF0_FUNC_ID + pf_id) - -#define XSC_PCIE0_PF0_VF_N_LOGIC_PORT(vf_id) \ - (XSC_PORT_FUNC_ID_START + XSC_PCIE0_PF0_VF_N_FUNC_ID(vf_id)) -#define XSC_PCIE0_PF1_VF_N_LOGIC_PORT(vf_id) \ - 
(XSC_PORT_FUNC_ID_START + XSC_PCIE0_PF1_VF_N_FUNC_ID(vf_id)) - -#define XSC_PCIE1_PF0_VF_N_LOGIC_PORT(vf_id) \ - (XSC_PORT_FUNC_ID_START + XSC_PCIE1_PF0_VF_N_FUNC_ID(vf_id)) -#define XSC_PCIE1_PF1_VF_N_LOGIC_PORT(vf_id) \ - (XSC_PORT_FUNC_ID_START + XSC_PCIE1_PF1_VF_N_FUNC_ID(vf_id)) - #define XSC_BROADCASTID_MAX 2 -#define XSC_TBM_BOMT_DESTINFO_SHIFT (XSC_BROADCASTID_MAX/2) +#define XSC_TBM_BOMT_DESTINFO_SHIFT (XSC_BROADCASTID_MAX / 2) #define XSC_TBM_BOMT_BROADCASTID_MASK (XSC_BROADCASTID_MAX - 1) -static inline bool xsc_cal_pf_vf_id(int func_id, u8 *pf_id, u8 *pcie_no, u16 *vf_id) -{ - bool is_pf = true; - - if (func_id >= XSC_PCIE0_PF0_FUNC_ID && - func_id <= XSC_PCIE0_PF_FUNC_ID_END) { - is_pf = true; - *pf_id = func_id - XSC_PCIE0_PF0_FUNC_ID; - *pcie_no = 0; - } else if (func_id >= XSC_PCIE1_PF0_FUNC_ID && - func_id <= XSC_PCIE1_PF_FUNC_ID_END) { - is_pf = true; - *pf_id = func_id - XSC_PCIE1_PF0_FUNC_ID; - *pcie_no = 1; - } else if (func_id >= XSC_PCIE0_PF0_VF0_FUNC_ID && - func_id < XSC_PCIE0_PF1_VF0_FUNC_ID) { - is_pf = false; - *pf_id = 0; - *vf_id = func_id - XSC_PCIE0_PF0_VF0_FUNC_ID; - *pcie_no = 0; - } else if (func_id >= XSC_PCIE0_PF1_VF0_FUNC_ID && - func_id <= XSC_PCIE0_VF_FUNC_ID_END) { - is_pf = false; - *pf_id = 1; - *vf_id = func_id - XSC_PCIE0_PF1_VF0_FUNC_ID; - *pcie_no = 0; - } - return is_pf; -} - -#define XSC_IS_VF(glb_func) (((glb_func) >= XSC_PCIE0_VF0_FUNC_ID && \ - (glb_func) <= XSC_PCIE0_VF_FUNC_ID_END) || \ - ((glb_func) >= XSC_PCIE1_VF0_FUNC_ID && \ - (glb_func) <= XSC_PCIE1_VF_FUNC_ID_END)) - -#define XSC_IS_PF0_VF(glb_func) (((glb_func) >= XSC_PCIE0_PF0_VF0_FUNC_ID && \ - (glb_func) < XSC_PCIE0_PF1_VF0_FUNC_ID) || \ - ((glb_func) >= XSC_PCIE1_PF0_VF0_FUNC_ID && \ - (glb_func) < XSC_PCIE1_PF1_VF0_FUNC_ID)) - -#define XSC_IS_PCIE0_PF(glb_func) ((glb_func) >= XSC_PCIE0_PF0_FUNC_ID && \ - (glb_func) <= XSC_PCIE0_PF_FUNC_ID_END) - -#define XSC_PF_GET_PF_ID(glb_func) (XSC_IS_PCIE0_PF(glb_func) ? 
\ - (glb_func) - XSC_PCIE0_PF0_FUNC_ID : \ - (glb_func) - XSC_PCIE1_PF0_FUNC_ID) - -#define XSC_PF_VF_GET_PF_ID(glb_func) (XSC_IS_VF(glb_func) ? \ - (XSC_IS_PF0_VF(glb_func) ? 0 : 1) : \ - XSC_PF_GET_PF_ID(glb_func)) - -#define XSC_IS_PCIE0(glb_func) (((glb_func) >= XSC_PCIE0_VF0_FUNC_ID && \ - (glb_func) <= XSC_PCIE0_VF_FUNC_ID_END) || \ - ((glb_func) >= XSC_PCIE0_PF0_FUNC_ID && \ - (glb_func) <= XSC_PCIE0_PF_FUNC_ID_END)) - -#define XSC_GET_PCIE_NO(glb_func) (XSC_IS_PCIE0(glb_func) ? 0 : 1) - -#define XSC_IS_PF(func_id) \ - (((func_id) >= XSC_PCIE0_PF0_FUNC_ID && (func_id) <= XSC_PCIE0_PF_FUNC_ID_END) || \ - ((func_id) >= XSC_PCIE1_PF0_FUNC_ID && (func_id) <= XSC_PCIE1_PF_FUNC_ID_END)) - /* Doorbell registers */ // //#define SQM_DB_NEXT_PID_OFFSET 0 diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h index 387250d4f5255112dfaeae8d870b2fd24b4f85bd..69350a6338750d65c6006d4c7ca25eaa10b9a79d 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -20,12 +19,15 @@ _IOWR(XSC_IOCTL_MAGIC, 3, struct xsc_ioctl_hdr) #define XSC_IOCTL_MEM \ _IOWR(XSC_IOCTL_MAGIC, 4, struct xsc_ioctl_hdr) +#define XSC_IOCTL_CMDQ_RAW \ + _IOWR(XSC_IOCTL_MAGIC, 5, struct xsc_ioctl_hdr) #define XSC_IOCTL_CHECK_FILED 0x01234567 enum { XSC_IOCTL_OP_GET_LOCAL, XSC_IOCTL_OP_GET_VF_INFO, XSC_IOCTL_OP_GET_CONTEXT, + XSC_IOCTL_OP_GET_INFO_BY_BDF, XSC_IOCTL_OP_GET_MAX }; @@ -54,6 +56,13 @@ enum { XSC_IOCTL_MEM_MAX }; +enum { + XSC_IOCTL_GET_VECTOR_MATRIX = 0x400, + XSC_IOCTL_SET_LOG_LEVEL = 0x401, + XSC_IOCTL_SET_CMD_VERBOSE = 0x402, + XSC_IOCTL_DRIVER_MAX +}; + enum xsc_flow_tbl_id { XSC_FLOW_TBL_IPAT, //IN_PORT_ATTR XSC_FLOW_TBL_IPVLANMT, //IN_PORT_VLAN_MEMBER @@ -141,6 +150,8 @@ enum xsc_flow_tbl_id { XSC_FLOW_MAC_PORT_MTU, //MAC_PORT_MTU XSC_FLOW_ECP_PKT_LEN_INC, //ECP_PKT_LEN_INC XSC_FLOW_TCP_FLAGS_CFG, //TCP_FLAGS_CFG + XSC_FLOW_DBG_CNT, //DBG_CNT + XSC_FLOW_PRS_REC_PORT_UDF_SEL, XSC_FLOW_TBL_MAX }; @@ -177,6 +188,9 @@ struct xsc_ioctl_qp_range { }; struct xsc_ioctl_get_phy_info_res { + u32 domain; + u32 bus; + u32 devfn; u32 phy_port; //local pf pcie funcid xdev->pcie_port u32 func_id; //local pf port funcid xdev->glb_func_id ? 
u32 logic_in_port; //local pf port logical_in_port xdev->logic_port @@ -185,7 +199,7 @@ struct xsc_ioctl_get_phy_info_res { u16 lag_id; u16 raw_qp_id_base; u16 raw_rss_qp_id_base; - u16 funcid_encode[8]; + u16 funcid[8]; u16 lag_port_start; u16 raw_tpe_qp_num; int send_seg_num; @@ -194,6 +208,7 @@ struct xsc_ioctl_get_phy_info_res { u8 dma_rw_tbl_vld; u8 pct_compress_vld; u32 chip_version; + u32 hca_core_clock; }; struct xsc_ioctl_get_vf_info_res { @@ -204,6 +219,12 @@ struct xsc_ioctl_get_vf_info_res { u32 logic_port; }; +struct xsc_alloc_ucontext_req { + u32 domain; + u32 bus; + u32 devfn; +}; + struct xsc_ioctl_global_pcp { int pcp; }; @@ -236,6 +257,11 @@ struct xsc_ioctl_cma_dscp { int dscp; }; +struct xsc_ioctl_set_debug_info { + unsigned int log_level; + unsigned int cmd_verbose; +}; + /* type-value */ struct xsc_ioctl_data_tl { u16 table; /* table id */ @@ -252,8 +278,17 @@ struct xsc_ioctl_attr { u8 data[0]; /* specific table info */ }; +struct xsc_ioctl_emu_hdr { + u16 in_length; /* cmd req length */ + u16 out_length; /* cmd rsp length */ + u8 data[0]; /* emu cmd content start from here */ +}; + struct xsc_ioctl_hdr { u32 check_filed; /* Validity verification fileds */ + u32 domain; + u32 bus; + u32 devfn; struct xsc_ioctl_attr attr; }; diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h index 1f12bec9a680c8c937904adde67e20a25ef5910d..bc07e99a12e8d50aac88225cfe6971cb1abd63b3 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #ifndef XSC_LAG_H #define XSC_LAG_H +#include + struct lag_func { struct xsc_core_device *xdev; struct net_device *netdev; @@ -19,7 +20,9 @@ struct lag_tracker { enum netdev_lag_tx_type tx_type; struct netdev_lag_lower_state_info netdev_state[XSC_MAX_PORTS]; struct net_device *ndev[XSC_MAX_PORTS]; - unsigned int is_bonded:1; + unsigned int is_hw_bonded:1; + unsigned int is_kernel_bonded:1; + unsigned int is_kernel_bonded_change:1; unsigned int lag_disable:1; u8 gw_dmac0[6]; u8 gw_dmac1[6]; @@ -56,9 +59,14 @@ struct xsc_fib_event_work { }; enum { - XSC_LAG_FLAG_ROCE = 1 << 0, - XSC_LAG_FLAG_SRIOV = 1 << 1, - XSC_LAG_FLAG_MULTIPATH = 1 << 2, + XSC_LAG_FLAG_ROCE = 1 << 0, + XSC_LAG_FLAG_SRIOV = 1 << 1, + XSC_LAG_FLAG_MULTIPATH = 1 << 2, +}; + +enum { + XSC_BOND_FLAG_KERNEL = 1 << 3, + XSC_BOND_FLAG_LAG = 1 << 4, }; enum xsc_lag_hash { @@ -78,8 +86,13 @@ enum xsc_lag_hash { XSC_LAG_FLAG_MULTIPATH) #define GET_LAG_MEMBER_BITMAP(remap_port1, remap_port2) \ - (((remap_port1 != MAC_INVALID) ? BIT(remap_port1 - MAC_SHIFT) : 0) | \ - ((remap_port2 != MAC_INVALID) ? BIT(remap_port2 - MAC_SHIFT) : 0)) + ((((remap_port1) != MAC_INVALID) ? BIT((remap_port1) - MAC_SHIFT) : 0) | \ + (((remap_port2) != MAC_INVALID) ? 
BIT((remap_port2) - MAC_SHIFT) : 0)) + +static inline bool __xsc_bond_is_active(struct xsc_lag *ldev) +{ + return !!(ldev->flags & XSC_BOND_FLAG_KERNEL); +} static inline bool __xsc_lag_is_active(struct xsc_lag *ldev) { @@ -109,4 +122,3 @@ void xsc_lag_enable(struct xsc_core_device *xdev); void xsc_lag_disable(struct xsc_core_device *xdev); #endif /* XSC_LAG_H */ - diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h new file mode 100644 index 0000000000000000000000000000000000000000..5429ec58e25435115ec2158e73773a3577367621 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_MACRO_H +#define XSC_MACRO_H + +#ifndef NO_MSIX_SUPPORT +#define MSIX_SUPPORT +#endif + +#ifndef NO_RSS_SUPPORT +#define XSC_RSS_SUPPORT +#endif + +#ifndef NO_BQL_SUPPORT +#define XSC_BQL_SUPPORT +#endif + +#ifndef NO_ESWITCH_SUPPORT +#define CONFIG_XSC_ESWITCH +#endif + +#endif /*XSC_MACRO_H*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h index fa476590d4cfa50b7cc4e9db9fddf788b5f62a09..0be041f100736f4bb917860630170b4021b722dc 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h @@ -1,13 +1,22 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #ifndef XSC_PORT_CTRL_H #define XSC_PORT_CTRL_H -typedef void (*port_ctrl_cb)(struct xsc_core_device *xdev, unsigned int cmd, +/*mmap msg encode*/ +enum { + XSC_MMAP_MSG_SQDB = 0, + XSC_MMAP_MSG_RQDB = 1, + XSC_MMAP_MSG_CQDB = 2, + XSC_MMAP_MSG_ARM_CQDB = 3, +}; + +#define TRY_NEXT_CB 0x1a2b3c4d + +typedef int (*port_ctrl_cb)(struct xsc_bdf_file *file, unsigned int cmd, struct xsc_ioctl_hdr __user *user_hdr, void *data); void xsc_port_ctrl_remove(struct xsc_core_device *dev); @@ -17,6 +26,6 @@ void xsc_port_ctrl_cb_dereg(const char *name); void xsc_port_ctrl_fini(void); int xsc_port_ctrl_init(void); - +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn); #endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h index f05b4509e2a752d4612e52fa5a1f98f199ee5aaf..daf191b6ce0221f08a5b994e3f55cf6cf7dd1666 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -88,14 +87,14 @@ struct epp_pph { u16 rsv11:2; u16 pkt_hdr_ptr:14; //38 bytes - u64 rsv12:5; - u64 csum_ofst:8; - u64 csum_val:29; - u64 csum_plen:14; - u64 rsv11_0:8; //46 bytes + u64 rsv12:5; + u64 csum_ofst:8; + u64 csum_val:29; + u64 csum_plen:14; + u64 rsv11_0:8; //46 bytes - u64 rsv11_1; - u64 rsv11_2; + u64 rsv11_1; + u64 rsv11_2; u16 rsv11_3; }; @@ -118,60 +117,60 @@ struct epp_pph { #define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET (0) #define XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(PPH_BASE_ADDR) \ - ((*(u16 *)((u8 *)PPH_BASE_ADDR + EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET) & \ + ((*(u16 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET) & \ EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK) >> EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET) #define XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(PPH_BASE_ADDR) \ - ((*(u8 *)((u8 *)PPH_BASE_ADDR + EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET) & \ + ((*(u8 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET) & \ EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK) >> EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET) #define PPH_OUTER_IP_TYPE_OFF (4UL) #define PPH_OUTER_IP_TYPE_MASK (0x3) #define PPH_OUTER_IP_TYPE_SHIFT (11) #define PPH_OUTER_IP_TYPE(base) \ - ((ntohs(*(u16 *)((u8 *)base + PPH_OUTER_IP_TYPE_OFF)) >> \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_TYPE_OFF)) >> \ PPH_OUTER_IP_TYPE_SHIFT) & PPH_OUTER_IP_TYPE_MASK) #define PPH_OUTER_IP_OFST_OFF (4UL) #define PPH_OUTER_IP_OFST_MASK (0x1f) #define PPH_OUTER_IP_OFST_SHIFT (6) #define PPH_OUTER_IP_OFST(base) \ - ((ntohs(*(u16 *)((u8 *)base + PPH_OUTER_IP_OFST_OFF)) >> \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_OFST_OFF)) >> \ PPH_OUTER_IP_OFST_SHIFT) & PPH_OUTER_IP_OFST_MASK) #define PPH_OUTER_IP_LEN_OFF (4UL) #define PPH_OUTER_IP_LEN_MASK (0x3f) #define PPH_OUTER_IP_LEN_SHIFT (0) #define PPH_OUTER_IP_LEN(base) \ - ((ntohs(*(u16 *)((u8 *)base + PPH_OUTER_IP_LEN_OFF)) >> \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_LEN_OFF)) >> \ PPH_OUTER_IP_LEN_SHIFT) & PPH_OUTER_IP_LEN_MASK) #define 
PPH_OUTER_TP_TYPE_OFF (6UL) #define PPH_OUTER_TP_TYPE_MASK (0x7) #define PPH_OUTER_TP_TYPE_SHIFT (12) #define PPH_OUTER_TP_TYPE(base) \ - ((ntohs(*(u16 *)((u8 *)base + PPH_OUTER_TP_TYPE_OFF)) >> \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_TP_TYPE_OFF)) >> \ PPH_OUTER_TP_TYPE_SHIFT) & PPH_OUTER_TP_TYPE_MASK) #define PPH_PAYLOAD_OFST_OFF (14UL) #define PPH_PAYLOAD_OFST_MASK (0xff) #define PPH_PAYLOAD_OFST_SHIFT (3) #define PPH_PAYLOAD_OFST(base) \ - ((ntohs(*(u16 *)((u8 *)base + PPH_PAYLOAD_OFST_OFF)) >> \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_PAYLOAD_OFST_OFF)) >> \ PPH_PAYLOAD_OFST_SHIFT) & PPH_PAYLOAD_OFST_MASK) #define PPH_CSUM_OFST_OFF (38UL) #define PPH_CSUM_OFST_MASK (0xff) #define PPH_CSUM_OFST_SHIFT (51) #define PPH_CSUM_OFST(base) \ - ((be64_to_cpu(*(u64 *)((u8 *)base + PPH_CSUM_OFST_OFF)) >> \ + ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_OFST_OFF)) >> \ PPH_CSUM_OFST_SHIFT) & PPH_CSUM_OFST_MASK) #define PPH_CSUM_VAL_OFF (38UL) #define PPH_CSUM_VAL_MASK (0xeffffff) #define PPH_CSUM_VAL_SHIFT (22) #define PPH_CSUM_VAL(base) \ - ((be64_to_cpu(*(u64 *)((u8 *)base + PPH_CSUM_VAL_OFF)) >> \ + ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_VAL_OFF)) >> \ PPH_CSUM_VAL_SHIFT) & PPH_CSUM_VAL_MASK) #endif /* XSC_TBM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h index 4e5a3fc6a7a7b2cdcf916f1e03385ddf21fe9179..6b2c84017c18cafc5568743aa7c94d2d27b74f2a 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Makefile b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile index cd0430ee128315caa4d49c21edb7171f6a18022a..75c06c6250ba05ab5fcd9b12cb3aa4ff298a5e0c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/Makefile +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile @@ -1,13 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. # All rights reserved. -# -# Makefile for the Yunsilicon xsc ethernet driver -# -ccflags-y := -I $(srctree)/drivers/net/ethernet/yunsilicon/xsc/ -obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc_net.o -xsc_net-y := main.o xsc_eth_ctrl.o xsc_eth_tx.o xsc_eth_rx.o \ - xsc_eth_txrx.o ut_main.o xsc_eth_ethtool.o \ - xsc_eth_stats.o xsc_dcbnl.o +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc_eth.o + +xsc_eth-y := main.o xsc_eth_ctrl.o xsc_eth_tx.o xsc_eth_rx.o xsc_eth_txrx.o \ + ut_main.o xsc_eth_ethtool.o xsc_eth_stats.o xsc_dcbnl.o xsc_eth_sysfs.o xsc_fs.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c index 70aba07b5b3d3f0fc246ddacd68dba994f1748c6..582ee32fa1ea49f96a3a60938b9055b6dac8f42e 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/main.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/main.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -16,13 +15,14 @@ #include #include #include - -#include -#include -#include -#include -#include -#include +#include + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/xsc_lag.h" #include "../pci/fw/xsc_tbm.h" #include "xsc_eth.h" @@ -32,64 +32,41 @@ #include "xsc_eth_stats.h" #include "xsc_accel.h" #include "xsc_eth_ctrl.h" +#include "../pci/eswitch.h" + +#include "common/xsc_fs.h" +#include "common/vport.h" + +MODULE_LICENSE("GPL"); static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq); static void xsc_eth_remove(struct xsc_core_device *xdev, void *context); static int xsc_eth_open(struct net_device *netdev); static int xsc_eth_close(struct net_device *netdev); +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc); #ifdef NEED_CREATE_RX_THREAD -extern u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter); +extern uint32_t xsc_eth_rx_thread_create(struct xsc_adapter *adapter); #endif -#define XSC_SET_FEATURE(features, feature, enable) \ - do { \ - if (enable) \ - *features |= feature; \ - else \ - *features &= ~feature; \ - } while (0) - -typedef int (*xsc_feature_handler)(struct net_device *netdev, bool enable); - -int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, - u32 qpn, u16 status) +static inline void xsc_set_feature(netdev_features_t *features, + netdev_features_t feature, + bool enable) { - int ret = -1; - int insize; - struct xsc_modify_qp_mbox_in *in; - struct xsc_modify_qp_mbox_out out; - - insize = sizeof(struct xsc_modify_qp_mbox_in); - - in = kvzalloc(insize, GFP_KERNEL); - if (!in) - return -ENOMEM; - - /*eth: only set status according to cmd,ignore other fields*/ - in->hdr.opcode = cpu_to_be16(status); - in->qpn = cpu_to_be32(qpn); - - ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret) { - xsc_core_warn(xdev, "failed to modify qp%d status=%d, err=%d\n", - qpn, 
status, ret); - goto exit; - } + if (enable) + *features |= feature; + else + *features &= ~feature; +} - if (out.hdr.status != 0) { - xsc_core_warn(xdev, "return error status %d\n", out.hdr.status); - ret = -ENOEXEC; - } +typedef int (*xsc_feature_handler)(struct net_device *netdev, bool enable); -exit: - kvfree(in); - return ret; -} +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status); static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, - struct xsc_queue_attr *attr, u8 type) + struct xsc_queue_attr *attr, u8 type) { struct xsc_core_device *xdev = adapter->xdev; @@ -121,7 +98,7 @@ static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, attr->ele_num = adapter->nic_param.rq_size; attr->ele_size = xdev->caps.recv_ds_num * XSC_RECV_WQE_DS; attr->ele_log_size = order_base_2(attr->ele_size); - attr->q_log_size = order_base_2(attr->ele_num); + attr->q_log_size = order_base_2(attr->ele_num); } else if (type == XSC_QUEUE_TYPE_SQ) { attr->q_type = XSC_QUEUE_TYPE_SQ; attr->ele_num = adapter->nic_param.sq_size; @@ -133,7 +110,8 @@ static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, static void xsc_eth_init_frags_partition(struct xsc_rq *rq) { - struct xsc_wqe_frag_info next_frag, *prev; + struct xsc_wqe_frag_info next_frag = {}; + struct xsc_wqe_frag_info *prev; int i; next_frag.di = &rq->wqe.di[0]; @@ -152,7 +130,7 @@ static void xsc_eth_init_frags_partition(struct xsc_rq *rq) next_frag.di++; next_frag.offset = 0; if (prev) - prev->last_in_page = true; + prev->last_in_page = 1; } *frag = next_frag; @@ -163,7 +141,7 @@ static void xsc_eth_init_frags_partition(struct xsc_rq *rq) } if (prev) - prev->last_in_page = true; + prev->last_in_page = 1; } static int xsc_eth_init_di_list(struct xsc_rq *rq, int wq_sz, int cpu) @@ -171,7 +149,7 @@ static int xsc_eth_init_di_list(struct xsc_rq *rq, int wq_sz, int cpu) int len = wq_sz << rq->wqe.info.log_num_frags; rq->wqe.di = kvzalloc_node(array_size(len, 
sizeof(*rq->wqe.di)), - GFP_KERNEL, cpu_to_node(cpu)); + GFP_KERNEL, cpu_to_node(cpu)); if (!rq->wqe.di) return -ENOMEM; @@ -185,86 +163,25 @@ static void xsc_eth_free_di_list(struct xsc_rq *rq) kvfree(rq->wqe.di); } -static void xsc_rx_cache_reduce_clean_pending(struct xsc_rq *rq) -{ - struct xsc_page_cache_reduce *reduce = &rq->page_cache.reduce; - int i; - - if (!test_bit(XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, &rq->state)) - return; - - for (i = 0; i < reduce->npages; i++) - xsc_page_release_dynamic(rq, &reduce->pending[i], false); - reduce->npages = 0; - - clear_bit(XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, &rq->state); -} - -static void xsc_rx_cache_reduce_work(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct xsc_page_cache_reduce *reduce = - container_of(dwork, struct xsc_page_cache_reduce, reduce_work); - struct xsc_page_cache *cache = - container_of(reduce, struct xsc_page_cache, reduce); - struct xsc_rq *rq = container_of(cache, struct xsc_rq, page_cache); - - local_bh_disable(); - napi_schedule(rq->cq.napi); - local_bh_enable(); - xsc_rx_cache_reduce_clean_pending(rq); - - if (ilog2(cache->sz) > cache->log_min_sz) - schedule_delayed_work_on(smp_processor_id(), - dwork, reduce->delay); -} - int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) { struct xsc_page_cache *cache = &rq->page_cache; - struct xsc_page_cache_reduce *reduce = &cache->reduce; - u32 max_sz; - - cache->log_max_sz = log_init_sz + XSC_PAGE_CACHE_LOG_MAX_RQ_MULT; - cache->log_min_sz = log_init_sz; - max_sz = 1 << cache->log_max_sz; - cache->page_cache = kvzalloc_node(max_sz * sizeof(*cache->page_cache), + cache->sz = 1 << log_init_sz; + cache->page_cache = kvzalloc_node(cache->sz * sizeof(*cache->page_cache), GFP_KERNEL, node); if (!cache->page_cache) return -ENOMEM; - reduce->pending = kvzalloc_node(max_sz * sizeof(*reduce->pending), - GFP_KERNEL, node); - if (!reduce->pending) - goto err_free_cache; - - cache->sz = 1 << 
cache->log_min_sz; - cache->head = -1; - INIT_DELAYED_WORK(&reduce->reduce_work, xsc_rx_cache_reduce_work); - reduce->delay = msecs_to_jiffies(XSC_PAGE_CACHE_REDUCE_WORK_INTERVAL); - reduce->grace_period = msecs_to_jiffies(XSC_PAGE_CACHE_REDUCE_GRACE_PERIOD); - reduce->next_ts = MAX_JIFFY_OFFSET; /* in init, no reduce is needed */ - return 0; - -err_free_cache: - kvfree(cache->page_cache); - - return -ENOMEM; } void xsc_rx_free_page_cache(struct xsc_rq *rq) { struct xsc_page_cache *cache = &rq->page_cache; - struct xsc_page_cache_reduce *reduce = &cache->reduce; - int i; - - cancel_delayed_work_sync(&reduce->reduce_work); - xsc_rx_cache_reduce_clean_pending(rq); - kvfree(reduce->pending); + u32 i; - for (i = 0; i <= cache->head; i++) { + for (i = cache->head; i != cache->tail; i = (i + 1) & (cache->sz - 1)) { struct xsc_dma_info *dma_info = &cache->page_cache[i]; xsc_page_release_dynamic(rq, dma_info, false); @@ -295,19 +212,31 @@ void xsc_eth_completion_event(struct xsc_core_cq *xcq) { struct xsc_cq *cq = container_of(xcq, struct xsc_cq, xcq); struct xsc_core_device *xdev = cq->xdev; - struct xsc_rq *rq = &cq->channel->qp.rq[0]; + struct xsc_rq *rq = NULL; + + if (unlikely(!cq->channel)) { + xsc_core_warn(xdev, "cq%d->channel is null\n", xcq->cqn); + return; + } + + rq = &cq->channel->qp.rq[0]; set_bit(XSC_CHANNEL_NAPI_SCHED, &cq->channel->flags); cq->channel->stats->events++; cq->channel->stats->poll = 0; + if (cq->rx) + cq->channel->rx_int = 1; + else + cq->channel->rx_int = 0; + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) xsc_core_warn(xdev, "ch%d_cq%d, napi_flag=0x%lx\n", - cq->channel->chl_idx, xcq->cqn, cq->napi->state); + cq->channel->chl_idx, xcq->cqn, cq->napi->state); napi_schedule(cq->napi); } -inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *xcq) +static inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *xcq) { struct xsc_destroy_cq_mbox_in in; struct xsc_destroy_cq_mbox_out out; @@ 
-318,18 +247,18 @@ inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *x in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); in.cqn = cpu_to_be32(xcq->cqn); err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return -1; + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to destroy cq, err=%d out.status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } xcq->cqn = 0; return 0; } int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, - struct xsc_create_cq_mbox_in *in, int insize) + struct xsc_create_cq_mbox_in *in, int insize) { int err, ret = -1; struct xsc_cq_table *table = &xdev->dev_res->cq_table; @@ -337,8 +266,11 @@ int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret) - return ret; + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create cq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } xcq->cqn = be32_to_cpu(out.cqn) & 0xffffff; xcq->cons_index = 0; @@ -390,11 +322,11 @@ int xsc_eth_destroy_cq(struct xsc_core_device *xdev, struct xsc_cq *cq) err_destroy_cq: xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", - cq->xcq.cqn, err); + cq->xcq.cqn, err); return err; err_delete_cq: xsc_core_warn(xdev, "cqn=%d not found in tree, err=%d\n", - cq->xcq.cqn, err); + cq->xcq.cqn, err); return err; } @@ -404,25 +336,19 @@ void xsc_eth_free_cq(struct xsc_cq *cq) } int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, - struct xsc_create_multiqp_mbox_in *in, - int insize, - int *prqn_base) + struct xsc_create_multiqp_mbox_in *in, + int insize, + int *prqn_base) { int ret; struct xsc_create_multiqp_mbox_out out; in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MULTI_QP); ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret) { - xsc_core_warn(xdev, - 
"failed to create rss rq, qp_num=%d, type=%d, err=%d\n", - in->qp_num, in->qp_type, ret); - return ret; - } - - if (out.hdr.status != 0) { - xsc_core_warn(xdev, "create rss qp(num=%d) return err status=%d\n", - in->qp_num, out.hdr.status); + if (ret || out.hdr.status) { + xsc_core_err(xdev, + "failed to create rss rq, qp_num=%d, type=%d, err=%d out.status=%u\n", + in->qp_num, in->qp_type, ret, out.hdr.status); return -ENOEXEC; } @@ -451,30 +377,27 @@ void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) case XSC_EVENT_TYPE_WQ_CATAS_ERROR: case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: - xsc_core_err(xdev, "%s:Async event %x on QP %d\n", - __func__, type, qp->qpn); + xsc_core_err(xdev, "%s:Async event %x on QP %d\n", __func__, type, qp->qpn); break; default: xsc_core_err(xdev, "%s: Unexpected event type %d on QP %d\n", - __func__, type, qp->qpn); + __func__, type, qp->qpn); return; } - - // TODO: add eth event handler - } int xsc_eth_create_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq, - struct xsc_create_qp_mbox_in *in, int insize) + struct xsc_create_qp_mbox_in *in, int insize) { int ret = -1; struct xsc_create_qp_mbox_out out; in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret) { - xsc_core_warn(xdev, "failed to create rq, err=%d\n", ret); - return ret; + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create rq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; } prq->rqn = be32_to_cpu(out.qpn) & 0xffffff; @@ -483,8 +406,7 @@ int xsc_eth_create_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq, ret = create_resource_common(xdev, &prq->cqp); if (ret) { - xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", - __func__, prq->rqn, ret); + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", __func__, prq->rqn, ret); return ret; } @@ -498,10 +420,9 @@ int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) int 
err; err = xsc_eth_modify_qp_status(xdev, prq->rqn, - XSC_CMD_OP_2RST_QP); + XSC_CMD_OP_2RST_QP); if (err) { - xsc_core_warn(xdev, "failed to set rq%d status=rst, err=%d\n", - prq->rqn, err); + xsc_core_warn(xdev, "failed to set rq%d status=rst, err=%d\n", prq->rqn, err); return err; } @@ -510,26 +431,29 @@ int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); in.qpn = cpu_to_be32(prq->rqn); err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (err) { - xsc_core_warn(xdev, "failed to destroy rq%d, err=%d\n", - prq->rqn, err); - return err; - } - - if (out.hdr.status) { - xsc_core_warn(xdev, "cmdq destroy rq%d return err status=%d\n", - prq->rqn, out.hdr.status); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy rq%d, err=%d out.status=%u\n", + prq->rqn, err, out.hdr.status); return -ENOEXEC; } return 0; } -static void xsc_free_qp_rq(struct xsc_rq *rq) +static void xsc_eth_free_rx_wqe(struct xsc_rq *rq) { - if (rq->dealloc_wqes) - rq->dealloc_wqes(rq); + u16 wqe_ix; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + while (!xsc_wq_cyc_is_empty(wq)) { + wqe_ix = xsc_wq_cyc_get_tail(wq); + rq->dealloc_wqe(rq, wqe_ix); + xsc_wq_cyc_pop(wq); + } +} + +static void xsc_free_qp_rq(struct xsc_rq *rq) +{ if (rq->page_cache.page_cache) xsc_rx_free_page_cache(rq); @@ -543,16 +467,17 @@ static void xsc_free_qp_rq(struct xsc_rq *rq) } int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, - struct xsc_create_qp_mbox_in *in, int insize) + struct xsc_create_qp_mbox_in *in, int insize) { - int ret = -1; struct xsc_create_qp_mbox_out out; + int ret; in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret) { - xsc_core_warn(xdev, "failed to create sq, err=%d\n", ret); - return ret; + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create sq, err=%d out.status=%u\n", + ret, out.hdr.status); + 
return -ENOEXEC; } psq->sqn = be32_to_cpu(out.qpn) & 0xffffff; @@ -563,15 +488,19 @@ int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_mbox_in *in) { struct xsc_modify_raw_qp_mbox_out out; + int ret; in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_RAW_QP); - xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), - &out, sizeof(struct xsc_modify_raw_qp_mbox_out)); - if (out.hdr.status) - xsc_core_warn(xdev, "failed to modify sq, err=%d\n", out.hdr.status); + ret = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + &out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to modify sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } - return out.hdr.status; + return 0; } int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) @@ -582,8 +511,7 @@ int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) err = xsc_eth_modify_qp_status(xdev, psq->sqn, XSC_CMD_OP_2RST_QP); if (err) { - xsc_core_warn(xdev, "failed to set sq%d status=rst, err=%d\n", - psq->sqn, err); + xsc_core_warn(xdev, "failed to set sq%d status=rst, err=%d\n", psq->sqn, err); return err; } @@ -592,15 +520,9 @@ int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); in.qpn = cpu_to_be32(psq->sqn); err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (err) { - xsc_core_warn(xdev, "failed to destroy sq%d, err=%d\n", - psq->sqn, err); - return err; - } - - if (out.hdr.status) { - xsc_core_warn(xdev, "destroy sq%d return err status=%d\n", - psq->sqn, out.hdr.status); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy sq%d, err=%d out.status=%u\n", + psq->sqn, err, out.hdr.status); return -ENOEXEC; } @@ -625,11 +547,9 @@ static int xsc_eth_alloc_qp_sq_db(struct 
xsc_sq *sq, int numa) struct xsc_core_device *xdev = sq->cq.xdev; int df_sz = wq_sz * xdev->caps.send_ds_num; - sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, - sizeof(*sq->db.dma_fifo)), + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sizeof(*sq->db.dma_fifo)), GFP_KERNEL, numa); - sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, - sizeof(*sq->db.wqe_info)), + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.wqe_info)), GFP_KERNEL, numa); if (!sq->db.dma_fifo || !sq->db.wqe_info) { @@ -642,9 +562,8 @@ static int xsc_eth_alloc_qp_sq_db(struct xsc_sq *sq, int numa) return 0; } -static int xsc_eth_alloc_cq(struct xsc_channel *c, - struct xsc_cq *pcq, - struct xsc_cq_param *pcq_param) +static int xsc_eth_alloc_cq(struct xsc_channel *c, struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) { int ret; struct xsc_core_device *xdev = c->adapter->xdev; @@ -653,14 +572,12 @@ static int xsc_eth_alloc_cq(struct xsc_channel *c, u8 q_log_size = pcq_param->cq_attr.q_log_size; u8 ele_log_size = pcq_param->cq_attr.ele_log_size; - /*xdev params is not match with function defination*/ -// ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); -// if (ret) -// return ret; + pcq_param->wq.db_numa_node = cpu_to_node(c->cpu); + pcq_param->wq.buf_numa_node = cpu_to_node(c->cpu); ret = xsc_eth_cqwq_create(xdev, &pcq_param->wq, - q_log_size, ele_log_size, &pcq->wq, - &pcq->wq_ctrl); + q_log_size, ele_log_size, &pcq->wq, + &pcq->wq_ctrl); if (ret) return ret; @@ -668,7 +585,6 @@ static int xsc_eth_alloc_cq(struct xsc_channel *c, core_cq->comp = xsc_eth_completion_event; core_cq->event = xsc_eth_cq_error_event; core_cq->vector = c->chl_idx; -// core_cq->irqn = irqn; for (i = 0; i < xsc_cqwq_get_size(&pcq->wq); i++) { struct xsc_cqe64 *cqe = xsc_cqwq_get_wqe(&pcq->wq, i); @@ -682,8 +598,8 @@ static int xsc_eth_alloc_cq(struct xsc_channel *c, #ifdef NEED_CREATE_RX_THREAD static int xsc_eth_set_cq(struct xsc_channel *c, - struct xsc_cq *pcq, - struct xsc_cq_param 
*pcq_param) + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) { int ret = XSCALE_RET_SUCCESS; struct xsc_create_cq_mbox_in *in; @@ -705,19 +621,19 @@ static int xsc_eth_set_cq(struct xsc_channel *c, in->ctx.glb_func_id = cpu_to_be16(c->adapter->xdev->glb_func_id); xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, - &in->pas[0], hw_npages); + &in->pas[0], hw_npages); ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); kfree(in); xsc_core_info(c->adapter->xdev, "%s: create cqn%d, func_id=%d, ret=%d\n", - __func__, pcq->xcq.cqn, c->adapter->xdev->glb_func_id, ret); + __func__, pcq->xcq.cqn, c->adapter->xdev->glb_func_id, ret); return ret; } #else static int xsc_eth_set_cq(struct xsc_channel *c, - struct xsc_cq *pcq, - struct xsc_cq_param *pcq_param) + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) { int ret = XSCALE_RET_SUCCESS; struct xsc_core_device *xdev = c->adapter->xdev; @@ -753,14 +669,14 @@ static int xsc_eth_set_cq(struct xsc_channel *c, err: kvfree(in); xsc_core_info(c->adapter->xdev, "%s: create ch%d cqn%d, eqn=%d, func_id=%d, ret=%d\n", - __func__, c->chl_idx, pcq->xcq.cqn, eqn, xdev->glb_func_id, ret); + __func__, c->chl_idx, pcq->xcq.cqn, eqn, xdev->glb_func_id, ret); return ret; } #endif static int xsc_eth_open_cq(struct xsc_channel *c, - struct xsc_cq *pcq, - struct xsc_cq_param *pcq_param) + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) { int ret = XSCALE_RET_SUCCESS; @@ -776,6 +692,7 @@ static int xsc_eth_open_cq(struct xsc_channel *c, pcq->napi = &c->napi; pcq->channel = c; + pcq->rx = (pcq_param->cq_attr.q_type == XSC_QUEUE_TYPE_RQCQ) ? 
1 : 0; return 0; @@ -792,7 +709,7 @@ static int xsc_eth_close_cq(struct xsc_channel *c, struct xsc_cq *pcq) ret = xsc_eth_destroy_cq(xdev, pcq); if (ret) { xsc_core_warn(xdev, "failed to close ch%d cq%d, ret=%d\n", - c->chl_idx, pcq->xcq.cqn, ret); + c->chl_idx, pcq->xcq.cqn, ret); return ret; } @@ -801,33 +718,61 @@ static int xsc_eth_close_cq(struct xsc_channel *c, struct xsc_cq *pcq) return 0; } -int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu) +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status) +{ + int ret = 0; + int insize; + struct xsc_modify_qp_mbox_in *in; + struct xsc_modify_qp_mbox_out out; + + insize = sizeof(struct xsc_modify_qp_mbox_in); + + in = kvzalloc(insize, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /*eth: only set status according to cmd,ignore other fields*/ + in->hdr.opcode = cpu_to_be16(status); + in->qpn = cpu_to_be32(qpn); + + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to modify qp%d status=%d, err=%d out.status %u\n", + qpn, status, ret, out.hdr.status); + ret = -ENOEXEC; + } + + kvfree(in); + return ret; +} + +int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) { - int ret; struct xsc_set_mtu_mbox_in in; struct xsc_set_mtu_mbox_out out; - - if (!xsc_core_is_pf(dev)) - return 0; + int ret; memset(&in, 0, sizeof(struct xsc_set_mtu_mbox_in)); memset(&out, 0, sizeof(struct xsc_set_mtu_mbox_out)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTU); in.mtu = cpu_to_be16(mtu); + in.rx_buf_sz_min = cpu_to_be16(rx_buf_sz); in.mac_port = dev->mac_port; ret = xsc_cmd_exec(dev, &in, sizeof(struct xsc_set_mtu_mbox_in), &out, - sizeof(struct xsc_set_mtu_mbox_out)); + sizeof(struct xsc_set_mtu_mbox_out)); if (ret || out.hdr.status) { - xsc_core_warn(dev, "%s: set hw mtu %u failed!\n", __func__, mtu); + xsc_core_err(dev, "failed to set hw_mtu=%u rx_buf_sz=%u, err=%d, status=%d\n", + mtu, rx_buf_sz, ret, 
out.hdr.status); ret = -ENOEXEC; } return ret; } -int xsc_eth_get_mac(struct xsc_core_device *dev, u8 index, char *mac) +int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) { struct xsc_query_eth_mac_mbox_out *out; struct xsc_query_eth_mac_mbox_in in; @@ -840,23 +785,17 @@ int xsc_eth_get_mac(struct xsc_core_device *dev, u8 index, char *mac) memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_ETH_MAC); in.hdr.opmod = cpu_to_be16(0x1); - in.index = xsc_core_is_pf(dev) ? index : (index | BIT(7)); - err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) { - xsc_core_warn(dev, "get mac fail: %d\n", err); - goto exit; - } err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); if (err || out->hdr.status) { - xsc_core_warn(dev, "failed! err=%d, status=%d\n", err, out->hdr.status); + xsc_core_warn(dev, "get mac failed! err=%d, out.status=%u\n", err, out->hdr.status); err = -ENOEXEC; goto exit; } memcpy(mac, out->mac, 6); xsc_core_dbg(dev, "get mac %02x:%02x:%02x:%02x:%02x:%02x\n", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); exit: kfree(out); @@ -870,16 +809,16 @@ int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel * int i; for (i = 0; i < c->qp.rq_num; i++) { + c->qp.rq[i].post_wqes(&c->qp.rq[i]); ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.rq[i].rqn, - XSC_CMD_OP_RTR2RTS_QP); + XSC_CMD_OP_RTR2RTS_QP); if (ret) return ret; - c->qp.rq[i].post_wqes(&c->qp.rq[i]); } for (i = 0; i < c->qp.sq_num; i++) { ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.sq[i].sqn, - XSC_CMD_OP_RTR2RTS_QP); + XSC_CMD_OP_RTR2RTS_QP); if (ret) return ret; } @@ -887,7 +826,7 @@ int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel * } int xsc_eth_modify_qps(struct xsc_adapter *adapter, - struct xsc_eth_channels *chls) + struct xsc_eth_channels *chls) { int ret; int i; @@ -903,33 +842,43 @@ int xsc_eth_modify_qps(struct 
xsc_adapter *adapter, return 0; } -#ifdef XSC_RSS_SUPPORT -static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, - struct xsc_rq *prq, - struct xsc_rq_param *prq_param) +u32 xsc_rx_get_linear_frag_sz(u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + + return XSC_SKB_FRAG_SZ(byte_count); +} + +bool xsc_rx_is_linear_skb(u32 mtu) +{ + u32 linear_frag_sz = xsc_rx_get_linear_frag_sz(mtu); + + return linear_frag_sz <= PAGE_SIZE; +} + +static int xsc_eth_alloc_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param) { struct xsc_adapter *adapter = c->adapter; - struct page_pool_params pagepool_params = { 0 }; u8 q_log_size = prq_param->rq_attr.q_log_size; - u8 ele_log_size = prq_param->rq_attr.ele_log_size; + struct page_pool_params pagepool_params = { 0 }; u32 pool_size = 1 << q_log_size; - int wq_sz; - int i, f; - struct xsc_wq_param wq_param; + u8 ele_log_size = prq_param->rq_attr.ele_log_size; struct xsc_stats *stats = c->adapter->stats; - struct xsc_channel_stats *channel_stats = &stats->channel_stats[c->chl_idx]; + struct xsc_channel_stats *channel_stats = + &stats->channel_stats[c->chl_idx]; int cache_init_sz = 0; - int frag_used_num = adapter->nic_param.rq_frags_size/XSC_RX_FRAG_SZ; + int wq_sz; + int i, f; int ret = 0; prq->stats = &channel_stats->rq; + prq_param->wq.db_numa_node = cpu_to_node(c->cpu); - wq_param.buf_numa_node = dev_to_node(c->adapter->dev); - wq_param.db_numa_node = cpu_to_node(c->cpu); - - ret = xsc_eth_wq_cyc_create(c->adapter->xdev, &wq_param, - q_log_size, ele_log_size, &prq->wqe.wq, - &prq->wq_ctrl); + ret = xsc_eth_wq_cyc_create(c->adapter->xdev, &prq_param->wq, + q_log_size, ele_log_size, &prq->wqe.wq, + &prq->wq_ctrl); if (ret) return ret; @@ -937,9 +886,9 @@ static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, prq->wqe.info = prq_param->frags_info; prq->wqe.frags = kvzalloc_node(array_size((wq_sz << prq->wqe.info.log_num_frags), - sizeof(*prq->wqe.frags)), - GFP_KERNEL, - cpu_to_node(c->cpu)); + 
sizeof(*prq->wqe.frags)), + GFP_KERNEL, + cpu_to_node(c->cpu)); if (!prq->wqe.frags) { ret = -ENOMEM; goto err_alloc_frags; @@ -958,7 +907,7 @@ static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, #endif /* Create a page_pool and register it with rxq */ - pool_size = wq_sz * frag_used_num; + pool_size = wq_sz << prq->wqe.info.log_num_frags; pagepool_params.order = XSC_RX_FRAG_SZ_ORDER; pagepool_params.flags = 0; /* No-internal DMA mapping in page_pool */ pagepool_params.pool_size = pool_size; @@ -972,12 +921,13 @@ static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, prq->page_pool = NULL; goto err_create_pool; } + if (c->chl_idx == 0) xsc_core_dbg(adapter->xdev, - "page pool: size=%d, cpu=%d, node=%d, cache_size=%d, frags_size=%d, mtu=%d\n", - pool_size, c->cpu, pagepool_params.nid, - cache_init_sz, adapter->nic_param.rq_frags_size, - adapter->nic_param.mtu); + "page pool: size=%d, cpu=%d, pool_numa=%d, cache_size=%d, mtu=%d, wqe_numa=%d\n", + pool_size, c->cpu, pagepool_params.nid, + cache_init_sz, adapter->nic_param.mtu, + prq_param->wq.buf_numa_node); for (i = 0; i < wq_sz; i++) { struct xsc_eth_rx_wqe_cyc *wqe = @@ -989,8 +939,8 @@ static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, wqe->data[f].seg_len = cpu_to_le32(frag_size); wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); } - /* check if num_frags is not a pow of two */ - if (prq->wqe.info.num_frags < (1 << prq->wqe.info.log_num_frags)) { + + for (; f < prq->wqe.info.frags_max_num; f++) { wqe->data[f].seg_len = 0; wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); wqe->data[f].va = 0; @@ -999,11 +949,12 @@ static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, prq->post_wqes = xsc_eth_post_rx_wqes; prq->handle_rx_cqe = xsc_eth_handle_rx_cqe; - prq->dealloc_wqes = xsc_eth_free_rx_wqes; - prq->wqe.skb_from_cqe = xsc_skb_from_cqe_nonlinear; + prq->dealloc_wqe = xsc_eth_dealloc_rx_wqe; + prq->wqe.skb_from_cqe = xsc_rx_is_linear_skb(adapter->nic_param.mtu) ? 
+ xsc_skb_from_cqe_linear : + xsc_skb_from_cqe_nonlinear; prq->ix = c->chl_idx; - prq->hw_mtu = XSC_SW2HW_FRAG_SIZE(adapter->nic_param.mtu); - prq->frags_reuse_num = adapter->nic_param.rq_frags_size/XSC_RX_FRAG_SZ; + prq->frags_sz = adapter->nic_param.rq_frags_size; return 0; @@ -1016,12 +967,13 @@ static int xsc_eth_open_rss_qp_rq(struct xsc_channel *c, return ret; } +#ifdef XSC_RSS_SUPPORT static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, - struct xsc_rq_param *prq_param, - struct xsc_eth_channels *chls, - unsigned int num_chl) + struct xsc_rq_param *prq_param, + struct xsc_eth_channels *chls, + unsigned int num_chl) { - int ret = 0, err = XSCALE_RET_SUCCESS; + int ret = 0, err = 0; struct xsc_create_multiqp_mbox_in *in; struct xsc_create_qp_request *req; u8 q_log_size = prq_param->rq_attr.q_log_size; @@ -1039,9 +991,9 @@ static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, for (j = 0; j < c->qp.rq_num; j++) { prq = &c->qp.rq[j]; - ret = xsc_eth_open_rss_qp_rq(c, prq, prq_param); + ret = xsc_eth_alloc_rq(c, prq, prq_param); if (ret) - goto err_open_rss_rq; + goto err_alloc_rqs; hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); /*support different npages number smoothly*/ @@ -1083,8 +1035,7 @@ static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &req->pas[0], hw_npages); n++; - req = (struct xsc_create_qp_request *)(&in->data[0] + - entry_len*n); + req = (struct xsc_create_qp_request *)(&in->data[0] + entry_len * n); } } @@ -1098,31 +1049,32 @@ static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, c = &chls->c[i]; for (j = 0; j < c->qp.rq_num; j++) { prq = &c->qp.rq[j]; - prq->cqp.qpn = prq->rqn = rqn_base + n; + prq->rqn = rqn_base + n; + prq->cqp.qpn = prq->rqn; prq->cqp.event = xsc_eth_qp_event; prq->cqp.eth_queue_type = XSC_RES_RQ; ret = create_resource_common(adapter->xdev, &prq->cqp); if (ret) { - err = XSCALE_RET_ERROR; + err = ret; 
xsc_core_err(adapter->xdev, - "create resource common error qp:%d errno:%d\n", - prq->rqn, ret); + "create resource common error qp:%d errno:%d\n", + prq->rqn, ret); continue; } n++; } } - if (err != XSCALE_RET_SUCCESS) - return XSCALE_RET_ERROR; + if (err) + return err; adapter->channels.rqn_base = rqn_base; xsc_core_info(adapter->xdev, "%s: rqn_base=%d, ch_num=%d\n", - __func__, rqn_base, num_chl); + __func__, rqn_base, num_chl); return 0; err_create_rss_rqs: i = num_chl; -err_open_rss_rq: +err_alloc_rqs: for (--i; i >= 0; i--) { c = &chls->c[i]; for (j = 0; j < c->qp.rq_num; j++) { @@ -1135,92 +1087,21 @@ static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, #else static int xsc_eth_open_qp_rq(struct xsc_channel *c, - struct xsc_rq *prq, - struct xsc_rq_param *prq_param, - u32 rq_idx) + struct xsc_rq *prq, + struct xsc_rq_param *prq_param, + u32 rq_idx) { - int ret = 0; - struct page_pool_params pp_params = { 0 }; struct xsc_adapter *adapter = c->adapter; struct xsc_core_device *xdev = adapter->xdev; u8 q_log_size = prq_param->rq_attr.q_log_size; - u8 ele_log_size = prq_param->rq_attr.ele_log_size; - u32 pool_size = 1 << q_log_size; - int wq_sz; - int inlen; - int i, f; - int hw_npages; struct xsc_create_qp_mbox_in *in; - struct xsc_wq_param wq_param; - struct xsc_stats *stats = adapter->stats; - struct xsc_channel_stats *channel_stats = &stats->channel_stats[c->chl_idx]; - - prq->stats = &channel_stats->rq; - - wq_param.buf_numa_node = dev_to_node(c->adapter->dev); - wq_param.db_numa_node = cpu_to_node(c->cpu); - - ret = xsc_eth_wq_cyc_create(xdev, &wq_param, - q_log_size, ele_log_size, &prq->wqe.wq, - &prq->wq_ctrl); - if (ret) - return ret; - - wq_sz = xsc_wq_cyc_get_size(&prq->wqe.wq); - - prq->wqe.info = prq_param->frags_info; - - prq->wqe.frags = kvzalloc_node(array_size((wq_sz << prq->wqe.info.log_num_frags), - sizeof(*prq->wqe.frags)), - GFP_KERNEL, - cpu_to_node(c->cpu)); - if (!prq->wqe.frags) { - ret = -ENOMEM; - goto err_rq_wq_destroy; - } 
+ int hw_npages; + int inlen; + int ret = 0; - ret = xsc_eth_init_di_list(prq, wq_sz, c->cpu); + ret = xsc_eth_alloc_rq(c, prq, prq_param); if (ret) - goto err_frags_destroy; - - /* Create a page_pool and register it with rxq */ - pp_params.order = XSC_RX_FRAG_SZ_ORDER; - pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ - pp_params.pool_size = pool_size; - pp_params.nid = cpu_to_node(c->cpu); - pp_params.dev = c->adapter->dev; - pp_params.dma_dir = prq->buff.map_dir; - - /* page_pool can be used even when there is no rq->xdp_prog, - * given page_pool does not handle DMA mapping there is no - * required state to clear. And page_pool gracefully handle - * elevated refcnt. - */ - prq->page_pool = page_pool_create(&pp_params); - if (IS_ERR(prq->page_pool)) { - ret = PTR_ERR(prq->page_pool); - prq->page_pool = NULL; - goto err_di_destroy; - } - - for (i = 0; i < wq_sz; i++) { - - struct xsc_eth_rx_wqe_cyc *wqe = - xsc_wq_cyc_get_wqe(&prq->wqe.wq, i); - - for (f = 0; f < prq->wqe.info.num_frags; f++) { - u32 frag_size = prq->wqe.info.arr[f].frag_size; - - wqe->data[f].seg_len = cpu_to_le32(frag_size); - wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); - } - /* check if num_frags is not a pow of two */ - if (prq->wqe.info.num_frags < (1 << prq->wqe.info.log_num_frags)) { - wqe->data[f].seg_len = 0; - wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); - wqe->data[f].va = 0; - } - } + goto out; hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); inlen = sizeof(struct xsc_create_qp_mbox_in) + @@ -1229,7 +1110,7 @@ static int xsc_eth_open_qp_rq(struct xsc_channel *c, in = kvzalloc(inlen, GFP_KERNEL); if (!in) { ret = -ENOMEM; - goto err_rq_pagepool_destroy; + goto err_alloc_rq; } in->req.input_qpn = cpu_to_be16(XSC_QPN_RQN_STUB); /*no use for eth*/ @@ -1244,7 +1125,7 @@ static int xsc_eth_open_qp_rq(struct xsc_channel *c, ret = xsc_eth_create_qp_rq(xdev, prq, in, inlen); if (ret) - goto err_rq_in_destroy; + goto err_create_rq; prq->cqp.qpn = 
prq->rqn; prq->cqp.event = xsc_eth_qp_event; @@ -1252,47 +1133,25 @@ static int xsc_eth_open_qp_rq(struct xsc_channel *c, ret = create_resource_common(xdev, &prq->cqp); if (ret) { - xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", - __func__, prq->rqn, ret); - goto err_rq_destroy; + xsc_core_err(xdev, "%s:failed to init rqn%d, err=%d\n", + __func__, prq->rqn, ret); + goto err_destroy_rq; } - prq->post_wqes = xsc_eth_post_rx_wqes; - prq->handle_rx_cqe = xsc_eth_handle_rx_cqe; - prq->dealloc_wqes = xsc_eth_free_rx_wqes; - prq->wqe.skb_from_cqe = xsc_skb_from_cqe_nonlinear; - - prq->ix = c->chl_idx; - - prq->hw_mtu = XSC_ETH_HW_MTU_RECV; - - xsc_core_info(c->adapter->xdev, "%s ok\n", __func__); + xsc_core_info(c->adapter->xdev, "%s: rqn=%d ch_num=%d\n", + __func__, prq->rqn, c->chl_idx); kvfree(in); - return XSCALE_RET_SUCCESS; + return 0; -err_rq_destroy: +err_destroy_rq: xsc_eth_destroy_qp_rq(xdev, prq); - -err_rq_pagepool_destroy: - if (prq->page_pool) - page_pool_destroy(prq->page_pool); - -err_rq_in_destroy: +err_create_rq: kvfree(in); - -err_di_destroy: - if (prq->wqe.di) - kvfree(prq->wqe.di); - -err_frags_destroy: - if (prq->wqe.frags) - kvfree(prq->wqe.frags); - -err_rq_wq_destroy: - xsc_eth_wq_destroy(&prq->wq_ctrl); - +err_alloc_rq: + xsc_free_qp_rq(prq); +out: return ret; } #endif @@ -1308,37 +1167,37 @@ static int xsc_eth_close_qp_rq(struct xsc_channel *c, struct xsc_rq *prq) if (ret) return ret; + xsc_eth_free_rx_wqe(prq); xsc_free_qp_rq(prq); return 0; } static int xsc_eth_open_qp_sq(struct xsc_channel *c, - struct xsc_sq *psq, - struct xsc_sq_param *psq_param, - u32 sq_idx) + struct xsc_sq *psq, + struct xsc_sq_param *psq_param, + u32 sq_idx) { - int ret; struct xsc_adapter *adapter = c->adapter; struct xsc_core_device *xdev = adapter->xdev; u8 q_log_size = psq_param->sq_attr.q_log_size; u8 ele_log_size = psq_param->sq_attr.ele_log_size; + struct xsc_stats *stats = adapter->stats; + struct xsc_channel_stats *channel_stats = + 
&stats->channel_stats[c->chl_idx]; struct xsc_create_qp_mbox_in *in; struct xsc_modify_raw_qp_mbox_in *modify_in; - int inlen; int hw_npages; - struct xsc_wq_param wq_param; - struct xsc_stats *stats = adapter->stats; - struct xsc_channel_stats *channel_stats = &stats->channel_stats[c->chl_idx]; + int inlen; + int ret; + u8 pf_id; psq->stats = &channel_stats->sq[sq_idx]; + psq_param->wq.db_numa_node = cpu_to_node(c->cpu); - wq_param.buf_numa_node = 0; - wq_param.db_numa_node = 0; - - ret = xsc_eth_wq_cyc_create(xdev, &wq_param, - q_log_size, ele_log_size, &psq->wq, - &psq->wq_ctrl); + ret = xsc_eth_wq_cyc_create(xdev, &psq_param->wq, + q_log_size, ele_log_size, &psq->wq, + &psq->wq_ctrl); if (ret) return ret; @@ -1360,7 +1219,7 @@ static int xsc_eth_open_qp_sq(struct xsc_channel *c, in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); xsc_fill_page_frag_array(&psq->wq_ctrl.buf, - &in->req.pas[0], hw_npages); + &in->req.pas[0], hw_npages); ret = xsc_eth_create_qp_sq(xdev, psq, in, inlen); if (ret) @@ -1373,25 +1232,22 @@ static int xsc_eth_open_qp_sq(struct xsc_channel *c, ret = create_resource_common(xdev, &psq->cqp); if (ret) { xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", - __func__, psq->sqn, ret); + __func__, psq->sqn, ret); goto err_sq_destroy; } psq->channel = c; psq->ch_ix = c->chl_idx; - psq->txq_ix = psq->ch_ix * c->num_tc + sq_idx; + psq->txq_ix = psq->ch_ix + sq_idx * adapter->channels.num_chl; /*need to querify from hardware*/ psq->hw_mtu = XSC_ETH_HW_MTU_SEND; psq->stop_room = 1; - ret = xsc_eth_alloc_qp_sq_db(psq, cpu_to_node(c->cpu)); + ret = xsc_eth_alloc_qp_sq_db(psq, psq_param->wq.db_numa_node); if (ret) goto err_sq_common_destroy; - xsc_core_info(c->adapter->xdev, "%s ok, ch%d sqn=%d\n", - __func__, c->chl_idx, psq->sqn); - inlen = sizeof(struct xsc_modify_raw_qp_mbox_in); modify_in = kvzalloc(inlen, GFP_KERNEL); if (!modify_in) { @@ -1399,8 +1255,13 @@ static int xsc_eth_open_qp_sq(struct xsc_channel *c, goto err_sq_common_destroy; } - 
modify_in->req.qp_out_port = XSC_PF_VF_GET_PF_ID(xdev->glb_func_id); - modify_in->pcie_no = xsc_get_pcie_no(); + if (funcid_to_pf_index(&xdev->caps, xdev->glb_func_id, &pf_id)) { + modify_in->req.qp_out_port = pf_id; + } else { + ret = -EINVAL; + goto err_sq_modify_in_destroy; + } + modify_in->pcie_no = g_xsc_pcie_no; modify_in->req.qpn = cpu_to_be16((u16)(psq->sqn)); modify_in->req.func_id = cpu_to_be16(xdev->glb_func_id); modify_in->req.dma_direct = DMA_DIR_TO_MAC; @@ -1411,6 +1272,11 @@ static int xsc_eth_open_qp_sq(struct xsc_channel *c, kvfree(modify_in); kvfree(in); + + xsc_core_info(c->adapter->xdev, "%s ok, ch%d_sq%d=%d, db_numa=%d, buf_numa=%d\n", + __func__, c->chl_idx, sq_idx, psq->sqn, + psq_param->wq.db_numa_node, psq_param->wq.buf_numa_node); + return 0; err_sq_modify_in_destroy: @@ -1428,7 +1294,6 @@ static int xsc_eth_open_qp_sq(struct xsc_channel *c, err_sq_wq_destroy: xsc_eth_wq_destroy(&psq->wq_ctrl); return ret; - } static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq *psq) @@ -1449,9 +1314,9 @@ static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq *psq) } int xsc_eth_open_channel(struct xsc_adapter *adapter, - int idx, - struct xsc_channel *c, - struct xsc_channel_param *chl_param) + int idx, + struct xsc_channel *c, + struct xsc_channel_param *chl_param) { int ret = 0; struct net_device *netdev = adapter->netdev; @@ -1459,7 +1324,6 @@ int xsc_eth_open_channel(struct xsc_adapter *adapter, struct xsc_core_device *xdev = adapter->xdev; int i, j, eqn, irqn; struct cpumask *aff; - c->adapter = adapter; c->netdev = adapter->netdev; c->chl_idx = idx; @@ -1479,8 +1343,7 @@ int xsc_eth_open_channel(struct xsc_adapter *adapter, c->cpu = cpumask_first(aff); } - if ((c->qp.sq_num > XSC_MAX_NUM_TC) || - (c->qp.rq_num > XSC_MAX_NUM_TC)) { + if (c->qp.sq_num > XSC_MAX_NUM_TC || c->qp.rq_num > XSC_MAX_NUM_TC) { ret = -EINVAL; goto err; } @@ -1543,42 +1406,122 @@ int xsc_eth_open_channel(struct xsc_adapter *adapter, 
xsc_eth_close_cq(c, &c->qp.rq[j].cq); err: xsc_core_warn(adapter->xdev, - "failed to open channel: ch%d, sq_num=%d, rq_num=%d, err=%d\n", - idx, c->qp.sq_num, c->qp.rq_num, ret); + "failed to open channel: ch%d, sq_num=%d, rq_num=%d, err=%d\n", + idx, c->qp.sq_num, c->qp.rq_num, ret); return ret; } -static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, - struct xsc_rq_frags_info *frags_info) +static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) { - int i; - int ds_num = attr->ele_size/XSC_RECV_WQE_DS; + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + int frag_stride; + int i = 0; + + if (xsc_rx_is_linear_skb(mtu)) { + frag_stride = xsc_rx_get_linear_frag_sz(mtu); + frag_stride = roundup_pow_of_two(frag_stride); + + frags_info->arr[0].frag_size = byte_count; + frags_info->arr[0].frag_stride = frag_stride; + frags_info->num_frags = 1; + frags_info->wqe_bulk = PAGE_SIZE / frag_stride; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + goto out; + } + + if (byte_count <= DEFAULT_FRAG_SIZE) { + frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->num_frags = 1; + } else if (byte_count <= PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 1; + } else if (byte_count <= (PAGE_SIZE_4K + DEFAULT_FRAG_SIZE)) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->arr[1].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[1].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->arr[2].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[2].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->num_frags = 3; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else if (byte_count <= 2 * PAGE_SIZE_4K) { + if 
(PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else { + if (PAGE_SIZE < 4 * PAGE_SIZE_4K) { + frags_info->num_frags = roundup(byte_count, PAGE_SIZE_4K) / PAGE_SIZE_4K; + for (i = 0; i < frags_info->num_frags; i++) { + frags_info->arr[i].frag_size = PAGE_SIZE_4K; + frags_info->arr[i].frag_stride = PAGE_SIZE_4K; + } + } else { + frags_info->arr[0].frag_size = 4 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 4 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } - for (i = 0; i < ds_num; i++) { - frags_info->arr[i].frag_size = XSC_RX_FRAG_SZ; - frags_info->arr[i].frag_stride = - roundup_pow_of_two(frags_info->arr[i].frag_size); + if (PAGE_SIZE <= PAGE_SIZE_4K) { + frags_info->wqe_bulk_min = 4; + frags_info->wqe_bulk = max_t(u8, frags_info->wqe_bulk_min, 8); + } else { + frags_info->wqe_bulk = + PAGE_SIZE / (frags_info->num_frags * frags_info->arr[0].frag_size); + frags_info->wqe_bulk_min = frags_info->wqe_bulk; } - frags_info->num_frags = ds_num; +out: frags_info->log_num_frags = order_base_2(frags_info->num_frags); - frags_info->wqe_bulk = 1 + (frags_info->num_frags % 2); - frags_info->wqe_bulk = max_t(u8, frags_info->wqe_bulk, 8); + + return frags_info->num_frags * frags_info->arr[0].frag_size; +} + +static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, + struct xsc_rq_frags_info *frags_info, + struct xsc_eth_params *params) +{ + params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); + frags_info->frags_max_num = attr->ele_size / XSC_RECV_WQE_DS; } static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, struct xsc_channel_param *chl_param) { 
xsc_eth_build_queue_param(adapter, &chl_param->rqcq_param.cq_attr, - XSC_QUEUE_TYPE_RQCQ); + XSC_QUEUE_TYPE_RQCQ); + chl_param->rqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + xsc_eth_build_queue_param(adapter, &chl_param->sqcq_param.cq_attr, - XSC_QUEUE_TYPE_SQCQ); + XSC_QUEUE_TYPE_SQCQ); + chl_param->sqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sq_param.sq_attr, + XSC_QUEUE_TYPE_SQ); + chl_param->sq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + xsc_eth_build_queue_param(adapter, &chl_param->rq_param.rq_attr, - XSC_QUEUE_TYPE_RQ); + XSC_QUEUE_TYPE_RQ); + chl_param->rq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + xsc_build_rq_frags_info(&chl_param->rq_param.rq_attr, - &chl_param->rq_param.frags_info); - xsc_eth_build_queue_param(adapter, &chl_param->sq_param.sq_attr, - XSC_QUEUE_TYPE_SQ); + &chl_param->rq_param.frags_info, + &adapter->nic_param); } int xsc_eth_open_channels(struct xsc_adapter *adapter) @@ -1592,13 +1535,13 @@ int xsc_eth_open_channels(struct xsc_adapter *adapter) chls->num_chl = adapter->nic_param.num_channels; chls->c = kcalloc_node(chls->num_chl, sizeof(struct xsc_channel), - GFP_KERNEL, xdev->priv.numa_node); + GFP_KERNEL, xdev->priv.numa_node); if (!chls->c) { ret = -ENOMEM; goto err; } - chl_param = kvzalloc(sizeof(struct xsc_channel_param), GFP_KERNEL); + chl_param = kvzalloc(sizeof(*chl_param), GFP_KERNEL); if (!chl_param) { ret = -ENOMEM; goto err_free_ch; @@ -1625,6 +1568,9 @@ int xsc_eth_open_channels(struct xsc_adapter *adapter) for (i = 0; i < chls->num_chl; i++) napi_enable(&chls->c[i].napi); + /* flush cache to memory before interrupt and napi_poll running */ + smp_wmb(); + ret = xsc_eth_modify_qps(adapter, chls); if (ret) goto err_modify_qps; @@ -1645,7 +1591,7 @@ int xsc_eth_open_channels(struct xsc_adapter *adapter) err: chls->num_chl = 0; xsc_core_warn(adapter->xdev, "failed to open %d channels, err=%d\n", - chls->num_chl, ret); + 
chls->num_chl, ret); return ret; } @@ -1736,6 +1682,8 @@ static void xsc_eth_build_tx2sq_maps(struct xsc_adapter *adapter) for (tc = 0; tc < c->num_tc; tc++) { psq = &c->qp.sq[tc]; adapter->txq2sq[psq->txq_ix] = psq; + adapter->channel_tc2realtxq[i][tc] = + i + tc * adapter->channels.num_chl; } } } @@ -1746,6 +1694,7 @@ void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter) struct net_device *netdev = adapter->netdev; num_txqs = adapter->channels.num_chl * adapter->nic_param.num_tc; + xsc_netdev_set_tcs(adapter, adapter->channels.num_chl, adapter->nic_param.num_tc); netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, adapter->channels.num_chl); @@ -1797,6 +1746,7 @@ static void xsc_eth_close_channels(struct xsc_adapter *adapter) int i; struct xsc_channel *c = NULL; + xsc_core_dbg(adapter->xdev, "start to close channel\n"); for (i = 0; i < adapter->channels.num_chl; i++) { c = &adapter->channels.c[i]; xsc_eth_close_channel(c, true); @@ -1807,28 +1757,9 @@ static void xsc_eth_close_channels(struct xsc_adapter *adapter) } static void xsc_eth_set_port_status(struct xsc_core_device *xdev, - enum xsc_port_status status) -{ - -} - -#ifdef NEED_WAIT_RDMA_READY -static void xsc_wait_rdma_ready(struct xsc_core_device *xdev) + enum xsc_port_status status) { - int count = 10; - int i = 0; - - do { - if (xdev->rdma_ready == true) - break; - - msleep(100); - } while (i++ < count); - - xsc_core_dbg(xdev, "waiting for %d ms, rdma is %s\n", i * 100, - xdev->rdma_ready ? 
"ready" : "not ready"); } -#endif int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter) { @@ -1843,72 +1774,93 @@ int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter) err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); if (err || out.status) { - xsc_core_err(adapter->xdev, "%s set led to %d status res %d err %d\n", - __func__, id, out.status, err); + xsc_core_err(adapter->xdev, "failed to set led to %d, err=%d, status=%d\n", + id, err, out.status); return -1; } - return XSCALE_RET_SUCCESS; + return 0; } -#ifdef NEED_AGILEX_TRAINING -int xsc_eth_get_linkinfo(struct xsc_event_linkstatus_resp *plinkinfo, struct xsc_adapter *adapter) +bool xsc_eth_get_link_status(struct xsc_adapter *adapter) { - int err; - struct xsc_event_query_linkstatus_mbox_in in; struct xsc_event_query_linkstatus_mbox_out out; + int err; - /*query linkstatus cmd*/ in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_PHYPORT_STATE); err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return XSCALE_RET_ERROR; + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get link status, err=%d, status=%d\n", + err, out.hdr.status); + return false; + } + + xsc_core_dbg(adapter->xdev, "link_status=%d\n", out.ctx.linkstatus); + + return out.ctx.linkstatus ? 
true : false; +} + +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo_resp *plinkinfo) +{ + struct xsc_event_query_linkinfo_mbox_in in; + struct xsc_event_query_linkinfo_mbox_out out; + int i, err; - /*0:down, 1:up*/ - xsc_core_dbg(adapter->xdev, "status = %d, speed mode = %d\n", - out.ctx.linkstatus, out.ctx.linkspeed); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_LINK_INFO); - plinkinfo->linkspeed = out.ctx.linkspeed; - plinkinfo->linkstatus = out.ctx.linkstatus; + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + memcpy(plinkinfo, &out.ctx, sizeof(*plinkinfo)); + + plinkinfo->linkspeed = be32_to_cpu(plinkinfo->linkspeed); + plinkinfo->supported = be64_to_cpu(plinkinfo->supported); + plinkinfo->advertising = be64_to_cpu(plinkinfo->advertising); + for (i = 0; i < ARRAY_SIZE(plinkinfo->supported_speed); i++) { + plinkinfo->supported_speed[i] = be64_to_cpu(plinkinfo->supported_speed[i]); + plinkinfo->advertising_speed[i] = be64_to_cpu(plinkinfo->advertising_speed[i]); + } - return XSCALE_RET_SUCCESS; + return 0; } -bool xsc_eth_get_phyport_state(struct xsc_adapter *adapter) +int xsc_get_link_speed(struct xsc_core_device *dev) { - struct xsc_event_linkstatus_resp linkinfo; + struct xsc_adapter *adapter = netdev_priv(dev->netdev); + struct xsc_event_linkinfo_resp linkinfo; - if (xsc_eth_get_linkinfo(&linkinfo, adapter)) { - xsc_core_err(adapter->xdev, "%s fail to get linkinfo\n", __func__); - return XSCALE_ETH_PHYPORT_DOWN; + if (xsc_eth_get_link_info(adapter, &linkinfo)) { + xsc_core_err(adapter->xdev, "fail to get linkspeed, return 25G\n"); + return XSC_CMD_RESP_LINKSPEED_MODE_25G; } - return linkinfo.linkstatus; + return linkinfo.linkspeed; } +EXPORT_SYMBOL(xsc_get_link_speed); #if defined(MSIX_SUPPORT) -int xsc_eth_change_linkstatus(struct xsc_adapter 
*adapter) +int xsc_eth_change_link_status(struct xsc_adapter *adapter) { - struct xsc_event_linkstatus_resp linkinfo; + bool link_up; - if (xsc_eth_get_linkinfo(&linkinfo, adapter)) { - xsc_core_err(adapter->xdev, "%s fail to get linkinfo\n", __func__); - return XSCALE_RET_ERROR; - } + link_up = xsc_eth_get_link_status(adapter); - /*save get_linkstatus*/ - if ((linkinfo.linkstatus == XSCALE_ETH_PHYPORT_UP) && !netif_carrier_ok(adapter->netdev)) { + if (link_up && !netif_carrier_ok(adapter->netdev)) { netdev_info(adapter->netdev, "Link up\n"); netif_carrier_on(adapter->netdev); - } else if ((linkinfo.linkstatus != XSCALE_ETH_PHYPORT_UP) && - netif_carrier_ok(adapter->netdev)) { + } else if (!link_up && netif_carrier_ok(adapter->netdev)) { netdev_info(adapter->netdev, "Link down\n"); netif_carrier_off(adapter->netdev); } - return XSCALE_RET_SUCCESS; + return 0; } static void xsc_eth_event_work(struct work_struct *work) @@ -1925,14 +1877,15 @@ static void xsc_eth_event_work(struct work_struct *work) in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); - if (err) { - xsc_core_err(adapter->xdev, "failed to xsc_event_work, err=%d\n", err); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to query event type, err=%d, stats=%d\n", + err, out.hdr.status); goto failed; } switch (out.ctx.resp_cmd_type) { case XSC_CMD_EVENT_RESP_CHANGE_LINK: - err = xsc_eth_change_linkstatus(adapter); + err = xsc_eth_change_link_status(adapter); if (err) { xsc_core_err(adapter->xdev, "failed to change linkstatus, err=%d\n", err); goto failed; @@ -1942,7 +1895,7 @@ static void xsc_eth_event_work(struct work_struct *work) break; default: xsc_core_info(adapter->xdev, "unknown event cmdtype=%04x\n", - out.ctx.resp_cmd_type); + out.ctx.resp_cmd_type); break; } @@ -1957,7 +1910,6 @@ void xsc_eth_event_handler(void *arg) queue_work(adapter->workq, &adapter->event_work); } #endif -#endif int 
xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) { @@ -1971,7 +1923,7 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA); in.nic.info.pf = xdev->pf; - in.nic.info.pcie = xdev->pcie; + in.nic.info.pcie = g_xsc_pcie_no; in.nic.info.mac_port = xdev->mac_port; in.nic.info.pcie_port = xdev->pcie_port; in.nic.info.pf_id = xdev->pf_id; @@ -1982,10 +1934,6 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) in.nic.info.mac_logic_port = cpu_to_be16(xdev->mac_logic_port); in.nic.info.gsi_qpn = cpu_to_be16(xdev->gsi_qpn); - if (!xsc_core_is_pf(xdev) && !xsc_vf_pp_init_enable(xdev)) - caps_mask = caps |= (BIT(XSC_TBM_CAP_IPAT_BYPASS) | - BIT(XSC_TBM_CAP_PCT_BYPASS)) | (BIT(XSC_TBM_CAP_BC_BYPASS)); - #ifdef XSC_RSS_SUPPORT in.rss.rss_en = 1; in.rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - @@ -2005,6 +1953,10 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) caps |= BIT(XSC_TBM_CAP_HASH_PPH); caps_mask |= BIT(XSC_TBM_CAP_HASH_PPH); + if (xsc_get_pp_bypass_res(adapter->xdev)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); + memcpy(in.nic.mac_addr, netdev->dev_addr, ETH_ALEN); in.nic.caps = cpu_to_be16(caps); @@ -2012,12 +1964,11 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); if (err || out.hdr.status) { - xsc_core_warn(xdev, "failed!! err=%d, status=%d\n", err, out.hdr.status); + xsc_core_err(xdev, "failed!! 
err=%d, status=%d\n", err, out.hdr.status); return -ENOEXEC; } - xdev->bomt_idx = be16_to_cpu(out.res.bomt_idx); - xsc_core_info(xdev, "rss_qp_base=%d bomt_idx=%d\n", in.rss.rqn_base, xdev->bomt_idx); + xsc_core_info(xdev, "rss_qp_base=%d\n", in.rss.rqn_base); return 0; } @@ -2027,12 +1978,12 @@ int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) struct xsc_core_device *xdev = adapter->xdev; struct xsc_cmd_disable_nic_hca_mbox_in in = {}; struct xsc_cmd_disable_nic_hca_mbox_out out = {}; - u16 caps = 0; int err; + u16 caps = 0; in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_NIC_HCA); in.nic.info.pf = xdev->pf; - in.nic.info.pcie = xdev->pcie; + in.nic.info.pcie = g_xsc_pcie_no; in.nic.info.mac_port = xdev->mac_port; in.nic.info.pcie_port = xdev->pcie_port; in.nic.info.pf_id = xdev->pf_id; @@ -2042,16 +1993,13 @@ int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) in.nic.info.pf_logic_port = cpu_to_be16(xdev->pf_logic_port); in.nic.info.mac_logic_port = cpu_to_be16(xdev->mac_logic_port); - in.bc.bomt_idx = cpu_to_be16(xdev->bomt_idx); + if (xsc_get_pp_bypass_res(adapter->xdev)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); - if (!xsc_core_is_pf(xdev) && !xsc_vf_pp_init_enable(xdev)) - caps |= BIT(XSC_TBM_CAP_IPAT_BYPASS) | - BIT(XSC_TBM_CAP_PCT_BYPASS) | (BIT(XSC_TBM_CAP_BC_BYPASS)); in.nic.caps = cpu_to_be16(caps); - err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); if (err || out.hdr.status) { - xsc_core_warn(xdev, "failed!! err=%d, status=%d\n", err, out.hdr.status); + xsc_core_err(xdev, "failed!! 
err=%d, status=%d\n", err, out.hdr.status); return -ENOEXEC; } @@ -2106,7 +2054,8 @@ void xsc_eth_rss_params_change(struct xsc_adapter *adapter, u32 change, void *mo if (rss_caps_mask) { in->rss.caps_mask = rss_caps_mask; in->rss.rss_en = 1; - in->nic.caps = in->nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_RSS)); + in->nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_RSS)); + in->nic.caps = in->nic.caps_mask; } } @@ -2119,7 +2068,7 @@ int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); in.nic.info.pf = xdev->pf; - in.nic.info.pcie = xdev->pcie; + in.nic.info.pcie = g_xsc_pcie_no; in.nic.info.mac_port = xdev->mac_port; in.nic.info.pcie_port = xdev->pcie_port; in.nic.info.pf_id = xdev->pf_id; @@ -2133,13 +2082,30 @@ int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); if (err || out.hdr.status) { - xsc_core_warn(xdev, "failed!! err=%d, status=%d\n", err, out.hdr.status); + xsc_core_err(xdev, "failed!! 
err=%d, status=%u\n", err, out.hdr.status); return -ENOEXEC; } return 0; } +#ifdef MSIX_SUPPORT +static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv, + struct xsc_eth_params *params) +{ + struct xsc_core_device *xdev = priv->xdev; + int num_comp_vectors, irq; + + num_comp_vectors = priv->nic_param.comp_vectors; + cpumask_clear(xdev->xps_cpumask); + + for (irq = 0; irq < num_comp_vectors; irq++) { + mask_cpu_by_node(xdev->priv.numa_node, xdev->xps_cpumask); + netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq); + } +} +#endif + int xsc_eth_open(struct net_device *netdev) { struct xsc_adapter *adapter = netdev_priv(netdev); @@ -2150,7 +2116,7 @@ int xsc_eth_open(struct net_device *netdev) mutex_lock(&adapter->state_lock); if (adapter->status == XSCALE_ETH_DRIVER_OK) { xsc_core_warn(adapter->xdev, "unnormal ndo_open when status=%d\n", - adapter->status); + adapter->status); goto ret; } @@ -2168,50 +2134,43 @@ int xsc_eth_open(struct net_device *netdev) if (ret) goto ret; -#ifdef NEED_WAIT_RDMA_READY - xsc_wait_rdma_ready(adapter->xdev); -#endif - #ifdef NEED_CREATE_RX_THREAD ret = xsc_eth_rx_thread_create(adapter); if (ret) { - xsc_core_warn(adapter->xdev, "xsc_eth_rx_thread_create failed, err=%d\n", - ret); + xsc_core_warn(adapter->xdev, "xsc_eth_rx_thread_create failed, err=%d\n", ret); goto ret; } #endif -#ifdef NEED_AGILEX_TRAINING #if defined(MSIX_SUPPORT) if (xsc_core_is_pf(adapter->xdev)) { /*INIT_WORK*/ INIT_WORK(&adapter->event_work, xsc_eth_event_work); adapter->xdev->event_handler = xsc_eth_event_handler; - if (xsc_eth_get_phyport_state(adapter)) { + if (xsc_eth_get_link_status(adapter)) { netdev_info(netdev, "Link up\n"); netif_carrier_on(adapter->netdev); - } else + } else { netdev_info(netdev, "Link down\n"); - } -#else /*no msix*/ - if (xsc_core_is_pf(adapter->xdev)) { - timer_setup(&adapter->link_timer, xsc_eth_update_carrier_timer, 0); - mod_timer(&adapter->link_timer, jiffies + 2*HZ); - } -#endif - if 
(!xsc_core_is_pf(adapter->xdev)) + } + } else { netif_carrier_on(netdev); + } #else netif_carrier_on(netdev); #endif adapter->status = XSCALE_ETH_DRIVER_OK; +#ifdef MSIX_SUPPORT + xsc_set_default_xps_cpumasks(adapter, &adapter->nic_param); +#endif + ret: mutex_unlock(&adapter->state_lock); xsc_core_info(adapter->xdev, "%s: return %s, ret=%d\n", __func__, - ret?"fail":"ok", ret); + ret ? "fail" : "ok", ret); if (ret) return XSCALE_RET_ERROR; else @@ -2253,35 +2212,29 @@ int xsc_eth_close(struct net_device *netdev) ret: mutex_unlock(&adapter->state_lock); xsc_core_info(adapter->xdev, "%s: return %s, ret=%d\n", __func__, - ret?"fail":"ok", ret); + ret ? "fail" : "ok", ret); return ret; } -static void xsc_eth_set_rx_mode(struct net_device *netdev) -{ - struct xsc_adapter *adapter = netdev_priv(netdev); - - queue_work(adapter->workq, &adapter->set_rx_mode_work); -} - -void xsc_eth_set_mac_hw(unsigned char *dev_addr, unsigned char addr_len) -{ - /****TBD****/ -} - static int xsc_eth_set_mac(struct net_device *netdev, void *addr) { struct xsc_adapter *adapter = netdev_priv(netdev); struct sockaddr *saddr = addr; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + u16 vport = xdev->pf ? 
0 : (xdev->vf_id + 1); if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); - xsc_eth_set_mac_hw(netdev->dev_addr, netdev->addr_len); + ret = xsc_modify_nic_vport_mac_address(xdev, vport, saddr->sa_data, false); + if (ret) + xsc_core_err(adapter->xdev, "%s: xsc set mac addr failed\n", __func__); - queue_work(adapter->workq, &adapter->set_rx_mode_work); + netif_addr_lock_bh(netdev); + ether_addr_copy(netdev->dev_addr, saddr->sa_data); + netif_addr_unlock_bh(netdev); return 0; } @@ -2310,7 +2263,9 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) int num_txqs, num_rxqs, nch, ntc; int old_num_txqs, old_ntc; int err; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES bool disabling; +#endif old_num_txqs = netdev->real_num_tx_queues; old_ntc = netdev->num_tc ? : 1; @@ -2320,26 +2275,31 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) num_txqs = nch * ntc; num_rxqs = nch;// * priv->profile->rq_groups; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES disabling = num_txqs < netdev->real_num_tx_queues; +#endif xsc_netdev_set_tcs(priv, nch, ntc); err = netif_set_real_num_tx_queues(netdev, num_txqs); if (err) { - netdev_warn(netdev, "netif_set_real_num_tx_queues failed, txqs=%d->%d, tc=%d->%d, err=%d\n", - old_num_txqs, num_txqs, old_ntc, ntc, err); + netdev_warn(netdev, + "netif_set_real_num_tx_queues failed, txqs=%d->%d, tc=%d->%d, err=%d\n", + old_num_txqs, num_txqs, old_ntc, ntc, err); goto err_tcs; } err = netif_set_real_num_rx_queues(netdev, num_rxqs); if (err) { netdev_warn(netdev, "netif_set_real_num_rx_queues failed, rxqs=%d, err=%d\n", - num_rxqs, err); + num_rxqs, err); goto err_txqs; } +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES if (disabling) synchronize_net(); +#endif return 0; @@ -2356,23 +2316,8 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) return err; } -static void xsc_set_default_xps_cpumasks(struct xsc_adapter 
*priv, - struct xsc_eth_params *params) -{ - struct xsc_core_device *xdev = priv->xdev; - int num_comp_vectors, irq; - - num_comp_vectors = priv->nic_param.comp_vectors; - cpumask_clear(xdev->xps_cpumask); - - for (irq = 0; irq < num_comp_vectors; irq++) { - mask_cpu_by_node(xdev->priv.numa_node, xdev->xps_cpumask); - netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq); - } -} - void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, - int num_channels) + int num_channels) { int i; @@ -2392,19 +2337,19 @@ int xsc_eth_num_channels_changed(struct xsc_adapter *priv) if (!netif_is_rxfh_configured(priv->netdev)) xsc_build_default_indir_rqt(priv->rss_params.indirection_rqt, - XSC_INDIR_RQT_SIZE, count); + XSC_INDIR_RQT_SIZE, count); return 0; err: netdev_err(netdev, "%s: failed to change rss rxq number %d, err=%d\n", - __func__, count, err); + __func__, count, err); return err; } int xsc_safe_switch_channels(struct xsc_adapter *adapter, - xsc_eth_fp_preactivate preactivate, - xsc_eth_fp_postactivate postactivate) + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate) { struct net_device *netdev = adapter->netdev; int carrier_ok; @@ -2464,26 +2409,20 @@ int xsc_safe_switch_channels(struct xsc_adapter *adapter, if (carrier_ok) netif_carrier_on(netdev); xsc_core_dbg(adapter->xdev, "channels=%d, mtu=%d, err=%d\n", - adapter->nic_param.num_channels, - adapter->nic_param.mtu, ret); + adapter->nic_param.num_channels, + adapter->nic_param.mtu, ret); return ret; } int xsc_eth_nic_mtu_changed(struct xsc_adapter *priv) { u32 new_mtu = priv->nic_param.mtu; - u32 frags_size = XSC_SW2HW_FRAG_SIZE(new_mtu); - int ret = 0; - - ret = xsc_eth_set_hw_mtu(priv->xdev, XSC_SW2HW_MTU(new_mtu)); - if (ret) - return ret; + int ret; - frags_size = roundup(frags_size, XSC_RX_FRAG_SZ); - if (frags_size != priv->nic_param.rq_frags_size) - priv->nic_param.rq_frags_size = frags_size; + ret = xsc_eth_set_hw_mtu(priv->xdev, XSC_SW2HW_MTU(new_mtu), + 
XSC_SW2HW_RX_PKT_LEN(new_mtu)); - return 0; + return ret; } static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) @@ -2491,13 +2430,22 @@ static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) struct xsc_adapter *adapter = netdev_priv(netdev); int old_mtu = netdev->mtu; int ret = 0; + int max_buf_len = 0; if (new_mtu > netdev->max_mtu || new_mtu < netdev->min_mtu) { netdev_err(netdev, "%s: Bad MTU (%d), valid range is: [%d..%d]\n", - __func__, new_mtu, netdev->min_mtu, netdev->max_mtu); + __func__, new_mtu, netdev->min_mtu, netdev->max_mtu); return -EINVAL; } + if (!xsc_rx_is_linear_skb(new_mtu)) { + max_buf_len = adapter->xdev->caps.recv_ds_num * PAGE_SIZE; + if (new_mtu > max_buf_len) { + netdev_err(netdev, "Bad MTU (%d), max buf len is %d\n", + new_mtu, max_buf_len); + return -EINVAL; + } + } mutex_lock(&adapter->state_lock); adapter->nic_param.mtu = new_mtu; if (adapter->status != XSCALE_ETH_DRIVER_OK) { @@ -2518,7 +2466,7 @@ static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) out: mutex_unlock(&adapter->state_lock); xsc_core_info(adapter->xdev, "%s: mtu: %d->%d, expected_mtu=%d, err=%d\n", - __func__, old_mtu, netdev->mtu, new_mtu, ret); + __func__, old_mtu, netdev->mtu, new_mtu, ret); return ret; } @@ -2533,15 +2481,33 @@ int xsc_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct xsc_adapter *adapter = netdev_priv(netdev); struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; - struct xsc_core_device *vf_xdev; + struct xsc_core_device *xdev = adapter->xdev; + int ret; - netdev_info(netdev, "%s: vf_idx=%d, sriov=%p\n", __func__, vf, sriov); - if (vf >= sriov->num_vfs) + if (g_xsc_pcie_no != XSC_PCIE_NO_HOST || vf >= sriov->num_vfs) return -EINVAL; - vf_xdev = sriov->vfs[vf].dev; - netdev_info(netdev, "%s: vf_idx=%d, mac[5]=0x%02x\n", __func__, vf, mac[5]); - return xsc_eth_set_mac(vf_xdev->netdev, mac); + ret = xsc_eswitch_set_vport_mac(xdev->priv.eswitch, vf + 1, mac); + if (ret) + 
xsc_core_err(xdev, "%s: xsc set mac addr failed\n", __func__); + + return ret; +} + +int xsc_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + int err; + + if (!netif_device_present(dev)) + return -EOPNOTSUPP; + + err = xsc_eswitch_get_vport_config(esw, vf + 1, ivi); + + return err; } int set_feature_rxcsum(struct net_device *netdev, bool enable) @@ -2553,17 +2519,18 @@ int set_feature_rxcsum(struct net_device *netdev, bool enable) int err; in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); - in.nic.info.pcie = xdev->pcie; + in.nic.info.pcie = g_xsc_pcie_no; in.nic.info.pf = xsc_core_is_pf(xdev) ? 1 : 0; in.nic.info.pf_id = xdev->pf_id; in.nic.info.vf_id = cpu_to_be16(xdev->vf_id); + in.nic.info.logic_port = cpu_to_be16(xdev->logic_port); in.nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_HASH_PPH)); in.nic.caps = cpu_to_be16(enable << XSC_TBM_CAP_HASH_PPH); err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); if (err || out.hdr.status) { netdev_err(netdev, "failed to change rxcsum=%d, err=%d, status=%d\n", - enable, err, out.hdr.status); + enable, err, out.hdr.status); return -ENOEXEC; } @@ -2571,10 +2538,10 @@ int set_feature_rxcsum(struct net_device *netdev, bool enable) } static int xsc_handle_feature(struct net_device *netdev, - netdev_features_t *features, - netdev_features_t wanted_features, - netdev_features_t feature, - xsc_feature_handler feature_handler) + netdev_features_t *features, + netdev_features_t wanted_features, + netdev_features_t feature, + xsc_feature_handler feature_handler) { netdev_features_t changes = wanted_features ^ netdev->features; bool enable = !!(wanted_features & feature); @@ -2586,11 +2553,12 @@ static int xsc_handle_feature(struct net_device *netdev, err = feature_handler(netdev, enable); if (err) { netdev_err(netdev, "%s feature %pNF failed, err 
%d\n", - enable ? "Enable" : "Disable", &feature, err); + enable ? "Enable" : "Disable", &feature, err); return err; } - XSC_SET_FEATURE(features, feature, enable); + xsc_set_feature(features, feature, enable); + return 0; } @@ -2611,31 +2579,96 @@ int xsc_eth_set_features(struct net_device *netdev, netdev_features_t features) return 0; } -static const struct net_device_ops xsc_netdev_ops = { +u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int txq_ix, up = 0; + u16 num_channels; + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!adapter) { + pr_err("%s adapter is null\n", __func__); + return txq_ix; + } + + txq_ix = netdev_pick_tx(dev, skb, NULL); + if (!netdev_get_num_tc(dev)) + return txq_ix; + + if (skb_vlan_tag_present(skb)) { + up = skb_vlan_tag_get_prio(skb); + if (adapter->nic_param.num_tc > 1) + up = up % (adapter->nic_param.num_tc - 1) + 1; + else + up = 0; + } + + /* channel_ix can be larger than num_channels since + * dev->num_real_tx_queues = num_channels * num_tc + */ + num_channels = adapter->channels.num_chl; + if (txq_ix >= num_channels) + txq_ix = adapter->txq2sq[txq_ix]->ch_ix; + + return adapter->channel_tc2realtxq[txq_ix][up]; +} + +static int xsc_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_core_device *pf_xdev; + struct net_device *pf_netdev; + struct pci_dev *pdev = xdev->pdev; + int ret = len; + + if (!pdev) + return -EOPNOTSUPP; + if (!xsc_core_is_pf(xdev)) { + if (!pdev->physfn) + return -EOPNOTSUPP; + pf_xdev = pci_get_drvdata(pdev->physfn); + if (!pf_xdev || !pf_xdev->netdev) + return -EOPNOTSUPP; + pf_netdev = (struct net_device *)pf_xdev->netdev; + ret = snprintf(buf, len, "%s_%d", + pf_netdev->name, xdev->vf_id); + } else { + return -EOPNOTSUPP; + } + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} +static const struct net_device_ops 
xsc_netdev_ops = { .ndo_open = xsc_eth_open, .ndo_stop = xsc_eth_close, .ndo_start_xmit = xsc_eth_xmit_start, - .ndo_set_rx_mode = xsc_eth_set_rx_mode, + .ndo_set_rx_mode = NULL, .ndo_validate_addr = NULL, .ndo_set_mac_address = xsc_eth_set_mac, - .ndo_change_mtu = xsc_eth_change_mtu, + .ndo_change_mtu = xsc_eth_change_mtu, + .ndo_tx_timeout = NULL, - .ndo_set_tx_maxrate = NULL, - .ndo_vlan_rx_add_vid = NULL, - .ndo_vlan_rx_kill_vid = NULL, + .ndo_set_tx_maxrate = NULL, + .ndo_vlan_rx_add_vid = xsc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = xsc_vlan_rx_kill_vid, .ndo_do_ioctl = NULL, .ndo_set_vf_mac = xsc_set_vf_mac, - .ndo_set_vf_vlan = NULL, + .ndo_set_vf_vlan = NULL, .ndo_set_vf_rate = NULL, .ndo_set_vf_spoofchk = NULL, .ndo_set_vf_rss_query_en = NULL, .ndo_set_vf_trust = NULL, - .ndo_get_vf_config = NULL, +#ifdef NETLINK_MIN_DUMP_ALLOC_U32 + .ndo_get_vf_config = xsc_get_vf_config, +#endif .ndo_get_stats64 = xsc_get_stats, - .ndo_setup_tc = NULL, - + .ndo_setup_tc = NULL, .ndo_set_features = xsc_eth_set_features, .ndo_fix_features = NULL, .ndo_fdb_add = NULL, @@ -2643,11 +2676,14 @@ static const struct net_device_ops xsc_netdev_ops = { .ndo_bridge_getlink = NULL, .ndo_dfwd_add_station = NULL, .ndo_dfwd_del_station = NULL, + .ndo_bpf = NULL, + .ndo_xdp_xmit = NULL, + .ndo_get_phys_port_name = xsc_get_phys_port_name, + .ndo_udp_tunnel_add = NULL, .ndo_udp_tunnel_del = NULL, .ndo_features_check = NULL, - .ndo_bpf = NULL, - .ndo_xdp_xmit = NULL, + .ndo_select_queue = xsc_select_queue, }; static int xsc_eth_check_required_cap(struct xsc_core_device *xdev) @@ -2670,15 +2706,6 @@ static int xsc_get_max_num_channels(struct xsc_core_device *xdev) #endif } -static void xsc_eth_update_carrier_work(struct work_struct *work) -{ - //TBD -} - -static void xsc_eth_set_rx_mode_work(struct work_struct *work) -{ -} - static int xsc_eth_netdev_init(struct xsc_adapter *adapter) { unsigned int node, tc, nch; @@ -2687,23 +2714,19 @@ static int xsc_eth_netdev_init(struct xsc_adapter 
*adapter) nch = adapter->nic_param.max_num_ch; node = dev_to_node(adapter->dev); adapter->txq2sq = kcalloc_node(nch * tc, - sizeof(*adapter->txq2sq), GFP_KERNEL, node); + sizeof(*adapter->txq2sq), GFP_KERNEL, node); if (!adapter->txq2sq) goto err_out; mutex_init(&adapter->state_lock); - xsc_set_default_xps_cpumasks(adapter, &adapter->nic_param); /*INIT_WORK*/ - INIT_WORK(&adapter->update_carrier_work, xsc_eth_update_carrier_work); - INIT_WORK(&adapter->set_rx_mode_work, xsc_eth_set_rx_mode_work); adapter->workq = create_singlethread_workqueue("xsc_eth"); if (!adapter->workq) goto err_free_priv; netif_carrier_off(adapter->netdev); -// dev_net_set(netdev, devlink_net(priv_to_devlink(dev))); return 0; err_free_priv: @@ -2756,10 +2779,10 @@ void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) rss_params->hfunc = ETH_RSS_HASH_TOP; netdev_rss_key_fill(rss_params->toeplitz_hash_key, - sizeof(rss_params->toeplitz_hash_key)); + sizeof(rss_params->toeplitz_hash_key)); xsc_build_default_indir_rqt(rss_params->indirection_rqt, - XSC_INDIR_RQT_SIZE, num_channels); + XSC_INDIR_RQT_SIZE, num_channels); for (tt = 0; tt < XSC_NUM_INDIR_TIRS; tt++) { rss_params->rx_hash_fields[tt] = @@ -2781,15 +2804,13 @@ void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_nu adapter->nic_param.rq_max_size = BIT(xdev->caps.log_max_qp_depth); adapter->nic_param.sq_max_size = BIT(xdev->caps.log_max_qp_depth); - adapter->nic_param.rq_frags_size = XSC_RX_FRAG_SZ; xsc_build_rss_params(&adapter->rss_params, adapter->nic_param.num_channels); - xsc_core_info(xdev, "%s: mtu=%d, num_ch=%d(max=%d), num_tc=%d, frags_size=%d\n", - __func__, adapter->nic_param.mtu, - adapter->nic_param.num_channels, - adapter->nic_param.max_num_ch, - adapter->nic_param.num_tc, - adapter->nic_param.rq_frags_size); + xsc_core_info(xdev, "%s: mtu=%d, num_ch=%d(max=%d), num_tc=%d\n", + __func__, adapter->nic_param.mtu, + adapter->nic_param.num_channels, + 
adapter->nic_param.max_num_ch, + adapter->nic_param.num_tc); } void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) @@ -2822,16 +2843,10 @@ void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) netdev->vlan_features |= NETIF_F_GSO_PARTIAL; netdev->hw_features = netdev->vlan_features; -// netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; -// netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; -// netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; -// netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - -// netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; -// netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (xsc_vxlan_allowed(xdev) || xsc_geneve_tx_allowed(xdev) || - xsc_any_tunnel_proto_supported(xdev)) { + xsc_any_tunnel_proto_supported(xdev)) { netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; //NETIF_F_TSO_ECN netdev->hw_enc_features |= NETIF_F_TSO6; @@ -2840,12 +2855,10 @@ void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) netdev->features |= netdev->hw_features; netdev->features |= NETIF_F_HIGHDMA; -// netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; -// netdev->hw_features |= NETIF_F_RXCSUM; } static int xsc_eth_nic_init(struct xsc_adapter *adapter, - void *rep_priv, u32 ch_num, u32 tc_num) + void *rep_priv, u32 ch_num, u32 tc_num) { int err = -1; @@ -2913,7 +2926,7 @@ static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter) char mac[6] = {0}; int ret = 0; - ret = xsc_eth_get_mac(adapter->xdev, adapter->xdev->mac_port, mac); + ret = xsc_eth_get_mac(adapter->xdev, mac); if (ret) { xsc_core_warn(adapter->xdev, "get mac failed %d, generate random mac...", ret); eth_random_addr(mac); @@ -2931,12 +2944,13 @@ static int xsc_eth_nic_enable(struct xsc_adapter *adapter) xsc_lag_add(xdev, adapter->netdev); xsc_eth_l2_addr_init(adapter); - xsc_eth_set_hw_mtu(xdev, XSC_SW2HW_MTU(adapter->nic_param.mtu)); + + xsc_eth_set_hw_mtu(xdev, 
XSC_SW2HW_MTU(adapter->nic_param.mtu), + XSC_SW2HW_RX_PKT_LEN(adapter->nic_param.mtu)); #ifdef CONFIG_XSC_CORE_EN_DCB xsc_dcbnl_init_app(adapter); #endif - queue_work(adapter->workq, &adapter->set_rx_mode_work); xsc_eth_set_port_status(xdev, XSC_PORT_UP); @@ -2955,8 +2969,6 @@ static void xsc_eth_nic_disable(struct xsc_adapter *adapter) netif_device_detach(adapter->netdev); rtnl_unlock(); - queue_work(adapter->workq, &adapter->set_rx_mode_work); - xsc_lag_remove(adapter->xdev); } @@ -2977,7 +2989,7 @@ static int xsc_attach_netdev(struct xsc_adapter *adapter) if (err) return err; - xsc_core_info(adapter->xdev, "%s:ok\r\n", __func__); + xsc_core_info(adapter->xdev, "%s:ok\n", __func__); return 0; } @@ -3007,7 +3019,7 @@ static int xsc_eth_attach(struct xsc_core_device *xdev, struct xsc_adapter *adap if (err) return err; - xsc_core_info(adapter->xdev, "%s:ok\r\n", __func__); + xsc_core_info(adapter->xdev, "%s:ok\n", __func__); return 0; } @@ -3027,21 +3039,19 @@ static void *xsc_eth_add(struct xsc_core_device *xdev) struct xsc_adapter *adapter = NULL; void *rep_priv = NULL; - xsc_core_info(xdev, "%s enter\n", __func__); - err = xsc_eth_check_required_cap(xdev); if (err) return NULL; num_chl = xsc_get_max_num_channels(xdev); - num_tc = XSC_TX_NUM_TC; + num_tc = xdev->caps.max_tc; /* Allocate ourselves a network device with room for our info */ netdev = alloc_etherdev_mqs(sizeof(struct xsc_adapter), - num_chl * num_tc, num_chl); + num_chl * num_tc, num_chl); if (unlikely(!netdev)) { xsc_core_warn(xdev, "alloc_etherdev_mqs failed, txq=%d, rxq=%d\n", - (num_chl * num_tc), num_chl); + (num_chl * num_tc), num_chl); return NULL; } @@ -3057,10 +3067,9 @@ static void *xsc_eth_add(struct xsc_core_device *xdev) err = xsc_eth_nic_init(adapter, rep_priv, num_chl, num_tc); if (err) { xsc_core_warn(xdev, "xsc_nic_init failed, num_ch=%d, num_tc=%d, err=%d\n", - num_chl, num_tc, err); + num_chl, num_tc, err); goto err_free_netdev; } - xsc_core_info(xdev, "xsc_nic_init ok: ch=%d, 
tc=%d\n", num_chl, num_tc); err = xsc_eth_attach(xdev, adapter); if (err) { @@ -3068,7 +3077,7 @@ static void *xsc_eth_add(struct xsc_core_device *xdev) goto err_cleanup_netdev; } - adapter->stats = kvzalloc(sizeof(struct xsc_stats), GFP_KERNEL); + adapter->stats = kvzalloc(sizeof(*adapter->stats), GFP_KERNEL); if (unlikely(!adapter->stats)) goto err_detach; @@ -3078,12 +3087,17 @@ static void *xsc_eth_add(struct xsc_core_device *xdev) goto err_reg_netdev; } + err = xsc_eth_sysfs_create(netdev, xdev); + if (err) + goto err_sysfs_create; + xdev->netdev = (void *)netdev; adapter->status = XSCALE_ETH_DRIVER_INIT; - xsc_core_info(xdev, "%s success\n", __func__); return adapter; +err_sysfs_create: + unregister_netdev(adapter->netdev); err_reg_netdev: kfree(adapter->stats); err_detach: @@ -3109,8 +3123,7 @@ static void xsc_eth_remove(struct xsc_core_device *xdev, void *context) return; } - xsc_core_info(xdev, "%s: adapter=%p status=%d\n", - __func__, adapter, adapter->status); + xsc_eth_sysfs_remove(adapter->netdev, xdev); unregister_netdev(adapter->netdev); @@ -3123,7 +3136,6 @@ static void xsc_eth_remove(struct xsc_core_device *xdev, void *context) xdev->netdev = NULL; xdev->eth_priv = NULL; - xsc_core_info(xdev, "%s: ok\n", __func__); } static struct xsc_interface xsc_interface = { @@ -3137,6 +3149,7 @@ static __init int xsc_net_driver_init(void) { int ret; + pr_info("add ethernet driver\n"); ret = xsc_register_interface(&xsc_interface); if (ret != 0) { pr_err("failed to register interface\n"); @@ -3157,13 +3170,10 @@ static __init int xsc_net_driver_init(void) static __exit void xsc_net_driver_exit(void) { + pr_info("remove ethernet driver\n"); xsc_eth_ctrl_fini(); xsc_unregister_interface(&xsc_interface); } module_init(xsc_net_driver_init); module_exit(xsc_net_driver_exit); - -MODULE_DESCRIPTION("Yunsilicon XSC Ethernet driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c 
b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c index e35501cf9a24af69ea076fde67f5262abaf9b20e..6c4afad1be8fbc6af4400917d1c2e545e7c88b98 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -17,9 +16,9 @@ #include #include -#include -#include -#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" #include "xsc_eth.h" #include "xsc_accel.h" @@ -85,14 +84,14 @@ u32 xsc_eth_process_napi(struct xsc_adapter *adapter) int xsc_eth_rx_thread(void *arg) { - u32 uiRet = XSCALE_RET_SUCCESS; + u32 ret = XSCALE_RET_SUCCESS; struct xsc_adapter *adapter = (struct xsc_adapter *)arg; while (kthread_should_stop() == 0) { if (need_resched()) schedule(); - uiRet = xsc_eth_process_napi(adapter); - if (uiRet != XSCALE_RET_SUCCESS) + ret = xsc_eth_process_napi(adapter); + if (ret != XSCALE_RET_SUCCESS) ETH_DEBUG_LOG("unexpected branch.\r\n"); ETH_DEBUG_LOG("adapter=%p\r\n", adapter); @@ -108,7 +107,7 @@ u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter) struct task_struct *task = NULL; task = kthread_create(xsc_eth_rx_thread, (void *)adapter, - "xsc_rx%i", g_thread_count); + "xsc_rx%i", g_thread_count); if (!task) return XSCALE_RET_ERROR; @@ -122,5 +121,4 @@ u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter) return XSCALE_RET_SUCCESS; } -#endif - +#endif /* NEED_CREATE_RX_THREAD */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h index f628761512b5dfefe9a4b9b05e7a996f19da902c..74addd2dc1c6d89064757c7a3a56a2351b2c90a8 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h @@ -1,6 +1,5 @@ /* 
SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -9,7 +8,7 @@ #include #include -#include +#include "common/xsc_core.h" static inline void xsc_udp_gso_handle_tx_skb(struct sk_buff *skb) { @@ -17,13 +16,11 @@ static inline void xsc_udp_gso_handle_tx_skb(struct sk_buff *skb) udp_hdr(skb)->len = htons(payload_len); } - static inline struct sk_buff *xsc_accel_handle_tx(struct sk_buff *skb) { /*no not consider tls and ipsec*/ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) xsc_udp_gso_handle_tx_skb(skb); - return skb; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c index 69b1a654eaaf3f40ca2d2cdf07eae48f7c57a05b..3710dba5e6e650ff03d392fcf46168f86042b92e 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c @@ -1,14 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include #include #include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" #include "xsc_eth.h" #include "xsc_eth_debug.h" @@ -40,7 +39,7 @@ static int xsc_max_tc(struct xsc_core_device *dev) } static int xsc_dcbnl_set_dcbx_mode(struct xsc_adapter *priv, - enum xsc_dcbx_oper_mode mode) + enum xsc_dcbx_oper_mode mode) { return 1; } @@ -54,7 +53,7 @@ static int xsc_dcbnl_switch_to_host_mode(struct xsc_adapter *priv) } static int xsc_dcbnl_ieee_getets(struct net_device *netdev, - struct ieee_ets *ets) + struct ieee_ets *ets) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -109,7 +108,7 @@ static int xsc_dcbnl_ieee_getets(struct net_device *netdev, !is_tc_group_6_exist) priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; xsc_eth_dbg(HW, priv, "%s: tc%d, group=%d, bw=%d\n", - __func__, i, tc_group[i], ets->tc_tx_bw[i]); + __func__, i, tc_group[i], ets->tc_tx_bw[i]); } memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); @@ -156,7 +155,7 @@ static void xsc_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) } static void xsc_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, - u8 *tc_group, int max_tc) + u8 *tc_group, int max_tc) { int bw_for_ets_zero_bw_tc = 0; int last_ets_zero_bw_tc = -1; @@ -235,8 +234,8 @@ int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets) } static int xsc_dbcnl_validate_ets(struct net_device *netdev, - struct ieee_ets *ets, - bool zero_sum_allowed) + struct ieee_ets *ets, + bool zero_sum_allowed) { struct xsc_adapter *priv = netdev_priv(netdev); bool have_ets_tc = false; @@ -272,7 +271,7 @@ static int xsc_dbcnl_validate_ets(struct net_device *netdev, } static int xsc_dcbnl_ieee_setets(struct net_device *dev, - struct ieee_ets *ets) + struct ieee_ets *ets) { struct xsc_adapter *priv = netdev_priv(dev); int err; @@ -292,12 +291,10 @@ static int xsc_dcbnl_ieee_setets(struct net_device *dev, } static int 
xsc_dcbnl_ieee_getpfc(struct net_device *dev, - struct ieee_pfc *pfc) + struct ieee_pfc *pfc) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_core_device *xdev = priv->xdev; -// struct xsc_pport_stats *pstats = &priv->stats.pport; -// struct xsc_cee_config cee_cfg = priv->dcbx.cee_cfg; int i; pfc->pfc_cap = xsc_max_tc(xdev) + 1; @@ -313,9 +310,6 @@ static int xsc_dcbnl_ieee_getpfc(struct net_device *dev, #else pfc->pfc_en = 0; -// for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) -// pfc->pfc_en |= cee_cfg.pfc_setting[i] << i; - for (i = 0; i < pfc->pfc_cap; i++) pfc->pfc_en |= 1 << i; @@ -326,11 +320,11 @@ static int xsc_dcbnl_ieee_getpfc(struct net_device *dev, } static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, - struct ieee_pfc *pfc) + struct ieee_pfc *pfc) { struct xsc_adapter *priv = netdev_priv(dev); u32 changed = 0; - u8 curr_pfc_en = 0; + u8 curr_pfc_en; int ret = 0; #ifndef XSC_DCBX_STUB struct xsc_core_device *xdev = priv->xdev; @@ -358,7 +352,7 @@ static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, if (pfc->pfc_en != curr_pfc_en) { changed |= XSC_PORT_BUFFER_PFC; for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) { - if (pfc->pfc_en & (1<pfc_en & (1 << i)) cee_cfg->pfc_setting[i] = 1; else cee_cfg->pfc_setting[i] = 0; @@ -366,7 +360,7 @@ static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, } #endif xsc_eth_dbg(HW, priv, "%s: new_pfc_en=0x%x, cur_pfc_en=0x%x\n", - __func__, pfc->pfc_en, curr_pfc_en); + __func__, pfc->pfc_en, curr_pfc_en); if (pfc->delay && pfc->delay < XSC_MAX_CABLE_LENGTH && @@ -378,11 +372,11 @@ static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, #ifndef XSC_DCBX_STUB if (xdev->caps.port_buf) { pfc_new.pfc_en = (changed & XSC_PORT_BUFFER_PFC) ? 
- pfc->pfc_en : curr_pfc_en; + pfc->pfc_en : curr_pfc_en; if (priv->dcbx.manual_buffer) ret = xsc_port_manual_buffer_config(priv, changed, - dev->mtu, &pfc_new, - NULL, NULL); + dev->mtu, &pfc_new, + NULL, NULL); if (ret && (changed & XSC_PORT_BUFFER_CABLE_LEN)) priv->dcbx.cable_len = old_cable_len; } @@ -390,8 +384,8 @@ static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, if (!ret) xsc_eth_dbg(HW, priv, - "%s: PFC per priority bit mask: 0x%x\n", - __func__, pfc->pfc_en); + "%s: PFC per priority bit mask: 0x%x\n", + __func__, pfc->pfc_en); return ret; } @@ -413,7 +407,7 @@ static u8 xsc_dcbnl_setdcbx(struct net_device *dev, u8 mode) if (mode & DCB_CAP_DCBX_LLD_MANAGED) return 1; - if ((!mode) && priv->xdev->caps.dcbx) { + if (!mode && priv->xdev->caps.dcbx) { if (dcbx->mode == XSC_DCBX_PARAM_VER_OPER_AUTO) return 0; @@ -449,8 +443,7 @@ static int xsc_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) if (!priv->xdev->caps.dscp) return -EOPNOTSUPP; - if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) || - (app->protocol >= XSC_MAX_DSCP)) + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) return -EINVAL; /* Save the old entry info */ @@ -502,8 +495,7 @@ static int xsc_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) if (!priv->xdev->caps.dscp) return -EOPNOTSUPP; - if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) || - (app->protocol >= XSC_MAX_DSCP)) + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) return -EINVAL; /* Skip if no dscp app entry */ @@ -538,7 +530,7 @@ static int xsc_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) } static int xsc_dcbnl_ieee_getmaxrate(struct net_device *netdev, - struct ieee_maxrate *maxrate) + struct ieee_maxrate *maxrate) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -581,7 +573,7 @@ static int xsc_dcbnl_ieee_getmaxrate(struct net_device *netdev, } static int 
xsc_dcbnl_ieee_setmaxrate(struct net_device *netdev, - struct ieee_maxrate *maxrate) + struct ieee_maxrate *maxrate) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -613,7 +605,7 @@ static int xsc_dcbnl_ieee_setmaxrate(struct net_device *netdev, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n", - __func__, i, max_bw_value[i]); + __func__, i, max_bw_value[i]); #ifndef XSC_DCBX_STUB return xsc_modify_port_ets_rate_limit(xdev, max_bw_value, max_bw_unit); #else @@ -682,7 +674,7 @@ static u8 xsc_dcbnl_getstate(struct net_device *netdev) } static void xsc_dcbnl_getpermhwaddr(struct net_device *netdev, - u8 *perm_addr) + u8 *perm_addr) { #ifndef XSC_DCBX_STUB struct xsc_adapter *priv = netdev_priv(netdev); @@ -698,15 +690,15 @@ static void xsc_dcbnl_getpermhwaddr(struct net_device *netdev, } static void xsc_dcbnl_setpgtccfgtx(struct net_device *netdev, - int priority, u8 prio_type, - u8 pgid, u8 bw_pct, u8 up_map) + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; xsc_eth_dbg(HW, priv, "%s: prio=%d, type=%d, pgid=%d, bw_pct=%d, up_map=%d\n", - __func__, priority, prio_type, pgid, - bw_pct, up_map); + __func__, priority, prio_type, pgid, + bw_pct, up_map); if (priority >= CEE_DCBX_MAX_PRIO) { netdev_err(netdev, "%s, priority is out of range\n", __func__); @@ -723,13 +715,13 @@ static void xsc_dcbnl_setpgtccfgtx(struct net_device *netdev, } static void xsc_dcbnl_setpgbwgcfgtx(struct net_device *netdev, - int pgid, u8 bw_pct) + int pgid, u8 bw_pct) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; xsc_eth_dbg(HW, priv, "%s: pgid=%d, bw_pct=%d\n", - __func__, pgid, bw_pct); + __func__, pgid, bw_pct); if (pgid >= CEE_DCBX_MAX_PGS) { netdev_err(netdev, "%s, priority group is out of range\n", __func__); @@ -740,8 +732,8 
@@ static void xsc_dcbnl_setpgbwgcfgtx(struct net_device *netdev, } static void xsc_dcbnl_getpgtccfgtx(struct net_device *netdev, - int priority, u8 *prio_type, - u8 *pgid, u8 *bw_pct, u8 *up_map) + int priority, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -766,11 +758,11 @@ static void xsc_dcbnl_getpgtccfgtx(struct net_device *netdev, *pgid = 0; xsc_eth_dbg(HW, priv, "%s: prio=%d, pgid=%d, bw_pct=%d\n", - __func__, priority, *pgid, *bw_pct); + __func__, priority, *pgid, *bw_pct); } static void xsc_dcbnl_getpgbwgcfgtx(struct net_device *netdev, - int pgid, u8 *bw_pct) + int pgid, u8 *bw_pct) { struct ieee_ets ets; struct xsc_adapter *priv = netdev_priv(netdev); @@ -784,17 +776,17 @@ static void xsc_dcbnl_getpgbwgcfgtx(struct net_device *netdev, xsc_dcbnl_ieee_getets(netdev, &ets); *bw_pct = ets.tc_tx_bw[pgid]; xsc_eth_dbg(HW, priv, "%s: pgid=%d, bw_pct=%d\n", - __func__, pgid, *bw_pct); + __func__, pgid, *bw_pct); } static void xsc_dcbnl_setpfccfg(struct net_device *netdev, - int priority, u8 setting) + int priority, u8 setting) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; xsc_eth_dbg(HW, priv, "%s: prio=%d, setting=%d\n", - __func__, priority, setting); + __func__, priority, setting); if (priority >= CEE_DCBX_MAX_PRIO) { netdev_err(netdev, "%s, priority is out of range\n", __func__); @@ -809,7 +801,7 @@ static void xsc_dcbnl_setpfccfg(struct net_device *netdev, static int xsc_dcbnl_get_priority_pfc(struct net_device *netdev, - int priority, u8 *setting) + int priority, u8 *setting) { struct xsc_adapter *priv = netdev_priv(netdev); struct ieee_pfc pfc; @@ -823,12 +815,12 @@ xsc_dcbnl_get_priority_pfc(struct net_device *netdev, *setting = (pfc.pfc_en >> priority) & 0x01; xsc_eth_dbg(HW, priv, "%s: prio=%d, setting=%d\n", - __func__, priority, *setting); + __func__, priority, *setting); return err; } static 
void xsc_dcbnl_getpfccfg(struct net_device *netdev, - int priority, u8 *setting) + int priority, u8 *setting) { if (priority >= CEE_DCBX_MAX_PRIO) { netdev_err(netdev, @@ -843,7 +835,7 @@ static void xsc_dcbnl_getpfccfg(struct net_device *netdev, } static u8 xsc_dcbnl_getcap(struct net_device *netdev, - int capid, u8 *cap) + int capid, u8 *cap) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -883,12 +875,12 @@ static u8 xsc_dcbnl_getcap(struct net_device *netdev, } xsc_eth_dbg(HW, priv, "%s: capid=%d, cap=%d, ret=%d\n", - __func__, capid, *cap, rval); + __func__, capid, *cap, rval); return rval; } static int xsc_dcbnl_getnumtcs(struct net_device *netdev, - int tcs_id, u8 *num) + int tcs_id, u8 *num) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -903,7 +895,7 @@ static int xsc_dcbnl_getnumtcs(struct net_device *netdev, } xsc_eth_dbg(HW, priv, "%s: tcs_id=%d, tc_num=%d\n", - __func__, tcs_id, *num); + __func__, tcs_id, *num); return 0; } @@ -922,22 +914,22 @@ static void xsc_dcbnl_setpfcstate(struct net_device *netdev, u8 state) struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; - if ((state != XSC_CEE_STATE_UP) && (state != XSC_CEE_STATE_DOWN)) + if (state != XSC_CEE_STATE_UP && state != XSC_CEE_STATE_DOWN) return; cee_cfg->pfc_enable = state; } static int xsc_dcbnl_getbuffer(struct net_device *dev, - struct dcbnl_buffer *dcb_buffer) + struct dcbnl_buffer *dcb_buffer) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_core_device *xdev = priv->xdev; - int err = 0; -#ifndef XSC_DCBX_STUB + struct xsc_port_buffer port_buffer = {0}; + u8 buffer[XSC_MAX_PRIORITY] = {0}; int i; - struct xsc_port_buffer port_buffer; - u8 buffer[XSC_MAX_PRIORITY]; +#ifndef XSC_DCBX_STUB + int err = 0; #endif if (!xdev->caps.port_buf) @@ -947,35 +939,35 @@ static int xsc_dcbnl_getbuffer(struct net_device *dev, err = 
xsc_port_query_priority2buffer(xdev, buffer); if (err) return err; +#endif for (i = 0; i < XSC_MAX_PRIORITY; i++) dcb_buffer->prio2buffer[i] = buffer[i]; +#ifndef XSC_DCBX_STUB err = xsc_port_query_buffer(priv, &port_buffer); if (err) return err; +#endif for (i = 0; i < XSC_MAX_BUFFER; i++) dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size; dcb_buffer->total_size = port_buffer.port_buffer_size; -#endif - return err; + + return 0; } static int xsc_dcbnl_setbuffer(struct net_device *dev, - struct dcbnl_buffer *dcb_buffer) + struct dcbnl_buffer *dcb_buffer) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_core_device *xdev = priv->xdev; - int i; - int err = 0; -#ifndef XSC_DCBX_STUB - struct xsc_port_buffer port_buffer; - u8 old_prio2buffer[XSC_MAX_PRIORITY]; + struct xsc_port_buffer port_buffer = {0}; + u8 old_prio2buffer[XSC_MAX_PRIORITY] = {0}; u32 *buffer_size = NULL; u8 *prio2buffer = NULL; u32 changed = 0; -#endif + int i, err = 0; if (!xdev->caps.port_buf) return -EOPNOTSUPP; @@ -990,6 +982,7 @@ static int xsc_dcbnl_setbuffer(struct net_device *dev, err = xsc_port_query_priority2buffer(xdev, old_prio2buffer); if (err) return err; +#endif for (i = 0; i < XSC_MAX_PRIORITY; i++) { if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) { @@ -999,9 +992,11 @@ static int xsc_dcbnl_setbuffer(struct net_device *dev, } } +#ifndef XSC_DCBX_STUB err = xsc_port_query_buffer(priv, &port_buffer); if (err) return err; +#endif for (i = 0; i < XSC_MAX_BUFFER; i++) { if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) { @@ -1014,9 +1009,10 @@ static int xsc_dcbnl_setbuffer(struct net_device *dev, if (!changed) return 0; - priv->dcbx.manual_buffer = true; + priv->dcbx.manual_buffer = 1; +#ifndef XSC_DCBX_STUB err = xsc_port_manual_buffer_config(priv, changed, dev->mtu, NULL, - buffer_size, prio2buffer); + buffer_size, prio2buffer); #endif return err; } @@ -1034,7 +1030,6 @@ const struct dcbnl_rtnl_ops xsc_dcbnl_ops = { .setdcbx = xsc_dcbnl_setdcbx, 
.dcbnl_getbuffer = xsc_dcbnl_getbuffer, .dcbnl_setbuffer = xsc_dcbnl_setbuffer, - /* CEE interfaces */ .setall = xsc_dcbnl_setall, .getstate = xsc_dcbnl_getstate, @@ -1054,7 +1049,7 @@ const struct dcbnl_rtnl_ops xsc_dcbnl_ops = { }; static void xsc_dcbnl_query_dcbx_mode(struct xsc_adapter *priv, - enum xsc_dcbx_oper_mode *mode) + enum xsc_dcbx_oper_mode *mode) { *mode = XSC_DCBX_PARAM_VER_OPER_HOST; @@ -1144,7 +1139,6 @@ static void xsc_trust_update_tx_min_inline_mode(struct xsc_adapter *priv) static void xsc_trust_update_sq_inline_mode(struct xsc_adapter *priv) { - int old_mode = priv->nic_param.tx_min_inline_mode; mutex_lock(&priv->state_lock); @@ -1190,7 +1184,7 @@ static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio) int err = 0; xsc_eth_dbg(HW, priv, "%s: dscp=%d, prio=%d\n", - __func__, dscp, prio); + __func__, dscp, prio); #ifndef XSC_DCBX_STUB err = xsc_cmd_set_dscp2prio(priv->xdev, dscp, prio); if (err) @@ -1274,11 +1268,10 @@ void xsc_dcbnl_initialize(struct xsc_adapter *priv) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; priv->dcbx.port_buff_cell_sz = xsc_query_port_buffers_cell_size(priv); - priv->dcbx.manual_buffer = false; + priv->dcbx.manual_buffer = 0; priv->dcbx.cable_len = XSC_DEFAULT_CABLE_LEN; xsc_cee_init(priv); xsc_ets_init(priv); } #endif - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h index e00849b46b705f02d4d34f7891fbc8c4511aa92d..5f9b235f66375dbb76b101dcafb59247ebf84882 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h @@ -1,33 +1,27 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #ifndef XSC_ETH_H #define XSC_ETH_H -#include +#include "common/qp.h" #include "xsc_eth_common.h" #include "xsc_eth_stats.h" -#include +#include "common/version.h" #include +#include "common/xsc_fs.h" #define XSC_INVALID_LKEY 0x100 #define XSCALE_ETH_PHYPORT_DOWN 0 #define XSCALE_ETH_PHYPORT_UP 1 - +#ifdef CONFIG_DCB #define CONFIG_XSC_CORE_EN_DCB 1 +#endif #define XSC_PAGE_CACHE 1 -//#define XSC_UDP_FRAG_HW_CSUM 1 -//#define UDP_CHECK_0 1 -//#define UDP_CSUM_DEBUG 1 - -#ifndef COSIM -#define NEED_AGILEX_TRAINING -#endif #define XSCALE_DRIVER_NAME "xsc_eth" #define XSCALE_RET_SUCCESS 0 #define XSCALE_RET_ERROR 1 @@ -52,8 +46,8 @@ struct xsc_cee_config { /* bw pct for priority group */ u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; - bool pfc_setting[CEE_DCBX_MAX_PRIO]; - bool pfc_enable; + u8 pfc_setting[CEE_DCBX_MAX_PRIO]; + u8 pfc_enable; }; enum { @@ -89,7 +83,7 @@ struct xsc_dcbx { u8 cap; /* Buffer configuration */ - bool manual_buffer; + u8 manual_buffer; u32 cable_len; u32 xoff; u16 port_buff_cell_sz; @@ -122,6 +116,11 @@ struct xsc_rss_params { u32 rss_hash_tmpl; }; +struct xsc_vlan_params { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); +}; + struct xsc_adapter { struct net_device *netdev; struct pci_dev *pdev; @@ -130,21 +129,18 @@ struct xsc_adapter { struct xsc_eth_params nic_param; struct xsc_rss_params rss_params; + struct xsc_vlan_params vlan_params; struct workqueue_struct *workq; struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; struct work_struct event_work; -#ifdef NEED_AGILEX_TRAINING - struct timer_list link_timer; -#endif - struct xsc_eth_channels channels; struct xsc_sq **txq2sq; u32 status; - spinlock_t lock; + spinlock_t lock; /* adapter lock */ struct mutex state_lock; /* Protects Interface state */ struct xsc_stats *stats; @@ -155,6 +151,8 @@ struct xsc_adapter { u32 msglevel; struct task_struct *task; + + int 
channel_tc2realtxq[XSC_ETH_MAX_NUM_CHANNELS][XSC_MAX_NUM_TC]; }; struct xsc_rx_buffer { @@ -193,14 +191,13 @@ typedef int (*xsc_eth_fp_preactivate)(struct xsc_adapter *priv); typedef int (*xsc_eth_fp_postactivate)(struct xsc_adapter *priv); int xsc_safe_switch_channels(struct xsc_adapter *adapter, - xsc_eth_fp_preactivate preactivate, - xsc_eth_fp_postactivate postactivate); + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate); int xsc_eth_num_channels_changed(struct xsc_adapter *priv); int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 change); -bool xsc_eth_get_phyport_state(struct xsc_adapter *adapter); -#ifdef NEED_AGILEX_TRAINING -int xsc_eth_get_linkinfo(struct xsc_event_linkstatus_resp *plinkinfo, struct xsc_adapter *adapter); -#endif +bool xsc_eth_get_link_status(struct xsc_adapter *adapter); +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo_resp *plinkinfo); int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter); /* Use this function to get max num channels after netdev was created */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h index 9f6d88639d71d80a655dd0c5cb23a1b16a808afe..4055b4166235a191c2481566ef9acfd1b96e9f2c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -9,7 +8,8 @@ #include "xsc_queue.h" #include "xsc_eth_compat.h" -#include +#include "common/xsc_pph.h" +#include "common/xsc_hsi.h" #define SW_MIN_MTU 64 #define SW_DEFAULT_MTU 1500 @@ -17,8 +17,9 @@ #define XSC_ETH_HW_MTU_SEND 9800 /*need to obtain from hardware*/ #define XSC_ETH_HW_MTU_RECV 9800 /*need to obtain from hardware*/ -#define XSC_SW2HW_MTU(mtu) (mtu + 14 + 4) -#define XSC_SW2HW_FRAG_SIZE(mtu) (mtu + 14 + 4 + XSC_PPH_HEAD_LEN) +#define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4) +#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + 14 + 8 + 4 + XSC_PPH_HEAD_LEN) +#define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256) #define XSC_RX_MAX_HEAD (256) #define XSC_RX_HEADROOM NET_SKB_PAD @@ -41,7 +42,7 @@ #define XSC_ETH_MAX_TC_TOTAL (XSC_ETH_MAX_NUM_CHANNELS * XSC_MAX_NUM_TC) #define XSC_ETH_MAX_QP_NUM_PER_CH (XSC_MAX_NUM_TC + 1) -#define XSC_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ +#define XSC_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define XSC_MIN_SKB_FRAG_SZ (XSC_SKB_FRAG_SZ(XSC_RX_HEADROOM)) #define XSC_LOG_MAX_RX_WQE_BULK \ @@ -71,6 +72,7 @@ #define XSC_EQ_ELE_SZ 8 //size of a eq entry #define XSC_CQ_POLL_BUDGET 64 +#define XSC_TX_POLL_BUDGET 128 #define XSC_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #define XSC_MAX_PRIORITY 8 @@ -128,7 +130,6 @@ struct xsc_eth_rx_wqe_cyc { }; struct xsc_eq_param { -// struct xsc_eq_cmd_param eqc; struct xsc_queue_attr eq_attr; }; @@ -141,7 +142,7 @@ struct xsc_cq_param { }; struct xsc_rq_param { -// struct xsc_rq_cmd_param rqc; + struct xsc_wq_param wq; struct xsc_queue_attr rq_attr; struct xsc_rq_frags_info frags_info; @@ -149,6 +150,7 @@ struct xsc_rq_param { struct xsc_sq_param { // struct xsc_rq_cmd_param sqc; + struct xsc_wq_param wq; struct xsc_queue_attr sq_attr; }; @@ -193,7 +195,8 @@ struct xsc_channel { struct cpumask *aff_mask; struct irq_desc *irq_desc; struct xsc_ch_stats *stats; -}; + u8 rx_int; +} ____cacheline_aligned_in_smp; enum xsc_eth_priv_flag { 
XSC_PFLAG_RX_NO_CSUM_COMPLETE, @@ -227,16 +230,14 @@ struct xsc_eth_params { u32 rq_frags_size; u16 num_rl_txqs; - bool rx_cqe_compress_def; -// struct net_dim_cq_moder rx_cq_moderation; -// struct net_dim_cq_moder tx_cq_moderation; - bool tunneled_offload_en; - bool lro_en; + u8 rx_cqe_compress_def; + u8 tunneled_offload_en; + u8 lro_en; u8 tx_min_inline_mode; - bool vlan_strip_disable; - bool scatter_fcs_en; - bool rx_dim_enabled; - bool tx_dim_enabled; + u8 vlan_strip_disable; + u8 scatter_fcs_en; + u8 rx_dim_enabled; + u8 tx_dim_enabled; u32 lro_timeout; u32 pflags; }; @@ -248,7 +249,7 @@ struct xsc_eth_channels { }; struct xsc_eth_redirect_rqt_param { - bool is_rss; + u8 is_rss; union { u32 rqn; /* Direct RQN (Non-RSS) */ struct { @@ -260,7 +261,7 @@ struct xsc_eth_redirect_rqt_param { union xsc_send_doorbell { struct{ - int32_t next_pid : 16; + s32 next_pid : 16; u32 qp_num : 15; }; u32 send_data; @@ -268,7 +269,7 @@ union xsc_send_doorbell { union xsc_recv_doorbell { struct{ - int32_t next_pid : 13; + s32 next_pid : 13; u32 qp_num : 15; }; u32 recv_data; diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h index b55953fe62e8cf64a5defe955814ed4b14d897a7..5e34982faa46aece80d0052c50956b059e3badef 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c index 1c8ff3b2de296a3cdb437b199d36386cc527063d..6bff3a891bd2fa294c42a5a306fc4b37aede0d46 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -8,16 +7,16 @@ #include #include #include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" #define XSC_ETH_CTRL_NAME "eth_ctrl" static void encode_rlimit_set(void *data, u32 mac_port) { - struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *) data; + struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data; req->rate_cir = __cpu_to_be32(req->rate_cir); req->limit_id = __cpu_to_be32(req->limit_id); @@ -25,7 +24,7 @@ static void encode_rlimit_set(void *data, u32 mac_port) static void decode_rlimit_get(void *data) { - struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *) data; + struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data; int i; for (i = 0; i <= QOS_PRIO_MAX; i++) @@ -35,8 +34,12 @@ static void decode_rlimit_get(void *data) } static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, - u16 expect_resp_size, void (*encode)(void *, u32), void (*decode)(void *)) + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) { struct xsc_qos_mbox_in *in; struct xsc_qos_mbox_out *out; @@ -47,10 +50,10 @@ static int _eth_ctrl_ioctl_qos(struct 
xsc_core_device *xdev, if (hdr->attr.length != user_size) return -EINVAL; - in = kvzalloc(sizeof(struct xsc_qos_mbox_in) + expect_req_size, GFP_KERNEL); + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); if (!in) goto err_in; - out = kvzalloc(sizeof(struct xsc_qos_mbox_out) + expect_resp_size, GFP_KERNEL); + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); if (!out) goto err_out; @@ -64,10 +67,10 @@ static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, if (encode) encode((void *)in->data, xdev->mac_port); - err = xsc_cmd_exec( - xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); - hdr->attr.error = __be32_to_cpu(out->hdr.status); + hdr->attr.error = out->hdr.status; if (decode) decode((void *)out->data); @@ -89,8 +92,12 @@ static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, } static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, - u16 expect_resp_size, void (*encode)(void *, u32), void (*decode)(void *)) + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) { struct xsc_hwc_mbox_in *in; struct xsc_hwc_mbox_out *out; @@ -101,10 +108,10 @@ static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, if (hdr->attr.length != user_size) return -EINVAL; - in = kvzalloc(sizeof(struct xsc_hwc_mbox_in) + expect_req_size, GFP_KERNEL); + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); if (!in) goto err_in; - out = kvzalloc(sizeof(struct xsc_hwc_mbox_out) + expect_resp_size, GFP_KERNEL); + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); if (!out) goto err_out; @@ -116,8 +123,8 @@ static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, if (encode) 
encode((void *)in->data, xdev->mac_port); - err = xsc_cmd_exec( - xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); hdr->attr.error = __be32_to_cpu(out->hdr.status); if (decode) @@ -141,7 +148,7 @@ static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, } static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr) + struct xsc_ioctl_hdr __user *user_hdr) { struct xsc_ioctl_hdr hdr; int err; @@ -159,77 +166,76 @@ static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, /* check ioctl cmd */ switch (hdr.attr.opcode) { case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_dscp_pmt_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dscp_pmt_set), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_dscp_pmt_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dscp_pmt_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_trust_mode_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_trust_mode_set), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_trust_mode_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_trust_mode_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_PCP_PMT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_pcp_pmt_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_pcp_pmt_set), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_PCP_PMT: - return 
_eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_pcp_pmt_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_pcp_pmt_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_default_pri_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_default_pri_set), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_default_pri_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_default_pri_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_PFC: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_pfc_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_pfc_set), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_PFC: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_pfc_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_pfc_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_set), 0, - encode_rlimit_set, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_rate_limit_set), 0, + encode_rlimit_set, NULL); case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_get), - sizeof(struct xsc_rate_limit_get), NULL, decode_rlimit_get); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_get), + sizeof(struct xsc_rate_limit_get), + NULL, decode_rlimit_get); case XSC_CMD_OP_IOCTL_SET_SP: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_sp_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_sp_set), 0, NULL, NULL); 
case XSC_CMD_OP_IOCTL_GET_SP: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_sp_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_sp_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_WEIGHT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, sizeof(struct xsc_weight_set), 0, - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_weight_set), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_WEIGHT: - return _eth_ctrl_ioctl_qos( - xdev, user_hdr, &hdr, 0, sizeof(struct xsc_weight_get), - NULL, NULL); + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dpu_port_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dpu_port_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dpu_prio_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dpu_prio_weight_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_HWC: return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, - sizeof(struct hwc_set_t), 0, NULL, NULL); + sizeof(struct hwc_set_t), 0, NULL, NULL); case XSC_CMD_OP_IOCTL_GET_HWC: - return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, - sizeof(struct hwc_get_t), sizeof(struct hwc_get_t), NULL, NULL); + return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, sizeof(struct hwc_get_t), + sizeof(struct hwc_get_t), + NULL, NULL); default: - return -EINVAL; + return TRY_NEXT_CB; } in = kvzalloc(hdr.attr.length, GFP_KERNEL); @@ -259,9 +265,10 @@ static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, return err; } -static void _eth_ctrl_reg_cb(struct 
xsc_core_device *xdev, unsigned int cmd, - struct xsc_ioctl_hdr __user *user_hdr, void *data) +static int _eth_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) { + struct xsc_core_device *xdev = file->xdev; int err; switch (cmd) { @@ -269,9 +276,11 @@ static void _eth_ctrl_reg_cb(struct xsc_core_device *xdev, unsigned int cmd, err = _eth_ctrl_ioctl_cmdq(xdev, user_hdr); break; default: - err = -EFAULT; + err = TRY_NEXT_CB; break; } + + return err; } static void _eth_ctrl_reg_fini(void) diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h index f91f93009af2262dda6ee488cdb8338d140c3271..d7e93f0afc4197c699b47839e9e560badf5e49f2 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h index ca8053093dfdef1ad2a2659385daf45133f48967..8da0e0aa1b60364e9f9754edf71598aeb900fb66 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h @@ -1,24 +1,27 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #ifndef XSC_ETH_DEBUG_H #define XSC_ETH_DEBUG_H -#include +#include "common/xsc_core.h" #include -#include #include "xsc_eth.h" static bool debug; #define FUN_LINE_FMT "%s %d " + +#ifdef XSC_DEBUG #define ETH_DEBUG_LOG(fmt, ...) 
\ do { \ if (debug) \ pr_info(FUN_LINE_FMT fmt, __func__, __LINE__, ##__VA_ARGS__); \ } while (0) +#else +#define ETH_DEBUG_LOG(fmt, ...) do { } while (0) +#endif #define XSC_MSG_LEVEL (NETIF_MSG_LINK) // | NETIF_MSG_HW) @@ -32,23 +35,23 @@ do { \ #define WQE_CSEG_DUMP(seg_name, seg) \ do { \ ETH_DEBUG_LOG("dump %s:\n", seg_name); \ - ETH_DEBUG_LOG("cseg->has_pph: %d\n", seg->has_pph); \ - ETH_DEBUG_LOG("cseg->so_type: %d\n", seg->so_type); \ - ETH_DEBUG_LOG("cseg->so_hdr_len: %d\n", seg->so_hdr_len); \ - ETH_DEBUG_LOG("cseg->so_data_size: %d\n", seg->so_data_size); \ - ETH_DEBUG_LOG("cseg->msg_opcode: %d\n", seg->msg_opcode); \ - ETH_DEBUG_LOG("cseg->wqe_id: %d\n", seg->wqe_id); \ - ETH_DEBUG_LOG("cseg->ds_data_num: %d\n", seg->ds_data_num); \ - ETH_DEBUG_LOG("cseg->msg_len: %d\n", seg->msg_len); \ + ETH_DEBUG_LOG("cseg->has_pph: %d\n", (seg)->has_pph); \ + ETH_DEBUG_LOG("cseg->so_type: %d\n", (seg)->so_type); \ + ETH_DEBUG_LOG("cseg->so_hdr_len: %d\n", (seg)->so_hdr_len); \ + ETH_DEBUG_LOG("cseg->so_data_size: %d\n", (seg)->so_data_size); \ + ETH_DEBUG_LOG("cseg->msg_opcode: %d\n", (seg)->msg_opcode); \ + ETH_DEBUG_LOG("cseg->wqe_id: %d\n", (seg)->wqe_id); \ + ETH_DEBUG_LOG("cseg->ds_data_num: %d\n", (seg)->ds_data_num); \ + ETH_DEBUG_LOG("cseg->msg_len: %d\n", (seg)->msg_len); \ } while (0) #define WQE_DSEG_DUMP(seg_name, seg) \ do { \ ETH_DEBUG_LOG("dump %s:\n", seg_name); \ - ETH_DEBUG_LOG("dseg->va: %#llx\n", seg->va); \ - ETH_DEBUG_LOG("dseg->in_line: %d\n", seg->in_line); \ - ETH_DEBUG_LOG("dseg->mkey: %d\n", seg->mkey); \ - ETH_DEBUG_LOG("dseg->seg_len: %d\n", seg->seg_len); \ + ETH_DEBUG_LOG("dseg->va: %#llx\n", (seg)->va); \ + ETH_DEBUG_LOG("dseg->in_line: %d\n", (seg)->in_line); \ + ETH_DEBUG_LOG("dseg->mkey: %d\n", (seg)->mkey); \ + ETH_DEBUG_LOG("dseg->seg_len: %d\n", (seg)->seg_len); \ } while (0) static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int direct) @@ -57,7 +60,7 @@ static inline void skbdata_debug_dump(struct sk_buff 
*skb, u16 headlen, int dire return; netdev_info(skb->dev, "pkt[%s]: skb_len=%d, head_len=%d\n", - (direct?"tx":"rx"), skb->len, headlen); + (direct ? "tx" : "rx"), skb->len, headlen); if (skb) { char *buf = skb->data; @@ -76,7 +79,6 @@ static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int dire for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; int fsz = skb_frag_size(frag); - buf = (char *)(page_address(frag->bv_page) + frag->bv_offset); for (i = 0; i < fsz; i++) { if (i % 16 == 0) @@ -92,12 +94,12 @@ static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int dire #define ETH_SQ_STATE(sq) \ do { \ - if (test_bit(__QUEUE_STATE_STACK_XOFF, &sq->txq->state)) \ + if (test_bit(__QUEUE_STATE_STACK_XOFF, &(sq)->txq->state)) \ ETH_DEBUG_LOG("sq is __QUEUE_STATE_STACK_XOFF\n"); \ - else if (test_bit(__QUEUE_STATE_DRV_XOFF, &sq->txq->state)) \ + else if (test_bit(__QUEUE_STATE_DRV_XOFF, &(sq)->txq->state)) \ ETH_DEBUG_LOG("sq is __QUEUE_STATE_DRV_XOFF\n"); \ else \ - ETH_DEBUG_LOG("sq is %ld\n", sq->txq->state); \ + ETH_DEBUG_LOG("sq is %ld\n", (sq)->txq->state); \ } while (0) static inline void xsc_pkt_pph_dump(char *data, int len) @@ -115,4 +117,3 @@ static inline void xsc_pkt_pph_dump(char *data, int len) } #endif /* XSC_ETH_DEBUG_H */ - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c index eb8cc44d26b47617cd3da24729aae8f1cd5e720d..54e8e436d29bdcdc06c363f744dd0ece906d6071 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -10,8 +9,8 @@ #include "xsc_eth_debug.h" #include "xsc_eth_ethtool.h" #include "xsc_eth.h" -#include -#include +#include "common/xsc_cmd.h" +#include "common/port.h" #include "../pci/fw/xsc_tbm.h" typedef int (*xsc_pflag_handler)(struct net_device *dev, bool enable); @@ -51,7 +50,6 @@ const char xsc_self_tests[XSC_ST_NUM][ETH_GSTRING_LEN] = { static int xsc_test_loopback(struct xsc_adapter *adapter) { - if (adapter->status != XSCALE_ETH_DRIVER_OK) { netdev_err(adapter->netdev, "\tCan't perform loopback test while device is down\n"); @@ -74,31 +72,26 @@ static int xsc_test_link_state(struct xsc_adapter *adapter) if (!netif_carrier_ok(adapter->netdev)) return 1; - port_state = xsc_eth_get_phyport_state(adapter); + port_state = xsc_eth_get_link_status(adapter); return port_state == 0 ? 1 : 0; } static int xsc_test_link_speed(struct xsc_adapter *adapter) { -#ifdef NEED_AGILEX_TRAINING - - struct xsc_event_linkstatus_resp linkinfo; + struct xsc_event_linkinfo_resp linkinfo; - if (xsc_eth_get_linkinfo(&linkinfo, adapter)) { - xsc_core_err(adapter->xdev, "%s fail to get linkinfo\n", __func__); + if (xsc_eth_get_link_info(adapter, &linkinfo)) return 1; - } -#endif + return 0; } static int set_pflag_rx_no_csum_complete(struct net_device *dev, - bool enable) + bool enable) { struct xsc_adapter *priv = netdev_priv(dev); - XSC_SET_PFLAG(&priv->nic_param, - XSC_PFLAG_RX_NO_CSUM_COMPLETE, enable); + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_NO_CSUM_COMPLETE, enable); return 0; } @@ -139,8 +132,8 @@ const char *xsc_priv_flags_name(int flag) } static int xsc_handle_pflag(struct net_device *dev, - u32 wanted_flags, - enum xsc_eth_priv_flag flag) + u32 wanted_flags, + enum xsc_eth_priv_flag flag) { struct xsc_adapter *priv = netdev_priv(dev); bool enable = !!(wanted_flags & BIT(flag)); @@ -153,8 +146,8 @@ static int xsc_handle_pflag(struct net_device *dev, err = xsc_priv_flags[flag].handler(dev, enable); if (err) netdev_err(dev, "%s private flag '%s' failed err %d\n", - 
enable ? "Enable" : "Disable", - xsc_priv_flags[flag].name, err); + enable ? "Enable" : "Disable", + xsc_priv_flags[flag].name, err); return err; } @@ -182,7 +175,7 @@ int xsc_set_priv_flags(struct net_device *dev, u32 pflags) } static int xsc_get_module_info(struct net_device *netdev, - struct ethtool_modinfo *modinfo) + struct ethtool_modinfo *modinfo) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -224,8 +217,8 @@ static int xsc_get_module_info(struct net_device *netdev, } static int xsc_get_module_eeprom(struct net_device *netdev, - struct ethtool_eeprom *ee, - u8 *data) + struct ethtool_eeprom *ee, + u8 *data) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_core_device *xdev = priv->xdev; @@ -239,8 +232,7 @@ static int xsc_get_module_eeprom(struct net_device *netdev, memset(data, 0, ee->len); while (i < ee->len) { - size_read = xsc_query_module_eeprom(xdev, offset, ee->len - i, - data + i); + size_read = xsc_query_module_eeprom(xdev, offset, ee->len - i, data + i); if (!size_read) /* Done reading */ @@ -266,50 +258,49 @@ u32 xsc_get_priv_flags(struct net_device *dev) return priv->nic_param.pflags; } -static void xsc_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) +static void xsc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct xsc_adapter *adapter = netdev_priv(dev); struct xsc_feature_flag *ff = (struct xsc_feature_flag *)&adapter->xdev->feature_flag; snprintf(info->driver, sizeof(info->driver), "%s(cmdq-%d)", XSCALE_DRIVER_NAME, - adapter->xdev->cmdq_ver); + adapter->xdev->cmdq_ver); if (HOTFIX_NUM == 0) snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d", - BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION); + BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION); else snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d.H%d", - BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, HOTFIX_NUM); + 
BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, HOTFIX_NUM); if (adapter->xdev->hotfix_num >= 0x27) snprintf(info->fw_version, - sizeof(info->fw_version), - "%x.%x.%x.%s%s%s%s%s%s%s%s", - adapter->xdev->chip_ver_h, - adapter->xdev->hotfix_num, - adapter->xdev->chip_ver_l, - fpga_type_name[ff->fpga_type], - hps_ddr_name[ff->hps_ddr], - onchip_ft_name[ff->onchip_ft], - rdma_icrc_name[ff->rdma_icrc], - ma_xbar_name[ff->ma_xbar], - anlt_fec_name[ff->anlt_fec], - pp_tbl_dma_name[ff->pp_tbl_dma], - pct_exp_name[ff->pct_exp]); + sizeof(info->fw_version), + "%x.%x.%x.%s%s%s%s%s%s%s%s", + adapter->xdev->chip_ver_h, + adapter->xdev->hotfix_num, + adapter->xdev->chip_ver_l, + fpga_type_name[ff->fpga_type], + hps_ddr_name[ff->hps_ddr], + onchip_ft_name[ff->onchip_ft], + rdma_icrc_name[ff->rdma_icrc], + ma_xbar_name[ff->ma_xbar], + anlt_fec_name[ff->anlt_fec], + pp_tbl_dma_name[ff->pp_tbl_dma], + pct_exp_name[ff->pct_exp]); else snprintf(info->fw_version, - sizeof(info->fw_version), - "%x.%x.%x.%s%s%s%s%s%s", - adapter->xdev->chip_ver_h, - adapter->xdev->hotfix_num, - adapter->xdev->chip_ver_l, - fpga_type_name[ff->fpga_type], - hps_ddr_name[ff->hps_ddr], - onchip_ft_name[ff->onchip_ft], - rdma_icrc_name[ff->rdma_icrc], - ma_xbar_name[ff->ma_xbar], - anlt_fec_name[ff->anlt_fec]); + sizeof(info->fw_version), + "%x.%x.%x.%s%s%s%s%s%s", + adapter->xdev->chip_ver_h, + adapter->xdev->hotfix_num, + adapter->xdev->chip_ver_l, + fpga_type_name[ff->fpga_type], + hps_ddr_name[ff->hps_ddr], + onchip_ft_name[ff->onchip_ft], + rdma_icrc_name[ff->rdma_icrc], + ma_xbar_name[ff->ma_xbar], + anlt_fec_name[ff->anlt_fec]); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); } @@ -337,8 +328,7 @@ static void xsc_ethtool_get_strings(struct xsc_adapter *adapter, u32 stringset, case ETH_SS_TEST: for (i = 0; i < xsc_self_test_num(adapter); i++) - strcpy(data + i * ETH_GSTRING_LEN, - xsc_self_tests[i]); + strcpy(data + i * ETH_GSTRING_LEN, xsc_self_tests[i]); break; 
case ETH_SS_PRIV_FLAGS: @@ -393,8 +383,7 @@ static int (*xsc_st_func[XSC_ST_NUM])(struct xsc_adapter *) = { #endif }; -static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, - u64 *buf) +static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) { struct xsc_adapter *priv = netdev_priv(ndev); int i; @@ -422,7 +411,6 @@ static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, } netdev_info(ndev, "Self test out: status flags(0x%x)\n", etest->flags); - } static void xsc_update_stats(struct xsc_adapter *adapter) @@ -435,7 +423,7 @@ static void xsc_update_stats(struct xsc_adapter *adapter) } static void xsc_ethtool_get_ethtool_stats(struct xsc_adapter *adapter, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *stats, u64 *data) { int i, idx = 0; @@ -448,7 +436,7 @@ static void xsc_ethtool_get_ethtool_stats(struct xsc_adapter *adapter, } static void xsc_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *stats, u64 *data) { struct xsc_adapter *adapter = netdev_priv(dev); @@ -466,9 +454,9 @@ static void xsc_set_msglevel(struct net_device *dev, u32 val) } static void xsc_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *param, - struct kernel_ethtool_ringparam *kernel_param, - struct netlink_ext_ack *extack) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct xsc_adapter *priv = netdev_priv(dev); @@ -479,9 +467,9 @@ static void xsc_get_ringparam(struct net_device *dev, } static int xsc_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *param, - struct kernel_ethtool_ringparam *kernel_param, - struct netlink_ext_ack *extack) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct xsc_adapter *priv = netdev_priv(dev); u32 old_rq_size, old_sq_size; @@ 
-489,39 +477,39 @@ static int xsc_set_ringparam(struct net_device *dev, if (param->rx_jumbo_pending) { netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n", - __func__); + __func__); return -EINVAL; } if (param->rx_mini_pending) { netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n", - __func__); + __func__); return -EINVAL; } if (param->rx_pending < BIT(XSC_MIN_LOG_RQ_SZ)) { netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%ld)\n", - __func__, param->rx_pending, BIT(XSC_MIN_LOG_RQ_SZ)); + __func__, param->rx_pending, BIT(XSC_MIN_LOG_RQ_SZ)); return -EINVAL; } if (param->rx_pending > priv->nic_param.rq_max_size) { netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n", - __func__, param->rx_pending, priv->nic_param.rq_max_size); + __func__, param->rx_pending, priv->nic_param.rq_max_size); return -EINVAL; } if (param->tx_pending < BIT(XSC_MIN_LOG_SQ_SZ)) { netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%ld)\n", - __func__, param->tx_pending, BIT(XSC_MIN_LOG_SQ_SZ)); + __func__, param->tx_pending, BIT(XSC_MIN_LOG_SQ_SZ)); return -EINVAL; } if (param->tx_pending > priv->nic_param.sq_max_size) { netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n", - __func__, param->tx_pending, priv->nic_param.sq_max_size); + __func__, param->tx_pending, priv->nic_param.sq_max_size); return -EINVAL; } if (param->rx_pending == priv->nic_param.rq_size && - param->tx_pending == priv->nic_param.sq_size) + param->tx_pending == priv->nic_param.sq_size) return 0; mutex_lock(&priv->state_lock); @@ -535,14 +523,14 @@ static int xsc_set_ringparam(struct net_device *dev, priv->nic_param.sq_size = param->tx_pending; netdev_info(priv->netdev, "%s: tx_pending(%d->%d), rx_pending(%d->%d)\n", - __func__, old_sq_size, param->tx_pending, - old_rq_size, priv->nic_param.rq_size); + __func__, old_sq_size, param->tx_pending, + old_rq_size, priv->nic_param.rq_size); err = xsc_safe_switch_channels(priv, NULL, NULL); if (err) { priv->nic_param.rq_size = 
old_rq_size; priv->nic_param.sq_size = old_sq_size; netdev_err(priv->netdev, "%s: set ringparams failed, err=%d\n", - __func__, err); + __func__, err); } unlock: @@ -551,8 +539,7 @@ static int xsc_set_ringparam(struct net_device *dev, return err; } -static void xsc_get_channels(struct net_device *dev, - struct ethtool_channels *ch) +static void xsc_get_channels(struct net_device *dev, struct ethtool_channels *ch) { struct xsc_adapter *priv = netdev_priv(dev); @@ -564,8 +551,7 @@ static void xsc_get_channels(struct net_device *dev, mutex_unlock(&priv->state_lock); } -static int xsc_set_channels(struct net_device *dev, - struct ethtool_channels *ch) +static int xsc_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_eth_params *params = &priv->nic_param; @@ -575,20 +561,18 @@ static int xsc_set_channels(struct net_device *dev, int err = 0; if (!count) { - netdev_info(priv->netdev, "%s: combined_count=0 not supported\n", - __func__); + netdev_info(priv->netdev, "%s: combined_count=0 not supported\n", __func__); return -EINVAL; } if (ch->rx_count || ch->tx_count) { - netdev_info(priv->netdev, "%s: separate rx/tx count not supported\n", - __func__); + netdev_info(priv->netdev, "%s: separate rx/tx count not supported\n", __func__); return -EINVAL; } if (count > ch_max) { netdev_info(priv->netdev, "%s: count (%d) > max (%d)\n", - __func__, count, ch_max); + __func__, count, ch_max); return -EINVAL; } @@ -736,9 +720,8 @@ static int xsc_set_rss_hash_opt(struct xsc_adapter *priv, priv->rss_params.rx_hash_fields[tt] = rx_hash_field; } - xsc_core_info(priv->xdev, - "%s: flow_type=%d, change=0x%x, hash_tmpl=0x%x\n", - __func__, nfc->flow_type, change, rx_hash_field); + xsc_core_info(priv->xdev, "%s: flow_type=%d, change=0x%x, hash_tmpl=0x%x\n", + __func__, nfc->flow_type, change, rx_hash_field); if (change) ret = xsc_eth_modify_nic_hca(priv, change); @@ -798,8 +781,7 @@ static u32 
xsc_get_rxfh_indir_size(struct net_device *netdev) return XSC_INDIR_RQT_SIZE; } -int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_rss_params *rss = &priv->rss_params; @@ -818,17 +800,16 @@ int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } -int xsc_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *key, const u8 hfunc) +int xsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_rss_params *rss = &priv->rss_params; u32 refresh = 0; int err = 0; - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && - (hfunc != ETH_RSS_HASH_XOR) && - (hfunc != ETH_RSS_HASH_TOP)) + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_XOR && + hfunc != ETH_RSS_HASH_TOP) return -EINVAL; mutex_lock(&priv->state_lock); @@ -839,13 +820,12 @@ int xsc_set_rxfh(struct net_device *dev, const u32 *indir, } if (key) { - memcpy(rss->toeplitz_hash_key, key, - sizeof(rss->toeplitz_hash_key)); + memcpy(rss->toeplitz_hash_key, key, sizeof(rss->toeplitz_hash_key)); if (rss->hfunc == ETH_RSS_HASH_TOP) refresh |= BIT(XSC_RSS_HASH_KEY_UPDATE); } - if (refresh > 0 && (priv->status == XSCALE_ETH_DRIVER_OK)) + if (refresh > 0 && priv->status == XSCALE_ETH_DRIVER_OK) err = xsc_eth_modify_nic_hca(priv, refresh); mutex_unlock(&priv->state_lock); @@ -854,65 +834,31 @@ int xsc_set_rxfh(struct net_device *dev, const u32 *indir, } static int xsc_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) + struct ethtool_link_ksettings *cmd) { struct xsc_adapter *adapter = netdev_priv(netdev); - struct xsc_feature_flag *ff = (struct xsc_feature_flag *)&adapter->xdev->feature_flag; - struct xsc_event_linkstatus_resp linkinfo; - /*this stub is add for 802.3ad bond test, function has been tested*/ - u32 
supported, advertising; + struct xsc_event_linkinfo_resp linkinfo; - if (xsc_eth_get_linkinfo(&linkinfo, adapter)) { - xsc_core_err(adapter->xdev, "%s fail to get linkinfo\n", __func__); + if (xsc_eth_get_link_info(adapter, &linkinfo)) return -EINVAL; - } - - cmd->base.port = PORT_FIBRE; - cmd->base.duplex = DUPLEX_FULL; - cmd->base.autoneg = ff->anlt_fec ? AUTONEG_ENABLE : AUTONEG_DISABLE; - - if (linkinfo.linkspeed == XSC_CMD_RESP_LINKSPEED_MODE_25G) { - cmd->base.speed = SPEED_25000; - supported = (SUPPORTED_FIBRE | SUPPORTED_Autoneg); - advertising = (ADVERTISED_FIBRE | ADVERTISED_Autoneg); + cmd->base.port = linkinfo.port; + cmd->base.duplex = linkinfo.duplex; + cmd->base.autoneg = linkinfo.autoneg; + cmd->base.speed = linkinfo.linkspeed; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); - ethtool_link_ksettings_add_link_mode(cmd, supported, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 25000baseCR_Full); + bitmap_copy(cmd->link_modes.supported, (unsigned long *)linkinfo.supported_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(cmd->link_modes.advertising, (unsigned long *)linkinfo.advertising_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); - ethtool_link_ksettings_add_link_mode(cmd, supported, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 25000baseKR_Full); - - ethtool_link_ksettings_add_link_mode(cmd, supported, 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 25000baseSR_Full); - } else if (linkinfo.linkspeed == XSC_CMD_RESP_LINKSPEED_MODE_100G) { - cmd->base.speed = SPEED_100000; - - supported = (SUPPORTED_FIBRE | SUPPORTED_Autoneg); - advertising = (ADVERTISED_FIBRE | ADVERTISED_Autoneg); - - 
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - - ethtool_link_ksettings_add_link_mode(cmd, supported, 100000baseKR4_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100000baseKR4_Full); - - ethtool_link_ksettings_add_link_mode(cmd, supported, 100000baseSR4_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100000baseSR4_Full); - - ethtool_link_ksettings_add_link_mode(cmd, supported, 100000baseCR4_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100000baseCR4_Full); - - ethtool_link_ksettings_add_link_mode(cmd, supported, 100000baseLR4_ER4_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100000baseLR4_ER4_Full); - } + bitmap_or(cmd->link_modes.supported, cmd->link_modes.supported, + (unsigned long *)&linkinfo.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_or(cmd->link_modes.advertising, cmd->link_modes.advertising, + (unsigned long *)&linkinfo.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); return 0; } @@ -935,7 +881,6 @@ static int xsc_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state st } return ret; - } static const struct ethtool_ops xsc_ethtool_ops = { @@ -971,4 +916,3 @@ void eth_set_ethtool_ops(struct net_device *dev) { dev->ethtool_ops = &xsc_ethtool_ops; } - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h index 4f7dd4c456ca3b4655a93819f327ea4903cd501b..eb2eb3491c148560ef9f108b6099b73a91f1f5b9 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -18,4 +17,3 @@ void eth_set_ethtool_ops(struct net_device *dev); #define LED_ACT_ON_HW 0xff #endif /* XSC_ETH_ETHTOOL_H */ - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c index e77b416492ad9d5ac55a94672640fcff376de484..05d10dd1d0de518688bffc3a0a27fa41658a3380 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -24,20 +23,22 @@ static inline void xsc_rq_notify_hw(struct xsc_rq *rq) u64 rqwqe_id = wq->wqe_ctr << (ilog2(xdev->caps.recv_ds_num)); ETH_DEBUG_LOG("rq%d_db_val=0x%x, recv_ds=%d\n", - rq->rqn, doorbell_value.recv_data, - xdev->caps.recv_ds_num); + rq->rqn, doorbell_value.recv_data, + xdev->caps.recv_ds_num); /*reverse wqe index to ds index*/ doorbell_value.next_pid = rqwqe_id; doorbell_value.qp_num = rq->rqn; - /*keep order*/ + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ wmb(); writel(doorbell_value.recv_data, REG_ADDR(xdev, xdev->regs.rx_db)); } static inline void xsc_skb_set_hash(struct xsc_adapter *adapter, - struct xsc_cqe64 *cqe, - struct sk_buff *skb) + struct xsc_cqe64 *cqe, + struct sk_buff *skb) { struct xsc_rss_params *rss = &adapter->rss_params; u32 hash_field; @@ -49,20 +50,20 @@ static inline void xsc_skb_set_hash(struct xsc_adapter *adapter, if (skb->protocol == htons(ETH_P_IP)) { hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP]; if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP || - hash_field & XSC_HASH_FIELD_SEL_DST_IP) + hash_field & XSC_HASH_FIELD_SEL_DST_IP) l3_hash = true; if (hash_field & XSC_HASH_FIELD_SEL_SPORT || - hash_field & XSC_HASH_FIELD_SEL_DPORT) + hash_field & XSC_HASH_FIELD_SEL_DPORT) l4_hash 
= true; } else if (skb->protocol == htons(ETH_P_IPV6)) { hash_field = rss->rx_hash_fields[XSC_TT_IPV6_TCP]; if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6 || - hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) l3_hash = true; if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6 || - hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) l4_hash = true; } @@ -84,125 +85,96 @@ static inline unsigned short from32to16(unsigned int x) return x; } -static inline unsigned int xsc_do_csum(const unsigned char *buff, - int len, unsigned int sum, bool plus) -{ - unsigned int result = 0; - u32 result0, result1, result2, result3, result4; - - if (len >= 4) { - const unsigned char *end = buff + ((unsigned int)len & ~3); - unsigned int carry = 0; - - do { - unsigned int w = *(unsigned int *) buff; - - buff += 4; - result += carry; - result += w; - carry = (w > result); - } while (buff < end); - result += carry; - result0 = result; - result = from32to16(result); - } - result1 = result; - if (len & 2) { - result += *(unsigned short *) buff; - buff += 2; - result2 = result; - result = from32to16(result); - } - result3 = result; - if (len & 1) -#ifdef __LITTLE_ENDIAN - result += *buff; -#else - result += (*buff << 8); -#endif - - if (plus) { - result = from32to16(sum) + from32to16(result); - result4 = result; - result = from32to16(result); - } else { - result = from32to16(sum) - from32to16(result); - } - - ETH_DEBUG_LOG("%s: len=%d, csum=0x%x -> %x -> %x -> %x -> %x\n", __func__, - len, result0, result1, result2, result3, result4); - return result; -} - static inline bool handle_udp_frag_csum(struct sk_buff *skb, struct epp_pph *pph) { -#ifdef XSC_UDP_FRAG_HW_CSUM - struct iphdr *iph; +#ifdef XSC_UDP_FRAG_CSUM char *head = (char *)pph; + struct iphdr *iph; u8 l3_proto = PPH_OUTER_IP_TYPE(head); u8 l4_proto = PPH_OUTER_TP_TYPE(head); - u8 iph_off = PPH_OUTER_IP_OFST(head); - u16 iph_len = PPH_OUTER_IP_LEN(pph); -#ifndef 
UDP_CHECK_0 u16 csum_off = (u16)PPH_CSUM_OFST(head); + u16 csum_plen = (u16)PPH_CSUM_PLEN(head); u8 payload_off = PPH_PAYLOAD_OFST(head); - u32 check = PPH_CSUM_VAL(head); + u32 hw_csum = PPH_CSUM_VAL(head); + u16 udp_check = 0; + u16 udp_len = 0; + u32 off = 64; + __wsum csum1, csum2, csum3, csum; + +#ifdef CUM_SKB_DATA + head = (char *)skb->data; + off = 0; #endif if (l4_proto != L4_PROTO_UDP && l4_proto != L4_PROTO_NONE) return false; + off += ETH_HLEN; if (l3_proto == L3_PROTO_IP) { - iph = (struct iphdr *)(skb->data + iph_off); + iph = (struct iphdr *)(head + off); if (!ip_is_fragment(iph)) return false; -#ifdef UDP_CHECK_0 +#ifdef UDP_CSUM_DEBUG + netdev_dbg("ip_id=%d frag_off=0x%x l4_prt=%d l3_prt=%d iph_off=%d ip_len=%d csum_off=%d pload_off=%d\n", + ntohs(iph->id), ntohs(iph->frag_off), + l4_proto, l3_proto, PPH_OUTER_IP_OFST(head), PPH_OUTER_IP_LEN(pph), + csum_off, payload_off); +#endif + + off += iph->ihl * 4; if (l4_proto == L4_PROTO_UDP) { - struct udphdr *uh = (struct udphdr *)(skb->data + iph_off + iph_len); + struct udphdr *uh = (struct udphdr *)(head + off); - uh->check = 0; + udp_check = uh->check; + udp_len = ntohs(uh->len); } - return false; -#else + if (csum_off == 0) csum_off = 256; - csum_off -= XSC_PPH_HEAD_LEN; - - if (csum_off < payload_off) { - head = (char *)(skb->data + csum_off); - check = xsc_do_csum(head, (payload_off - csum_off), check, false); - } else if (csum_off > payload_off) { - head = (char *)(skb->data + payload_off); - check = xsc_do_csum(head, (csum_off - payload_off), check, true); - } - skb->csum = csum_unfold((__force __sum16)check); -#ifdef UDP_CSUM_DEBUG - int dump_len = 256; - - ETH_DEBUG_LOG( - "ip_id=%d frag_off=0x%x l4_proto=%d l3_proto=%d iph_off=%d ip_len=%d\n", - ntohs(iph->id), ntohs(iph->frag_off), - l4_proto, l3_proto, iph_off, iph_len); - ETH_DEBUG_LOG( - "skb_len=%d csum_off=%d payload_off=%d check=0x%llx -> 0x%x -> 0x%x\n", - skb->len, csum_off, payload_off, - PPH_CSUM_VAL(pph), check, skb->csum); - - if 
(skb->len < (256 - 64)) - dump_len = skb->len + 64; - xsc_pkt_pph_dump((char *)pph, dump_len); -#endif + netdev_dbg("%s: ip_id=%d frag_off=0x%x skb_len=%d data_len=%d csum_off=%d csum_plen=%d payload_off=%d udp_off=%d udp_len=%d udp_check=0x%x\n", + __func__, ntohs(iph->id), ntohs(iph->frag_off), + skb->len, skb->data_len, + csum_off, csum_plen, payload_off, off, udp_len, udp_check); +#ifdef CUM_RAW_DATA_DUMP + xsc_pkt_pph_dump((char *)head, 272); #endif + + if (csum_off < off) { + csum1 = csum_partial((char *)(head + csum_off), (off - csum_off), 0); + csum2 = htons(from32to16(hw_csum)); + csum = csum_sub(csum2, csum1); + } else if (csum_off > off) { + csum2 = csum_partial((char *)(head + csum_off), csum_plen, 0); + csum1 = csum_partial((char *)(head + off), (csum_off - off), 0); + csum = htons(from32to16(hw_csum)); + csum = csum_partial((char *)(head + off), (csum_off - off), csum); + csum3 = csum_partial((char *)(head + off), (skb->len - off + 64), 0); + } else { + csum = htons(from32to16(hw_csum)); + } + skb->csum = csum_unfold(from32to16(csum)); + + ETH_DEBUG_LOG("%s: sw_cal_csum[%d:%d]=0x%x -> 0x%x\n", + __func__, off, csum_off, csum1, from32to16(csum1)); + ETH_DEBUG_LOG("%s: sw_cal_hw_csum[%d:%d]=0x%x -> 0x%x, hw_csum=0x%x -> 0x%x\n", + __func__, csum_off, csum_plen, csum2, from32to16(csum2), + hw_csum, from32to16(hw_csum)); + ETH_DEBUG_LOG("%s: sw_cal_tot_csum[%d:%d]=0x%x -> 0x%x, skb_csum=0x%x -> 0x%x\n", + __func__, off, skb->len, csum3, from32to16(csum3), csum, skb->csum); + + skb->ip_summed = CHECKSUM_COMPLETE; + return true; } #endif + return false; } static inline void xsc_handle_csum(struct xsc_cqe64 *cqe, struct xsc_rq *rq, - struct sk_buff *skb, struct xsc_wqe_frag_info *wi) + struct sk_buff *skb, struct xsc_wqe_frag_info *wi) { struct xsc_rq_stats *stats = rq->stats; struct xsc_channel *c = rq->cq.channel; @@ -218,28 +190,26 @@ static inline void xsc_handle_csum(struct xsc_cqe64 *cqe, struct xsc_rq *rq, goto csum_none; if (handle_udp_frag_csum(skb, 
hw_pph)) { - skb->ip_summed = CHECKSUM_COMPLETE; stats->csum_succ++; goto out; } if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && - (!(cqe->csum_err & OUTER_AND_INNER))) { + (!(cqe->csum_err & OUTER_AND_INNER))) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = 1; skb->encapsulation = 1; stats->csum_unnecessary++; } else if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && - (!(cqe->csum_err & OUTER_BIT) && - (cqe->csum_err & INNER_BIT))) { + (!(cqe->csum_err & OUTER_BIT) && (cqe->csum_err & INNER_BIT))) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = 0; skb->encapsulation = 1; stats->csum_unnecessary++; } else if (!XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && - (!(cqe->csum_err & OUTER_BIT))) { + (!(cqe->csum_err & OUTER_BIT))) { skb->ip_summed = CHECKSUM_UNNECESSARY; stats->csum_unnecessary++; @@ -258,10 +228,10 @@ static inline void xsc_handle_csum(struct xsc_cqe64 *cqe, struct xsc_rq *rq, } static inline void xsc_build_rx_skb(struct xsc_cqe64 *cqe, - u32 cqe_bcnt, - struct xsc_rq *rq, - struct sk_buff *skb, - struct xsc_wqe_frag_info *wi) + u32 cqe_bcnt, + struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) { struct xsc_channel *c = rq->cq.channel; struct net_device *netdev = c->netdev; @@ -277,10 +247,10 @@ static inline void xsc_build_rx_skb(struct xsc_cqe64 *cqe, } static inline void xsc_complete_rx_cqe(struct xsc_rq *rq, - struct xsc_cqe64 *cqe, - u32 cqe_bcnt, - struct sk_buff *skb, - struct xsc_wqe_frag_info *wi) + struct xsc_cqe64 *cqe, + u32 cqe_bcnt, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) { struct xsc_rq_stats *stats = rq->stats; @@ -290,25 +260,24 @@ static inline void xsc_complete_rx_cqe(struct xsc_rq *rq, } static inline void xsc_add_skb_frag(struct xsc_rq *rq, - struct sk_buff *skb, - struct xsc_dma_info *di, - u32 frag_offset, u32 len, - unsigned int truesize) + struct sk_buff *skb, + struct xsc_dma_info *di, + u32 frag_offset, u32 len, + unsigned int truesize) { struct xsc_channel *c = 
rq->cq.channel; - struct device *dev = c->adapter->dev; + struct device *dev = c->adapter->dev; dma_sync_single_for_cpu(dev, di->addr + frag_offset, len, DMA_FROM_DEVICE); + page_ref_inc(di->page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, di->page, frag_offset, len, truesize); - - di->refcnt_bias--; } static inline void xsc_copy_skb_header(struct device *dev, - struct sk_buff *skb, - struct xsc_dma_info *dma_info, - int offset_from, u32 headlen) + struct sk_buff *skb, + struct xsc_dma_info *dma_info, + int offset_from, u32 headlen) { void *from = page_address(dma_info->page) + offset_from; /* Aligning len to sizeof(long) optimizes memcpy performance */ @@ -319,21 +288,70 @@ static inline void xsc_copy_skb_header(struct device *dev, skb_copy_to_linear_data(skb, from, len); } +static inline struct sk_buff *xsc_build_linear_skb(struct xsc_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_dma_info *di = wi->di; + u16 rx_headroom = rq->buff.headroom; + int pph_len = has_pph ? 
XSC_PPH_HEAD_LEN : 0; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + + va = page_address(di->page) + wi->offset; + data = va + rx_headroom + pph_len; + frag_size = XSC_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + + dma_sync_single_range_for_cpu(rq->cq.xdev->device, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ + prefetch(data); + + skb = xsc_build_linear_skb(rq, va, frag_size, (rx_headroom + pph_len), + (cqe_bcnt - pph_len)); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + page_ref_inc(di->page); + + return skb; +} + struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, - struct xsc_wqe_frag_info *wi, - u32 cqe_bcnt, u8 has_pph) + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) { struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; struct xsc_wqe_frag_info *head_wi = wi; - u16 headlen = min_t(u32, XSC_RX_MAX_HEAD, cqe_bcnt); + u16 headlen = min_t(u32, XSC_RX_MAX_HEAD, cqe_bcnt); u16 frag_headlen = headlen; - u16 byte_cnt = cqe_bcnt - headlen; + u16 byte_cnt = cqe_bcnt - headlen; struct sk_buff *skb; struct xsc_channel *c = rq->cq.channel; - struct device *dev = c->adapter->dev; + struct device *dev = c->adapter->dev; struct net_device *netdev = c->adapter->netdev; u8 fragcnt = 0; u16 head_offset = head_wi->offset; + u16 frag_consumed_bytes = 0; #ifndef NEED_CREATE_RX_THREAD skb = napi_alloc_skb(rq->cq.napi, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); @@ -348,32 +366,36 @@ struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, prefetchw(skb->data); if (likely(has_pph)) { + headlen = min_t(u32, XSC_RX_MAX_HEAD, (cqe_bcnt - XSC_PPH_HEAD_LEN)); + frag_headlen = headlen + XSC_PPH_HEAD_LEN; + byte_cnt = cqe_bcnt - headlen - XSC_PPH_HEAD_LEN; head_offset += XSC_PPH_HEAD_LEN; - headlen -= XSC_PPH_HEAD_LEN; } while (byte_cnt) { /*figure out whether the first fragment can be a page ?*/ - u16 frag_consumed_bytes = + frag_consumed_bytes = min_t(u16, 
frag_info->frag_size - frag_headlen, byte_cnt); xsc_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, - frag_consumed_bytes, frag_info->frag_stride); + frag_consumed_bytes, frag_info->frag_stride); byte_cnt -= frag_consumed_bytes; - frag_headlen = 0; - ETH_DEBUG_LOG("consumed=%d, frag_size=%d, byte_cnt=%d, refcnt=%d, addr=0x%llx\n", - frag_consumed_bytes, frag_info->frag_size, byte_cnt, - wi->di->refcnt_bias, (u64)wi->di->addr); + ETH_DEBUG_LOG("consumed=%d, frag_size=%d, byte_cnt=%d, cqe_bcnt=%d, addr=0x%llx\n", + frag_consumed_bytes, frag_info->frag_size, byte_cnt, + cqe_bcnt, (u64)wi->di->addr); /*to protect extend wqe read, drop exceed bytes*/ + frag_headlen = 0; fragcnt++; if (fragcnt == rq->wqe.info.num_frags) { if (byte_cnt) { rq->stats->oversize_pkts_sw_drop += byte_cnt; - netdev_warn(netdev, "large packet reach the maximum rev-wqe num.\n"); - netdev_warn(netdev, "%u bytes dropped: frag_num=%d, headlen=%d, cqe_cnt=%d, frag0_bytes=%d, frag_size=%d\n", - byte_cnt, fragcnt, headlen, cqe_bcnt, - frag_consumed_bytes, frag_info->frag_size); + netdev_warn(netdev, + "large packet reach the maximum rev-wqe num.\n"); + netdev_warn(netdev, + "%u bytes dropped: frag_num=%d, headlen=%d, cqe_cnt=%d, frag0_bytes=%d, frag_size=%d\n", + byte_cnt, fragcnt, headlen, cqe_bcnt, + frag_consumed_bytes, frag_info->frag_size); } break; } @@ -387,48 +409,16 @@ struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; - skb->len += headlen; + skb->len += headlen; skbdata_debug_dump(skb, headlen, 0); return skb; } -static inline void page_ref_elev(struct xsc_dma_info *dma_info) -{ - page_ref_add(dma_info->page, PAGE_REF_ELEV); - dma_info->refcnt_bias += PAGE_REF_ELEV; -} - static inline bool xsc_rx_cache_is_empty(struct xsc_page_cache *cache) { - return cache->head < 0; -} - -static inline bool xsc_rx_cache_page_busy(struct xsc_page_cache *cache, u32 i) -{ - struct xsc_dma_info *di = 
&cache->page_cache[i]; - - return (page_ref_count(di->page) - di->refcnt_bias) != 1; -} - -static inline void xsc_rx_cache_page_swap(struct xsc_page_cache *cache, - u32 a, u32 b) -{ - struct xsc_dma_info tmp; - - tmp = cache->page_cache[a]; - cache->page_cache[a] = cache->page_cache[b]; - cache->page_cache[b] = tmp; -} - -static inline void xsc_rx_cache_reduce_reset_watch(struct xsc_page_cache *cache) -{ - struct xsc_page_cache_reduce *reduce = &cache->reduce; - - reduce->next_ts = ilog2(cache->sz) == cache->log_min_sz ? - MAX_JIFFY_OFFSET : jiffies + reduce->grace_period; - reduce->success = 0; + return cache->head == cache->tail; } static inline bool xsc_page_is_reserved(struct page *page) @@ -436,132 +426,43 @@ static inline bool xsc_page_is_reserved(struct page *page) return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); } -static inline bool xsc_rx_cache_check_reduce(struct xsc_rq *rq) -{ - struct xsc_page_cache *cache = &rq->page_cache; - - if (!cache->page_cache) - return false; - - if (unlikely(test_bit(XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, &rq->state))) - return false; - - if (time_before(jiffies, cache->reduce.next_ts)) - return false; - - if (likely(!xsc_rx_cache_is_empty(cache)) && - xsc_rx_cache_page_busy(cache, cache->head)) - goto reset_watch; - - if (ilog2(cache->sz) == cache->log_min_sz) - goto reset_watch; - - /* would like to reduce */ - if (cache->reduce.success < XSC_PAGE_CACHE_REDUCE_SUCCESS_CNT) { - cache->reduce.success++; - return false; - } - - return true; - -reset_watch: - xsc_rx_cache_reduce_reset_watch(cache); - return false; - -} - -static inline void xsc_rx_cache_may_reduce(struct xsc_rq *rq) -{ - struct xsc_page_cache *cache = &rq->page_cache; - struct xsc_page_cache_reduce *reduce = &cache->reduce; - int max_new_head; - - if (!xsc_rx_cache_check_reduce(rq)) - return; - - /* do reduce */ - rq->stats->cache_rdc++; - cache->sz >>= 1; - max_new_head = (cache->sz >> 1) - 1; - if (cache->head > max_new_head) { - u32 
npages = cache->head - max_new_head; - - cache->head = max_new_head; - if (cache->lrs >= cache->head) - cache->lrs = 0; - - memcpy(reduce->pending, &cache->page_cache[cache->head + 1], - npages * sizeof(*reduce->pending)); - reduce->npages = npages; - set_bit(XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, &rq->state); - } - - xsc_rx_cache_reduce_reset_watch(cache); -} - -static inline bool xsc_rx_cache_extend(struct xsc_rq *rq) -{ - struct xsc_page_cache *cache = &rq->page_cache; - struct xsc_page_cache_reduce *reduce = &cache->reduce; - u8 log_limit_sz = cache->log_min_sz + XSC_PAGE_CACHE_LOG_MAX_RQ_MULT; - - if (ilog2(cache->sz) >= log_limit_sz) - return false; - - rq->stats->cache_ext++; - cache->sz <<= 1; - - xsc_rx_cache_reduce_reset_watch(cache); - schedule_delayed_work_on(smp_processor_id(), &reduce->reduce_work, - reduce->delay); - return true; -} - static inline bool xsc_rx_cache_get(struct xsc_rq *rq, - struct xsc_dma_info *dma_info) + struct xsc_dma_info *dma_info) { struct xsc_page_cache *cache = &rq->page_cache; struct xsc_rq_stats *stats = rq->stats; struct xsc_core_device *xdev = rq->cq.xdev; - if (unlikely(xsc_rx_cache_is_empty(cache))) - goto err_no_page; + if (unlikely(xsc_rx_cache_is_empty(cache))) { + stats->cache_empty++; + return false; + } - xsc_rx_cache_page_swap(cache, cache->head, cache->lrs); - cache->lrs++; - if (cache->lrs >= cache->head) - cache->lrs = 0; - if (xsc_rx_cache_page_busy(cache, cache->head)) - goto err_no_page; + if (page_ref_count(cache->page_cache[cache->head].page) != 1) { + stats->cache_busy++; + return false; + } stats->cache_reuse++; - *dma_info = cache->page_cache[cache->head--]; + *dma_info = cache->page_cache[cache->head]; + cache->head = (cache->head + 1) & (cache->sz - 1); dma_sync_single_for_device(&xdev->pdev->dev, dma_info->addr, - PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(page_ref_count(dma_info->page) <= PAGE_REF_THRSD)) - page_ref_elev(dma_info); + PAGE_SIZE, DMA_FROM_DEVICE); return true; - -err_no_page: - 
stats->cache_alloc++; - cache->reduce.success = 0; - - return false; } static inline bool xsc_rx_cache_put(struct xsc_rq *rq, - struct xsc_dma_info *dma_info) + struct xsc_dma_info *dma_info) { struct xsc_page_cache *cache = &rq->page_cache; struct xsc_rq_stats *stats = rq->stats; + u32 tail_next = (cache->tail + 1) & (cache->sz - 1); - if (unlikely(cache->head == cache->sz - 1)) { - if (!xsc_rx_cache_extend(rq)) { - rq->stats->cache_full++; - return false; - } + if (tail_next == cache->head) { + stats->cache_full++; + return false; } if (unlikely(xsc_page_is_reserved(dma_info->page))) { @@ -569,7 +470,8 @@ static inline bool xsc_rx_cache_put(struct xsc_rq *rq, return false; } - cache->page_cache[++cache->head] = *dma_info; + cache->page_cache[cache->tail] = *dma_info; + cache->tail = tail_next; return true; } @@ -583,14 +485,11 @@ void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) static inline void xsc_put_page(struct xsc_dma_info *dma_info) { - page_ref_sub(dma_info->page, dma_info->refcnt_bias); - ETH_DEBUG_LOG("free addr=0x%llx, refcnt=(%d, %d)\n", - dma_info->addr, page_ref_count(dma_info->page), dma_info->refcnt_bias); put_page(dma_info->page); } void xsc_page_release_dynamic(struct xsc_rq *rq, - struct xsc_dma_info *dma_info, bool recycle) + struct xsc_dma_info *dma_info, bool recycle) { if (likely(recycle)) { #ifdef XSC_PAGE_CACHE @@ -599,87 +498,66 @@ void xsc_page_release_dynamic(struct xsc_rq *rq, #endif xsc_page_dma_unmap(rq, dma_info); - page_ref_sub(dma_info->page, dma_info->refcnt_bias); page_pool_recycle_direct(rq->page_pool, dma_info->page); } else { xsc_page_dma_unmap(rq, dma_info); - page_pool_release_page(rq->page_pool, dma_info->page); - xsc_put_page(dma_info); } } static inline void xsc_put_rx_frag(struct xsc_rq *rq, - struct xsc_wqe_frag_info *frag, bool recycle) + struct xsc_wqe_frag_info *frag, bool recycle) { if (frag->last_in_page) xsc_page_release_dynamic(rq, frag->di, recycle); } -static inline void 
xsc_free_rx_wqe(struct xsc_rq *rq, - struct xsc_wqe_frag_info *wi, bool recycle) +static inline struct xsc_wqe_frag_info *get_frag(struct xsc_rq *rq, u16 ix) { - int i; - - for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) { - if (i < rq->frags_reuse_num || !recycle) - xsc_put_rx_frag(rq, wi, recycle); - } + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; } -static inline void xsc_free_rx_wqe_reserve_ds(struct xsc_rq *rq, - struct xsc_wqe_frag_info *wi) +static inline void xsc_free_rx_wqe(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, bool recycle) { int i; - for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) { - if (i >= rq->frags_reuse_num) { -#ifdef XSC_DEBUG - if (page_ref_count(wi->di->page) - wi->di->refcnt_bias > 1) - ETH_DEBUG_LOG("frag%d, last_in_page=%d, refcnt=%d, bias=%d\n", - i, wi->last_in_page, - page_ref_count(wi->di->page), wi->di->refcnt_bias); -#endif - xsc_put_rx_frag(rq, wi, false); - } - } -} - -static inline struct xsc_wqe_frag_info *get_frag(struct xsc_rq *rq, u16 ix) -{ - return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + xsc_put_rx_frag(rq, wi, recycle); } void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, - struct xsc_rq *rq, struct xsc_cqe64 *cqe) + struct xsc_rq *rq, struct xsc_cqe64 *cqe) { struct xsc_wq_cyc *wq = &rq->wqe.wq; struct xsc_channel *c = rq->cq.channel; + u8 cqe_opcode = get_cqe_opcode(cqe); struct xsc_wqe_frag_info *wi; struct sk_buff *skb; u32 cqe_bcnt; u16 ci; - u8 cqe_opcode = get_cqe_opcode(cqe); ci = xsc_wq_cyc_ctr2ix(wq, cqwq->cc); - wi = get_frag(rq, ci); + wi = get_frag(rq, ci); if (unlikely(cqe_opcode & BIT(7))) { rq->stats->wqe_err++; goto free_wqe; } cqe_bcnt = le32_to_cpu(cqe->msg_len); - ETH_DEBUG_LOG("data_len=%d, cc=%d", cqe_bcnt, ci); /* Check packet size. 
*/ - if (unlikely(cqe_bcnt > rq->hw_mtu)) { + if (unlikely(cqe_bcnt > rq->frags_sz)) { if (!XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_DROPLESS_RQ)) { - rq->stats->oversize_pkts_sw_drop++; + rq->stats->oversize_pkts_sw_drop += cqe_bcnt; goto free_wqe; + } else { + rq->stats->oversize_pkts_err++; } } + cqe_bcnt = min_t(u32, cqe_bcnt, rq->frags_sz); skb = rq->wqe.skb_from_cqe(rq, wi, cqe_bcnt, cqe->has_pph); if (!skb) goto free_wqe; @@ -704,10 +582,9 @@ static void xsc_dump_error_rqcqe(struct xsc_rq *rq, struct net_device *netdev = c->adapter->netdev; u32 ci = xsc_cqwq_get_ci(&rq->cq.wq); - net_err_ratelimited( - "Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%d, error_code=0x%x\n", - netdev->name, rq->cq.xcq.cqn, ci, - rq->rqn, cqe->qp_id, get_cqe_opcode(cqe)); + net_err_ratelimited("Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%d, error_code=0x%x\n", + netdev->name, rq->cq.xcq.cqn, ci, + rq->rqn, cqe->qp_id, get_cqe_opcode(cqe)); #ifdef XSC_DEBUG xsc_dump_err_cqe(rq->cq.xdev, cqe); @@ -741,22 +618,22 @@ int xsc_poll_rx_cq(struct xsc_cq *cq, int budget) } if (!work_done) - return 0; + goto out; xsc_cq_notify_hw(cq); /* ensure cq space is freed before enabling more cqes */ wmb(); +out: rq->post_wqes(rq); - ch_stats->poll += work_done; if (work_done < budget) { - if (ch_stats->poll == 0) + if (ch_stats->poll == 0 && cq->channel->rx_int) ch_stats->poll_0++; - else if (ch_stats->poll <= 64) - ch_stats->poll_1_64++; + else if (ch_stats->poll < 64) + ch_stats->poll_1_63++; else if (ch_stats->poll < 512) - ch_stats->poll_65_511++; + ch_stats->poll_64_511++; else if (ch_stats->poll < 1024) ch_stats->poll_512_1023++; else if (ch_stats->poll >= 1024) @@ -770,24 +647,22 @@ static inline int xsc_page_alloc_mapped(struct xsc_rq *rq, struct xsc_dma_info *dma_info) { struct xsc_channel *c = rq->cq.channel; - struct device *dev = c->adapter->dev; + struct device *dev = c->adapter->dev; #ifdef XSC_PAGE_CACHE if (xsc_rx_cache_get(rq, dma_info)) return 0; + + 
rq->stats->cache_alloc++; #endif dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); if (unlikely(!dma_info->page)) return -ENOMEM; - dma_info->refcnt_bias = 0; - page_ref_elev(dma_info); - dma_info->addr = dma_map_page(dev, dma_info->page, 0, - XSC_RX_FRAG_SZ, rq->buff.map_dir); + XSC_RX_FRAG_SZ, rq->buff.map_dir); if (unlikely(dma_mapping_error(dev, dma_info->addr))) { - page_ref_sub(dma_info->page, dma_info->refcnt_bias); page_pool_recycle_direct(rq->page_pool, dma_info->page); dma_info->page = NULL; return -ENOMEM; @@ -797,7 +672,7 @@ static inline int xsc_page_alloc_mapped(struct xsc_rq *rq, } static inline int xsc_get_rx_frag(struct xsc_rq *rq, - struct xsc_wqe_frag_info *frag) + struct xsc_wqe_frag_info *frag) { int err = 0; @@ -820,20 +695,16 @@ static int xsc_alloc_rx_wqe(struct xsc_rq *rq, struct xsc_eth_rx_wqe_cyc *wqe, u int err; for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { - if (i >= rq->frags_reuse_num && wqe->data[i].va != 0) - continue; - err = xsc_get_rx_frag(rq, frag); if (unlikely(err)) goto free_frags; addr = cpu_to_le64(frag->di->addr + frag->offset + rq->buff.headroom); wqe->data[i].va = addr; - wqe->data[i].seg_len = cpu_to_le32(XSC_RX_FRAG_SZ); - ETH_DEBUG_LOG( - "rq%d_wqe%d->frag%d, off=%d, last_in_page=%d, refcnt=(%d, %d), addr=0x%llx\n", - rq->rqn, ix, i, frag->offset, frag->last_in_page, - page_ref_count(frag->di->page), frag->di->refcnt_bias, addr); + if (frag->offset == 0) + ETH_DEBUG_LOG("rq%d_wqe%d_frag%d off=%d last=%d refcnt=%d addr=0x%llx\n", + rq->rqn, ix, i, frag->offset, frag->last_in_page, + page_ref_count(frag->di->page), addr); } return 0; @@ -845,33 +716,11 @@ static int xsc_alloc_rx_wqe(struct xsc_rq *rq, struct xsc_eth_rx_wqe_cyc *wqe, u return err; } -void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix, bool resv_ds) +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix) { struct xsc_wqe_frag_info *wi = get_frag(rq, ix); - if (!resv_ds) - xsc_free_rx_wqe(rq, wi, false); - else - 
xsc_free_rx_wqe_reserve_ds(rq, wi); -} - -void xsc_eth_free_rx_wqes(struct xsc_rq *rq) -{ - u16 wqe_ix; - u16 wqe_ix_head, wqe_ix_tail; - struct xsc_wq_cyc *wq = &rq->wqe.wq; - - wqe_ix_tail = xsc_wq_cyc_get_tail(wq); - wqe_ix_head = xsc_wq_cyc_get_head(wq); - - for (wqe_ix = wqe_ix_head; wqe_ix < wqe_ix_tail; wqe_ix++) - xsc_eth_dealloc_rx_wqe(rq, wqe_ix, true); - - while (!xsc_wq_cyc_is_empty(wq)) { - wqe_ix = xsc_wq_cyc_get_tail(wq); - xsc_eth_dealloc_rx_wqe(rq, wqe_ix, false); - xsc_wq_cyc_pop(wq); - } + xsc_free_rx_wqe(rq, wi, false); } static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) @@ -897,7 +746,7 @@ static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) free_wqes: while (--i >= 0) - xsc_eth_dealloc_rx_wqe(rq, ix + i, false); + xsc_eth_dealloc_rx_wqe(rq, ix + i); return err; } @@ -905,35 +754,37 @@ static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) { struct xsc_wq_cyc *wq = &rq->wqe.wq; - int left = xsc_wq_cyc_missing(wq); - u8 wqe_bulk; - u16 head; + u8 wqe_bulk, wqe_bulk_min; int alloc; + u16 head; int err; wqe_bulk = rq->wqe.info.wqe_bulk; + wqe_bulk_min = rq->wqe.info.wqe_bulk_min; if (xsc_wq_cyc_missing(wq) < wqe_bulk) return false; do { - alloc = min_t(int, wqe_bulk, left); head = xsc_wq_cyc_get_head(wq); - err = xsc_alloc_rx_wqes(rq, head, alloc); - if (unlikely(err)) - break; + alloc = min_t(int, wqe_bulk, xsc_wq_cyc_missing(wq)); + if (alloc < wqe_bulk && alloc >= wqe_bulk_min) + alloc = alloc & 0xfffffffe; - xsc_wq_cyc_push_n(wq, alloc); - rq->stats->wqes += alloc; - left -= alloc; - } while (left > 0); + if (alloc > 0) { + err = xsc_alloc_rx_wqes(rq, head, alloc); + if (unlikely(err)) + break; + + xsc_wq_cyc_push_n(wq, alloc); + rq->stats->wqes += alloc; + } + } while (xsc_wq_cyc_missing(wq) >= wqe_bulk_min); dma_wmb(); /* ensure wqes are visible to device before updating doorbell record */ xsc_rq_notify_hw(rq); - xsc_rx_cache_may_reduce(rq); - 
return !!err; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c index 160a88de62ae0aeba9934234a3115ae819689098..4c47a83f316360e97d9ecabe49b536bbbdb78256 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include -#include +#include "common/xsc_cmd.h" #include "xsc_eth_stats.h" #include "xsc_eth.h" @@ -39,6 +38,7 @@ static const struct counter_desc sw_stats_desc[] = { { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqes) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqe_err) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_err) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_buff_alloc_err) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_reuse) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_full) }, @@ -51,8 +51,8 @@ static const struct counter_desc sw_stats_desc[] = { { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_events) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_0) }, - { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1_64) }, - { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_65_511) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1_63) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_64_511) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_512_1023) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1024) }, { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_arm) }, @@ -112,6 +112,7 @@ void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) s->rx_wqes += rq_stats->wqes; s->rx_wqe_err += rq_stats->wqe_err; s->rx_oversize_pkts_sw_drop += 
rq_stats->oversize_pkts_sw_drop; + s->rx_oversize_pkts_err += rq_stats->oversize_pkts_err; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cache_reuse += rq_stats->cache_reuse; s->rx_cache_full += rq_stats->cache_full; @@ -125,8 +126,8 @@ void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_poll_0 += ch_stats->poll_0; - s->ch_poll_1_64 += ch_stats->poll_1_64; - s->ch_poll_65_511 += ch_stats->poll_65_511; + s->ch_poll_1_63 += ch_stats->poll_1_63; + s->ch_poll_64_511 += ch_stats->poll_64_511; s->ch_poll_512_1023 += ch_stats->poll_512_1023; s->ch_poll_1024 += ch_stats->poll_1024; s->ch_arm += ch_stats->arm; @@ -169,6 +170,7 @@ static const struct counter_desc rq_stats_desc[] = { { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cqes) }, { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, wqe_err) }, { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_err) }, { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, buff_alloc_err) }, { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_reuse) }, { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_full) }, @@ -206,8 +208,8 @@ static const struct counter_desc ch_stats_desc[] = { { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, events) }, { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll) }, { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_0) }, - { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1_64) }, - { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_65_511) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1_63) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_64_511) }, { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_512_1023) }, { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1024) }, { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, arm) }, @@ -230,7 +232,7 @@ static int xsc_grp_channels_get_num_stats(struct xsc_adapter *adapter) } static int xsc_grp_channels_fill_strings(struct xsc_adapter *adapter, u8 
*data, - int idx) + int idx) { int max_nch = xsc_get_netdev_max_channels(adapter); int max_tc = xsc_get_netdev_max_tc(adapter); @@ -258,7 +260,7 @@ static int xsc_grp_channels_fill_strings(struct xsc_adapter *adapter, u8 *data, } static int xsc_grp_channels_fill_stats(struct xsc_adapter *adapter, u64 *data, - int idx) + int idx) { int max_nch = xsc_get_netdev_max_channels(adapter); int max_tc = xsc_get_netdev_max_tc(adapter); @@ -269,13 +271,13 @@ static int xsc_grp_channels_fill_stats(struct xsc_adapter *adapter, u64 *data, for (j = 0; j < NUM_CH_STATS; j++) data[idx++] = XSC_READ_CTR64_CPU(&stats->channel_stats[i].ch, - ch_stats_desc, j); + ch_stats_desc, j); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = XSC_READ_CTR64_CPU(&stats->channel_stats[i].rq, - rq_stats_desc, j); + rq_stats_desc, j); } for (tc = 0; tc < max_tc; tc++) @@ -283,7 +285,7 @@ static int xsc_grp_channels_fill_stats(struct xsc_adapter *adapter, u64 *data, for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = XSC_READ_CTR64_CPU(&stats->channel_stats[i].sq[tc], - sq_stats_desc, j); + sq_stats_desc, j); return idx; } @@ -391,11 +393,11 @@ static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) in.pport = xdev->mac_port; ret = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_prio_stats_mbox_in), - (void *)&out, sizeof(struct xsc_prio_stats_mbox_out)); - if ((ret == 0) && (out.hdr.status == 0)) { + (void *)&out, sizeof(struct xsc_prio_stats_mbox_out)); + if (ret == 0 && out.hdr.status == 0) { for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) { val = XSC_READ_CTR64_CPU(&out.prio_stats, hw_prio_stats_desc, i); - data[idx++] = __be64_to_cpu(val); + data[idx++] = __be64_to_cpu(val); } } @@ -406,8 +408,8 @@ static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) hw_in.is_lag = 0; ret = xsc_cmd_exec(adapter->xdev, (void *)&hw_in, sizeof(struct xsc_hw_stats_mbox_in), - (void *)&hw_out, sizeof(struct 
xsc_hw_stats_mbox_out)); - if ((ret == 0) && (hw_out.hdr.status == 0)) { + (void *)&hw_out, sizeof(struct xsc_hw_stats_mbox_out)); + if (ret == 0 && hw_out.hdr.status == 0) { for (i = 0; i < ARRAY_SIZE(hw_stats_desc); i++) { val = XSC_READ_CTR64_CPU(&hw_out.hw_stats, hw_stats_desc, i); data[idx++] = __be64_to_cpu(val); @@ -461,4 +463,3 @@ void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 * } } } - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h index eeacfbad2dbd8a6da3f24bf4053b03c96219e210..7602cb4cfc49b7f6d07a138a9f4518e6523f1c2b 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -10,7 +9,7 @@ #include "xsc_eth_common.h" #define XSC_READ_CTR64_CPU(ptr, dsc, i) \ - (*(u64 *)((char *)ptr + dsc[i].offset)) + (*(u64 *)((char *)(ptr) + (dsc)[i].offset)) #define ETH_GSTRING_LEN 32 @@ -19,9 +18,9 @@ #define XSC_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) #define XSC_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) -#define XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio) (#fld"_prio"#prio) +#define XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio) (#fld "_prio"#prio) #define XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio) \ - (offsetof(type, fld) + (sizeof(type) * prio)) + (offsetof(type, fld) + (sizeof(type) * (prio))) #define XSC_DECLARE_HW_PRIO_STAT(type, fld, prio) \ {XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio), \ XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio)} @@ -38,6 +37,7 @@ struct xsc_rq_stats { u64 wqes; u64 wqe_err; u64 oversize_pkts_sw_drop; + u64 oversize_pkts_err; u64 buff_alloc_err; u64 cache_reuse; u64 cache_full; @@ -78,14 +78,14 @@ struct xsc_ch_stats { u64 events; u64 poll; u64 poll_0; - u64 poll_1_64; - u64 poll_65_511; + u64 poll_1_63; + u64 poll_64_511; u64 poll_512_1023; u64 poll_1024; u64 arm; u64 noarm; u64 aff_change; -}; +} ____cacheline_aligned_in_smp; struct xsc_adapter; struct xsc_stats_grp { @@ -97,7 +97,6 @@ struct xsc_stats_grp { }; struct counter_desc { - char format[ETH_GSTRING_LEN]; size_t offset; /* Byte offset */ }; @@ -133,6 +132,7 @@ struct xsc_sw_stats { u64 rx_wqes; u64 rx_wqe_err; u64 rx_oversize_pkts_sw_drop; + u64 rx_oversize_pkts_err; u64 rx_buff_alloc_err; u64 rx_cache_reuse; u64 rx_cache_full; @@ -145,8 +145,8 @@ struct xsc_sw_stats { u64 ch_events; u64 ch_poll; u64 ch_poll_0; - u64 ch_poll_1_64; - u64 ch_poll_65_511; + u64 ch_poll_1_63; + u64 ch_poll_64_511; u64 ch_poll_512_1023; u64 ch_poll_1024; u64 ch_arm; @@ -158,7 +158,7 @@ struct xsc_channel_stats { struct xsc_ch_stats ch; struct xsc_sq_stats sq[XSC_MAX_NUM_TC]; struct xsc_rq_stats rq; -}; +} ____cacheline_aligned_in_smp; 
struct xsc_stats { struct xsc_sw_stats sw; @@ -171,4 +171,3 @@ extern const int xsc_num_stats_grps; void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s); #endif /* XSC_EN_STATS_H */ - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..b0e714f424828b50626907f3955836f879ed88fc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +#include "xsc_eth.h" + +static void pcie_lat_hw_work(struct work_struct *work) +{ + int err; + struct delayed_work *dwork = to_delayed_work(work); + struct xsc_pcie_lat_work *pcie_lat = container_of(dwork, struct xsc_pcie_lat_work, work); + struct xsc_core_device *xdev = pcie_lat->xdev; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } + schedule_delayed_work_on(smp_processor_id(), dwork, + msecs_to_jiffies(pcie_lat->period * 1000)); +} + +static void pcie_lat_hw_init(struct xsc_core_device *xdev) +{ + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = 
__cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW_INIT); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } +} + +static ssize_t pcie_lat_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_EN); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%hhu\n", out.pcie_lat.pcie_lat_enable); +} + +static ssize_t pcie_lat_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *pcie_lat = adapter->xdev->pcie_lat; + int err; + u16 pcie_lat_enable; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + err = kstrtou16(buf, 0, &pcie_lat_enable); + if (err != 0) + return -EINVAL; + + if (pcie_lat_enable != XSC_PCIE_LAT_EN_DISABLE && + pcie_lat_enable != XSC_PCIE_LAT_EN_ENABLE) { + xsc_core_err(adapter->xdev, + "pcie_lat_enable should be set as %d or %d, cannot be %d\n", + XSC_PCIE_LAT_EN_DISABLE, XSC_PCIE_LAT_EN_ENABLE, + pcie_lat_enable); + return -EPERM; + } + + if 
(pcie_lat_enable == XSC_PCIE_LAT_EN_ENABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_DISABLE) { + pcie_lat_hw_init(adapter->xdev); + pcie_lat->adapter = adapter; + INIT_DELAYED_WORK(&pcie_lat->work, pcie_lat_hw_work); + schedule_delayed_work_on(smp_processor_id(), &pcie_lat->work, + msecs_to_jiffies(pcie_lat->period * 1000)); + } else if (pcie_lat_enable == XSC_PCIE_LAT_EN_DISABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_ENABLE) { + cancel_delayed_work_sync(&pcie_lat->work); + pcie_lat->period = XSC_PCIE_LAT_PERIOD_MIN; + } + + pcie_lat->enable = pcie_lat_enable; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = pcie_lat_enable; + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to set pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_enable); + +static ssize_t pcie_lat_interval_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_INTERVAL); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat interval, err(%u), status(%u)\n", + err, out.hdr.status); + return 
-EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_INTERVAL_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + return count; +} + +#define PCIE_LAT_CFG_INTERVAL_FORMAT "%u,%u,%u,%u,%u,%u,%u,%u" +static ssize_t pcie_lat_interval_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + u32 *ptr = in.pcie_lat.pcie_lat_interval; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + err = sscanf(buf, PCIE_LAT_CFG_INTERVAL_FORMAT, &ptr[0], &ptr[1], &ptr[2], &ptr[3], &ptr[4], + &ptr[5], &ptr[6], &ptr[7]); + if (err != XSC_PCIE_LAT_CFG_INTERVAL_MAX) + return -EINVAL; + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_INTERVAL); + for (i = 0 ; i < XSC_PCIE_LAT_CFG_INTERVAL_MAX; i++) + in.pcie_lat.pcie_lat_interval[i] = __cpu_to_be32(ptr[i]); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to set pcie_lat interval, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_interval); + +static ssize_t pcie_lat_period_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + + return sprintf(buf, "%u\n", tmp->period); +} + +static ssize_t pcie_lat_period_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + 
struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + int err; + u32 pcie_lat_period; + + err = kstrtouint(buf, 0, &pcie_lat_period); + if (err != 0) + return -EINVAL; + + if (pcie_lat_period < XSC_PCIE_LAT_PERIOD_MIN || + pcie_lat_period > XSC_PCIE_LAT_PERIOD_MAX) { + xsc_core_err(adapter->xdev, "pcie_lat_period should be set between [%d-%d], cannot be %d\n", + XSC_PCIE_LAT_PERIOD_MIN, XSC_PCIE_LAT_PERIOD_MAX, + pcie_lat_period); + return -EPERM; + } + + tmp->period = pcie_lat_period; + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_period); + +static ssize_t pcie_lat_histogram_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int i, err; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_HISTOGRAM); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to get pcie_lat histogram, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_HISTOGRAM_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + return count; +} + +static ssize_t pcie_lat_histogram_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static DEVICE_ATTR_RW(pcie_lat_histogram); + +static ssize_t pcie_lat_peak_show(struct device *device, + struct 
device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_PEAK); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat peak, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_peak)); +} + +static ssize_t pcie_lat_peak_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static DEVICE_ATTR_RW(pcie_lat_peak); + +static struct attribute *pcie_lat_attrs[] = { + &dev_attr_pcie_lat_enable.attr, + &dev_attr_pcie_lat_interval.attr, + &dev_attr_pcie_lat_period.attr, + &dev_attr_pcie_lat_histogram.attr, + &dev_attr_pcie_lat_peak.attr, + NULL, +}; + +static struct attribute_group pcie_lat_group = { + .name = "pcie_lat", + .attrs = pcie_lat_attrs, +}; + +static int xsc_pcie_lat_sysfs_init(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + struct xsc_pcie_lat_work *tmp; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + xdev->pcie_lat = tmp; + tmp->xdev = xdev; + + tmp->enable = XSC_PCIE_LAT_EN_DISABLE; + tmp->period = XSC_PCIE_LAT_PERIOD_MIN; + + err = sysfs_create_group(&dev->dev.kobj, &pcie_lat_group); + if (err) + goto remove_pcie_lat; + + return 0; + +remove_pcie_lat: + sysfs_remove_group(&dev->dev.kobj, &pcie_lat_group); + kfree(tmp); + + return err; +} + +static void xsc_pcie_lat_sysfs_fini(struct net_device *dev, struct xsc_core_device *xdev) +{ + int 
err; + struct xsc_pcie_lat_work *tmp; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + tmp = xdev->pcie_lat; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = XSC_PCIE_LAT_EN_DISABLE; + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) + xsc_core_err(xdev, "Failed to set pcie_lat disable, err(%u), status(%u)\n", + err, out.hdr.status); + + if (tmp->enable == XSC_PCIE_LAT_EN_ENABLE) + cancel_delayed_work_sync(&tmp->work); + + sysfs_remove_group(&dev->dev.kobj, &pcie_lat_group); + + if (!xdev->pcie_lat) + return; + + kfree(tmp); + xdev->pcie_lat = NULL; +} + +int xsc_eth_sysfs_create(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + err = xsc_pcie_lat_sysfs_init(dev, xdev); + + return err; +} + +void xsc_eth_sysfs_remove(struct net_device *dev, struct xsc_core_device *xdev) +{ + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + xsc_pcie_lat_sysfs_fini(dev, xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c index 5ba433c44b5725e0d9176b6c22aae274e62b0910..63bcf1e6b67434d18ca6f713bfe7431dafdf7a08 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -8,8 +7,8 @@ #include #include "xsc_eth_stats.h" #include "xsc_eth_common.h" -#include -#include +#include "common/xsc_hsi.h" +#include "common/qp.h" #include "xsc_eth.h" #include "xsc_eth_txrx.h" @@ -50,8 +49,8 @@ u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) } void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, - struct sk_buff *skb, - struct xsc_send_wqe_ctrl_seg *cseg) + struct sk_buff *skb, + struct xsc_send_wqe_ctrl_seg *cseg) { if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { if (skb->encapsulation) { @@ -112,9 +111,9 @@ static void xsc_dma_unmap_wqe_err(struct xsc_sq *sq, u8 num_dma) } static void xsc_txwqe_build_csegs(struct xsc_sq *sq, struct sk_buff *skb, - u16 mss, u16 ihs, u16 headlen, - u8 opcode, u16 ds_cnt, u32 num_bytes, - struct xsc_send_wqe_ctrl_seg *cseg) + u16 mss, u16 ihs, u16 headlen, + u8 opcode, u16 ds_cnt, u32 num_bytes, + struct xsc_send_wqe_ctrl_seg *cseg) { struct xsc_core_device *xdev = sq->cq.xdev; int send_wqe_ds_num_log = ilog2(xdev->caps.send_ds_num); @@ -136,12 +135,11 @@ static void xsc_txwqe_build_csegs(struct xsc_sq *sq, struct sk_buff *skb, cseg->ce = 1; WQE_CSEG_DUMP("cseg", cseg); - } static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, - u16 ihs, u16 headlen, - struct xsc_wqe_data_seg *dseg) + u16 ihs, u16 headlen, + struct xsc_wqe_data_seg *dseg) { dma_addr_t dma_addr = 0; u8 num_dma = 0; @@ -198,7 +196,7 @@ static inline bool xsc_wqc_has_room_for(struct xsc_wq_cyc *wq, } static inline void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc, - struct xsc_sq *sq) + struct xsc_sq *sq) { struct xsc_adapter *adapter = sq->channel->adapter; struct xsc_core_device *xdev = adapter->xdev; @@ -209,7 +207,9 @@ static inline void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc, doorbell_value.next_pid = pc << send_ds_num_log; doorbell_value.qp_num = sq->sqn; - /* keep order */ + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ wmb(); 
ETH_DEBUG_LOG("pc = %d sqn = %d\n", pc, sq->sqn); ETH_DEBUG_LOG("doorbell_value = %#x\n", doorbell_value.send_data); @@ -260,10 +260,9 @@ static void xsc_dump_error_sqcqe(struct xsc_sq *sq, u32 ci = xsc_cqwq_get_ci(&sq->cq.wq); struct net_device *netdev = sq->channel->netdev; - net_err_ratelimited( - "Error cqe on dev %s, cqn 0x%x, ci 0x%x, sqn 0x%x, error_code 0x%x, qpid 0x%x\n", - netdev->name, sq->cq.xcq.cqn, ci, - sq->sqn, get_cqe_opcode(cqe), cqe->qp_id); + net_err_ratelimited("Err cqe on dev %s cqn=0x%x ci=0x%x sqn=0x%x err_code=0x%x qpid=0x%x\n", + netdev->name, sq->cq.xcq.cqn, ci, + sq->sqn, get_cqe_opcode(cqe), cqe->qp_id); #ifdef XSC_DEBUG xsc_dump_err_cqe(sq->cq.xdev, cqe); @@ -301,7 +300,9 @@ void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq) sq->cc += wi->num_wqebbs; } +#ifdef XSC_BQL_SUPPORT netdev_tx_completed_queue(sq->txq, npkts, nbytes); +#endif } #ifdef NEED_CREATE_RX_THREAD @@ -373,21 +374,22 @@ bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) xsc_tx_dma_unmap(dev, dma); } +#ifndef NEED_CREATE_RX_THREAD npkts++; nbytes += wi->num_bytes; sqcc += wi->num_wqebbs; - -#ifndef NEED_CREATE_RX_THREAD napi_consume_skb(skb, napi_budget); #else - if (refcount_read(&skb->users) < 1) - stats->txdone_skb_refcnt_err++; - + npkts++; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + if (refcount_read(&skb->users) < 1) + stats->txdone_skb_refcnt_err++; napi_consume_skb(skb, 0); #endif ETH_DEBUG_LOG("ci=%d, sqcc=%d, pkts=%d\n", ci, sqcc, npkts); - } while ((++i <= napi_budget) && (cqe = xsc_cqwq_get_cqe(&cq->wq))); + } while ((++i <= XSC_TX_POLL_BUDGET) && (cqe = xsc_cqwq_get_cqe(&cq->wq))); stats->cqes += i; @@ -416,10 +418,10 @@ bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) return (i == napi_budget); } -static u32 xsc_eth_xmit_frame(struct sk_buff *skb, - struct xsc_sq *sq, - struct xsc_tx_wqe *wqe, - u16 pi) +static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, + struct xsc_sq *sq, + struct xsc_tx_wqe *wqe, + u16 pi) { struct 
xsc_send_wqe_ctrl_seg *cseg; struct xsc_wqe_data_seg *dseg; @@ -456,10 +458,9 @@ static u32 xsc_eth_xmit_frame(struct sk_buff *skb, headlen = skb->len - skb->data_len; ds_cnt += !!headlen; ds_cnt += skb_shinfo(skb)->nr_frags; - ETH_DEBUG_LOG( - "skb_len=%d, data_len=%d, nr_frags=%d, mss=%d, ihs=%d, headlen=%d, ds_cnt=%d\n", - skb->len, skb->data_len, skb_shinfo(skb)->nr_frags, - mss, ihs, headlen, ds_cnt); + ETH_DEBUG_LOG("skb_len=%d data_len=%d nr_frags=%d mss=%d ihs=%d headlen=%d ds_cnt=%d\n", + skb->len, skb->data_len, skb_shinfo(skb)->nr_frags, + mss, ihs, headlen, ds_cnt); /*to make the connection, only linear data is present*/ skbdata_debug_dump(skb, headlen, 1); @@ -485,7 +486,7 @@ static u32 xsc_eth_xmit_frame(struct sk_buff *skb, dseg = &wqe->data[0]; xsc_txwqe_build_csegs(sq, skb, mss, ihs, headlen, - opcode, ds_cnt, num_bytes, cseg); + opcode, ds_cnt, num_bytes, cseg); /*inline header is also use dma to transport*/ num_dma = xsc_txwqe_build_dsegs(sq, skb, ihs, headlen, dseg); @@ -493,7 +494,7 @@ static u32 xsc_eth_xmit_frame(struct sk_buff *skb, goto err_drop; xsc_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, - num_dma, wi); + num_dma, wi); stats->bytes += num_bytes; stats->xmit_more += xsc_netdev_xmit_more(skb); @@ -501,7 +502,7 @@ static u32 xsc_eth_xmit_frame(struct sk_buff *skb, err_drop: ETH_DEBUG_LOG("%s: drop skb, ds_cnt=%d, num_wqebbs=%d, num_dma=%d\n", - __func__, ds_cnt, num_wqebbs, num_dma); + __func__, ds_cnt, num_wqebbs, num_dma); stats->dropped++; dev_kfree_skb_any(skb); @@ -558,4 +559,3 @@ netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev) return ret; } - diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c index 1454ebbca89be2aa379f4bc4f778b105b61d1cdc..4d3876b27dde99b59bd3b2f5b360c963fad7877a 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c @@ 
-1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -19,7 +18,7 @@ void xsc_cq_notify_hw_rearm(struct xsc_cq *cq) db.cq_id = cpu_to_le32(cq->xcq.cqn); db.arm = 0; - /* keep order */ + /* ensure doorbell record is visible to device before ringing the doorbell */ wmb(); writel(db.val, REG_ADDR(cq->xdev, cq->xdev->regs.complete_db)); if (cq->channel && cq->channel->stats) @@ -39,8 +38,6 @@ void xsc_cq_notify_hw(struct xsc_cq *cq) db.cq_next_cid = cpu_to_le32(cq->wq.cc); db.cq_id = cpu_to_le32(cq->xcq.cqn); - /* ensure doorbell record is visible to device before ringing the doorbell*/ -// wmb(); writel(db.val, REG_ADDR(xdev, xdev->regs.complete_reg)); if (cq->channel && cq->channel->stats) cq->channel->stats->noarm++; @@ -74,15 +71,23 @@ int xsc_eth_napi_poll(struct napi_struct *napi, int budget) busy |= work_done == budget; } - if (work_done < budget) { - if (unlikely(!napi_complete_done(napi, work_done))) - goto out; + if (busy) { + if (likely(xsc_channel_no_affinity_change(c))) { + rcu_read_unlock(); + return budget; + } + c->stats->aff_change++; + if (budget && work_done == budget) + work_done--; + } - for (i = 0; i < c->num_tc; i++) - xsc_cq_notify_hw_rearm(&c->qp.sq[i].cq); + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; - xsc_cq_notify_hw_rearm(&rq->cq); - } + for (i = 0; i < c->num_tc; i++) + xsc_cq_notify_hw_rearm(&c->qp.sq[i].cq); + + xsc_cq_notify_hw_rearm(&rq->cq); out: rcu_read_unlock(); diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h index 8c5df22db76546455d6948340b48d1da7e518a60..1c0960b741cda07dcd88246a5836f0815a877a10 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: 
GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -8,7 +7,7 @@ #define XSC_RXTX_H #include "xsc_eth.h" -#include +#include "common/qp.h" #include "xsc_eth_debug.h" enum { @@ -46,7 +45,7 @@ static inline struct xsc_cqe64 *xsc_cqwq_get_cqe(struct xsc_cqwq *wq) cqe_ownership_bit = cqe->owner & XSC_CQE_OWNER_MASK; sw_ownership_val = xsc_cqwq_get_wrap_cnt(wq) & 1; ETH_DEBUG_LOG("ci=%d, cqe_owner=%d, sw_owner=%d\n", - ci, cqe_ownership_bit, sw_ownership_val); + ci, cqe_ownership_bit, sw_ownership_val); if (cqe_ownership_bit != sw_ownership_val) return NULL; @@ -62,18 +61,20 @@ int xsc_eth_napi_poll(struct napi_struct *napi, int budget); bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget); int xsc_poll_rx_cq(struct xsc_cq *cq, int budget); void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, - struct xsc_rq *rq, struct xsc_cqe64 *cqe); + struct xsc_rq *rq, struct xsc_cqe64 *cqe); +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, - struct xsc_wqe_frag_info *wi, - u32 cqe_bcnt, u8 has_pph); + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph); bool xsc_eth_post_rx_wqes(struct xsc_rq *rq); void xsc_cq_notify_hw(struct xsc_cq *cq); void xsc_cq_notify_hw_rearm(struct xsc_cq *cq); -void xsc_eth_free_rx_wqes(struct xsc_rq *rq); +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix); netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev); void xsc_page_release_dynamic(struct xsc_rq *rq, - struct xsc_dma_info *dma_info, - bool recycle); + struct xsc_dma_info *dma_info, + bool recycle); #endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c new file mode 100644 index 
0000000000000000000000000000000000000000..b3aab536fd0e232faa7e9b742ccc8ce36f7818b4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "xsc_eth.h" +#include "common/vport.h" +#include "common/xsc_fs.h" + +enum xsc_vlan_rule_type { + XSC_VLAN_RULE_TYPE_UNTAGGED, + XSC_VLAN_RULE_TYPE_ANY_CTAG_VID, + XSC_VLAN_RULE_TYPE_ANY_STAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, +}; + +static int xsc_vport_context_update_vlans(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, + u16 vid, bool add) +{ + struct net_device *ndev = adapter->netdev; + struct xsc_core_device *xdev = adapter->xdev; + int err; + + err = xsc_modify_nic_vport_vlans(xdev, vid, add); + if (err) + netdev_err(ndev, "Failed to modify vport vid:%d rule_type:%d err:%d\n", + vid, rule_type, err); + return err; +} + +static int xsc_add_vlan_rule(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, u16 vid) +{ + return xsc_vport_context_update_vlans(adapter, rule_type, vid, true); +} + +static void xsc_del_vlan_rule(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, u16 vid) +{ + xsc_vport_context_update_vlans(adapter, rule_type, vid, false); +} + +static int xsc_vlan_rx_add_cvid(struct xsc_adapter *adapter, u16 vid) +{ + int err; + + set_bit(vid, adapter->vlan_params.active_cvlans); + + err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + if (err) + clear_bit(vid, adapter->vlan_params.active_cvlans); + + return err; +} + +static int xsc_vlan_rx_add_svid(struct xsc_adapter *adapter, u16 vid) +{ + struct net_device *netdev = adapter->netdev; + int err; + + set_bit(vid, adapter->vlan_params.active_svlans); + + err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + if (err) { + clear_bit(vid, 
adapter->vlan_params.active_svlans); + return err; + } + + /* Need to fix some features.. */ + netdev_update_features(netdev); + return err; +} + +int xsc_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!vid) + return 0; + + if (be16_to_cpu(proto) == ETH_P_8021Q) + return xsc_vlan_rx_add_cvid(adapter, vid); + else if (be16_to_cpu(proto) == ETH_P_8021AD) + return xsc_vlan_rx_add_svid(adapter, vid); + + return -EOPNOTSUPP; +} + +int xsc_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!vid) + return 0; + + if (be16_to_cpu(proto) == ETH_P_8021Q) { + clear_bit(vid, adapter->vlan_params.active_cvlans); + xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + } else if (be16_to_cpu(proto) == ETH_P_8021AD) { + clear_bit(vid, adapter->vlan_params.active_svlans); + xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_update_features(dev); + } + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h index e9a5ef745b4eb790b46f72c0460a99afd8e18159..c133bf33271bc4a60490fba49978175bb49ddbd9 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -41,13 +40,12 @@ enum { struct xsc_dma_info { struct page *page; dma_addr_t addr; - u32 refcnt_bias; }; struct xsc_wqe_frag_info { struct xsc_dma_info *di; u32 offset; - bool last_in_page; + u8 last_in_page; }; struct xsc_rq_frag_info { @@ -60,6 +58,8 @@ struct xsc_rq_frags_info { u8 num_frags; u8 log_num_frags; u8 wqe_bulk; + u8 wqe_bulk_min; + u8 frags_max_num; }; struct xsc_cq { @@ -75,12 +75,21 @@ struct xsc_cq { /* control */ struct xsc_core_device *xdev; struct xsc_wq_ctrl wq_ctrl; + u8 rx; } ____cacheline_aligned_in_smp; -#define XSC_PAGE_CACHE_LOG_MAX_RQ_MULT 4 +struct xsc_pcie_lat_work { + struct xsc_core_device *xdev; + struct xsc_adapter *adapter; + struct delayed_work work; + u16 enable; + u32 period; +}; + +#define XSC_PAGE_CACHE_LOG_MAX_RQ_MULT 6 #define XSC_PAGE_CACHE_REDUCE_WORK_INTERVAL 200 /* msecs */ #define XSC_PAGE_CACHE_REDUCE_GRACE_PERIOD 1000 /* msecs */ -#define XSC_PAGE_CACHE_REDUCE_SUCCESS_CNT 20 +#define XSC_PAGE_CACHE_REDUCE_SUCCESS_CNT 4 struct xsc_page_cache_reduce { struct delayed_work reduce_work; @@ -94,22 +103,20 @@ struct xsc_page_cache_reduce { struct xsc_page_cache { struct xsc_dma_info *page_cache; - int head; + u32 head; + u32 tail; u32 sz; - u32 lrs; - u8 log_min_sz; - u8 log_max_sz; - struct xsc_page_cache_reduce reduce; + u32 resv; }; struct xsc_rq; struct xsc_cqe64; typedef void (*xsc_fp_handle_rx_cqe)(struct xsc_cqwq *cqwq, struct xsc_rq *rq, - struct xsc_cqe64 *cqe64); + struct xsc_cqe64 *cqe64); typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq); -typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq); +typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq, u16 ix); typedef struct sk_buff * (*xsc_fp_skb_from_cqe)(struct xsc_rq *rq, - struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); struct xsc_rq { struct xsc_core_qp cqp; @@ -137,11 +144,11 @@ struct xsc_rq { struct xsc_rq_stats *stats; u32 hw_mtu; - u8 frags_reuse_num; + u32 frags_sz; 
xsc_fp_handle_rx_cqe handle_rx_cqe; xsc_fp_post_rx_wqes post_wqes; - xsc_fp_dealloc_wqe dealloc_wqes; + xsc_fp_dealloc_wqe dealloc_wqe; struct xsc_page_cache page_cache; } ____cacheline_aligned_in_smp; @@ -240,7 +247,7 @@ static inline u8 get_cqe_opcode(struct xsc_cqe64 *cqe) } static inline void xsc_dump_err_cqe(struct xsc_core_device *dev, - struct xsc_cqe64 *cqe) + struct xsc_cqe64 *cqe) { print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, cqe, sizeof(*cqe), false); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile index 74258e8d41d77a5993b4476c91fc63cd82c05419..f284bbb5e4d9dc8ee04da19d0ed7ef51db7038fc 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile @@ -1,19 +1,16 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. # All rights reserved. -# -# Makefile for the Yunsilicon xsc ethernet driver -# -ccflags-y := -I $(srctree)/drivers/net/ethernet/yunsilicon/xsc/ -ccflags-y += -Wno-implicit-fallthrough + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o xsc_pci-y := main.o eq.o intf.o debugfs.o alloc.o wq.o cq.o qp.o \ - cmd2.o fw.o pagealloc.o port.o mr.o pd.o mad.o xsc_lag.o xsc_pci_ctrl.o\ - pci_irq.o vport.o sriov.o sriov_sysfs.o devlink.o eswitch.o xsc_port_ctrl.o \ - fw/cmd.o \ - fw/xsc_flow.o \ - fw/xsc_res.o \ - fw/osdep.o \ - fw/xsc_mem.o + cmd2.o fw.o port.o mr.o pd.o mad.o xsc_lag.o xsc_pci_ctrl.o\ + pci_irq.o vport.o sriov.o sriov_sysfs.o devlink.o eswitch.o xsc_port_ctrl.o res_obj.o qpts.o\ + fw/cmd.o \ + fw/xsc_flow.o \ + fw/xsc_res.o \ + fw/osdep.o \ + fw/xsc_mem.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c index 093e6438672d9f70b1dbe8f65c7507b696c4b95d..326484b2f108a899f18cc5a9bcad52cbfa82e4fa 100644 --- 
a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -11,7 +10,7 @@ #include #include #include -#include +#include "common/driver.h" /* Handling for queue buffers -- we allocate a bunch of memory and * register it in a memory region at HCA virtual address 0. If the @@ -20,7 +19,7 @@ */ int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, - struct xsc_buf *buf) + struct xsc_buf *buf) { dma_addr_t t; @@ -30,7 +29,7 @@ int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, buf->npages = 1; buf->page_shift = get_order(size) + PAGE_SHIFT; buf->direct.buf = dma_alloc_coherent(&xdev->pdev->dev, - size, &t, GFP_KERNEL | __GFP_ZERO); + size, &t, GFP_KERNEL | __GFP_ZERO); if (!buf->direct.buf) return -ENOMEM; @@ -55,7 +54,7 @@ int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, for (i = 0; i < buf->nbufs; i++) { buf->page_list[i].buf = dma_alloc_coherent(&xdev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL | __GFP_ZERO); + &t, GFP_KERNEL | __GFP_ZERO); if (!buf->page_list[i].buf) goto err_free; @@ -90,10 +89,10 @@ void xsc_buf_free(struct xsc_core_device *xdev, struct xsc_buf *buf) { int i; - if (buf->nbufs == 1) + if (buf->nbufs == 1) { dma_free_coherent(&xdev->pdev->dev, buf->size, buf->direct.buf, buf->direct.map); - else { + } else { if (BITS_PER_LONG == 64 && buf->direct.buf) vunmap(buf->direct.buf); @@ -140,8 +139,8 @@ void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages) EXPORT_SYMBOL_GPL(xsc_fill_page_frag_array); static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, - size_t size, dma_addr_t *dma_handle, - int node) + size_t size, dma_addr_t *dma_handle, + int node) { struct xsc_dev_resource 
*dev_res = xdev->dev_res; struct device *device = &xdev->pdev->dev; @@ -167,7 +166,7 @@ static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, } int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, - struct xsc_frag_buf *buf, int node) + struct xsc_frag_buf *buf, int node) { int i; @@ -184,14 +183,14 @@ int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, int frag_sz = min_t(int, size, PAGE_SIZE); frag->buf = xsc_dma_zalloc_coherent_node(xdev, frag_sz, - &frag->map, node); + &frag->map, node); if (!frag->buf) goto err_free_buf; if (frag->map & ((1 << buf->page_shift) - 1)) { dma_free_coherent(&xdev->pdev->dev, frag_sz, buf->frags[i].buf, buf->frags[i].map); xsc_core_warn(xdev, "unexpected map alignment: %pad, page_shift=%d\n", - &frag->map, buf->page_shift); + &frag->map, buf->page_shift); goto err_free_buf; } size -= frag_sz; @@ -226,7 +225,7 @@ void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf) EXPORT_SYMBOL_GPL(xsc_frag_buf_free); static struct xsc_db_pgdir *xsc_alloc_db_pgdir(struct xsc_core_device *xdev, - int node) + int node) { u32 db_per_page = PAGE_SIZE / cache_line_size(); struct xsc_db_pgdir *pgdir; @@ -244,7 +243,7 @@ static struct xsc_db_pgdir *xsc_alloc_db_pgdir(struct xsc_core_device *xdev, bitmap_fill(pgdir->bitmap, db_per_page); pgdir->db_page = xsc_dma_zalloc_coherent_node(xdev, PAGE_SIZE, - &pgdir->db_dma, node); + &pgdir->db_dma, node); if (!pgdir->db_page) { bitmap_free(pgdir->bitmap); kfree(pgdir); @@ -255,7 +254,7 @@ static struct xsc_db_pgdir *xsc_alloc_db_pgdir(struct xsc_core_device *xdev, } static int xsc_alloc_db_from_pgdir(struct xsc_db_pgdir *pgdir, - struct xsc_db *db) + struct xsc_db *db) { u32 db_per_page = PAGE_SIZE / cache_line_size(); int offset; @@ -333,4 +332,3 @@ void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db) mutex_unlock(&xdev->dev_res->pgdir_mutex); } EXPORT_SYMBOL_GPL(xsc_db_free); - diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c index bb395daf7b36fd952f6a0244a3c9725121d7711f..cb37850e0dcdc1682b3a17cbf1039635aac00d54 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -14,13 +13,11 @@ #include #include #include -#include +#include "common/driver.h" #include -#include -#include - +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" #include "tmp_cmdq_defines.h" -#include enum { CMD_IF_REV = 3, @@ -73,24 +70,18 @@ enum { }; static struct xsc_cmd_work_ent *alloc_cmd(struct xsc_cmd *cmd, - struct xsc_cmd_msg *in, - struct xsc_rsp_msg *out, - xsc_cmd_cbk_t cbk, - void *context, int page_queue) + struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out) { - gfp_t alloc_flags = cbk ? 
GFP_ATOMIC : GFP_KERNEL; struct xsc_cmd_work_ent *ent; - ent = kzalloc(sizeof(*ent), alloc_flags); + ent = kzalloc(sizeof(*ent), GFP_KERNEL); if (!ent) return ERR_PTR(-ENOMEM); ent->in = in; ent->out = out; - ent->callback = cbk; - ent->context = context; ent->cmd = cmd; - ent->page_queue = page_queue; return ent; } @@ -212,23 +203,19 @@ static int verify_signature(struct xsc_cmd_work_ent *ent) return 0; } -#ifdef COSIM -static void dump_buf(void *buf, int size, int data_only, int offset) +static void dump_buf(void *buf, int size, int offset) { __be32 *p = buf; int i; for (i = 0; i < size; i += 16) { - pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), - be32_to_cpu(p[1]), be32_to_cpu(p[2]), - be32_to_cpu(p[3])); + xsc_pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3])); p += 4; offset += 16; } - if (!data_only) - pr_debug("\n"); + xsc_pr_debug("\n"); } -#endif const char *xsc_command_str(int command) { @@ -236,23 +223,29 @@ const char *xsc_command_str(int command) case XSC_CMD_OP_QUERY_HCA_CAP: return "QUERY_HCA_CAP"; - case XSC_CMD_OP_SET_HCA_CAP: - return "SET_HCA_CAP"; + case XSC_CMD_OP_ENABLE_HCA: + return "ENABLE_HCA"; + + case XSC_CMD_OP_DISABLE_HCA: + return "DISABLE_HCA"; + + case XSC_CMD_OP_MODIFY_HCA: + return "MODIFY_HCA"; - case XSC_CMD_OP_QUERY_ADAPTER: - return "QUERY_ADAPTER"; + case XSC_CMD_OP_QUERY_CMDQ_VERSION: + return "QUERY_CMDQ_VERSION"; - case XSC_CMD_OP_INIT_HCA: - return "INIT_HCA"; + case XSC_CMD_OP_QUERY_MSIX_TBL_INFO: + return "QUERY_MSIX_TBL_INFO"; - case XSC_CMD_OP_TEARDOWN_HCA: - return "TEARDOWN_HCA"; + case XSC_CMD_OP_FUNCTION_RESET: + return "FUNCTION_RESET"; - case XSC_CMD_OP_QUERY_PAGES: - return "QUERY_PAGES"; + case XSC_CMD_OP_DUMMY: + return "DUMMY_CMD"; - case XSC_CMD_OP_MANAGE_PAGES: - return "MANAGE_PAGES"; + case XSC_CMD_OP_SET_DEBUG_INFO: + return "SET_DEBUG_INFO"; case XSC_CMD_OP_CREATE_MKEY: return "CREATE_MKEY"; @@ -266,6 +259,12 
@@ const char *xsc_command_str(int command) case XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS: return "QUERY_SPECIAL_CONTEXTS"; + case XSC_CMD_OP_SET_MPT: + return "SET_MPT"; + + case XSC_CMD_OP_SET_MTT: + return "SET_MTT"; + case XSC_CMD_OP_CREATE_EQ: return "CREATE_EQ"; @@ -332,141 +331,301 @@ const char *xsc_command_str(int command) case XSC_CMD_OP_INIT2INIT_QP: return "INIT2INIT_QP"; - case XSC_CMD_OP_SUSPEND_QP: - return "SUSPEND_QP"; - - case XSC_CMD_OP_UNSUSPEND_QP: - return "UNSUSPEND_QP"; - case XSC_CMD_OP_SQD2SQD_QP: return "SQD2SQD_QP"; - case XSC_CMD_OP_ALLOC_QP_COUNTER_SET: - return "ALLOC_QP_COUNTER_SET"; + case XSC_CMD_OP_QUERY_QP_FLUSH_STATUS: + return "QUERY_QP_FLUSH_STATUS"; - case XSC_CMD_OP_DEALLOC_QP_COUNTER_SET: - return "DEALLOC_QP_COUNTER_SET"; + case XSC_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; - case XSC_CMD_OP_QUERY_QP_COUNTER_SET: - return "QUERY_QP_COUNTER_SET"; + case XSC_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; - case XSC_CMD_OP_CREATE_PSV: - return "CREATE_PSV"; + case XSC_CMD_OP_ACCESS_REG: + return "ACCESS_REG"; - case XSC_CMD_OP_DESTROY_PSV: - return "DESTROY_PSV"; + case XSC_CMD_OP_MODIFY_RAW_QP: + return "MODIFY_RAW_QP"; - case XSC_CMD_OP_QUERY_PSV: - return "QUERY_PSV"; + case XSC_CMD_OP_ENABLE_NIC_HCA: + return "ENABLE_NIC_HCA"; - case XSC_CMD_OP_QUERY_SIG_RULE_TABLE: - return "QUERY_SIG_RULE_TABLE"; + case XSC_CMD_OP_DISABLE_NIC_HCA: + return "DISABLE_NIC_HCA"; - case XSC_CMD_OP_QUERY_BLOCK_SIZE_TABLE: - return "QUERY_BLOCK_SIZE_TABLE"; + case XSC_CMD_OP_MODIFY_NIC_HCA: + return "MODIFY_NIC_HCA"; - case XSC_CMD_OP_CREATE_SRQ: - return "CREATE_SRQ"; + case XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT: + return "QUERY_NIC_VPORT_CONTEXT"; - case XSC_CMD_OP_DESTROY_SRQ: - return "DESTROY_SRQ"; + case XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: + return "MODIFY_NIC_VPORT_CONTEXT"; - case XSC_CMD_OP_QUERY_SRQ: - return "QUERY_SRQ"; + case XSC_CMD_OP_QUERY_VPORT_STATE: + return "QUERY_VPORT_STATE"; - case XSC_CMD_OP_ARM_RQ: - return "ARM_RQ"; + case 
XSC_CMD_OP_MODIFY_VPORT_STATE: + return "MODIFY_VPORT_STATE"; - case XSC_CMD_OP_RESIZE_SRQ: - return "RESIZE_SRQ"; + case XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT: + return "QUERY_HCA_VPORT_CONTEXT"; - case XSC_CMD_OP_ALLOC_PD: - return "ALLOC_PD"; + case XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: + return "MODIFY_HCA_VPORT_CONTEXT"; - case XSC_CMD_OP_DEALLOC_PD: - return "DEALLOC_PD"; + case XSC_CMD_OP_QUERY_HCA_VPORT_GID: + return "QUERY_HCA_VPORT_GID"; - case XSC_CMD_OP_ALLOC_UAR: - return "ALLOC_UAR"; + case XSC_CMD_OP_QUERY_HCA_VPORT_PKEY: + return "QUERY_HCA_VPORT_PKEY"; - case XSC_CMD_OP_DEALLOC_UAR: - return "DEALLOC_UAR"; + case XSC_CMD_OP_QUERY_VPORT_COUNTER: + return "QUERY_VPORT_COUNTER"; - case XSC_CMD_OP_ATTACH_TO_MCG: - return "ATTACH_TO_MCG"; + case XSC_CMD_OP_QUERY_PRIO_STATS: + return "QUERY_PRIO_STATS"; - case XSC_CMD_OP_DETACH_FROM_MCG: - return "DETACH_FROM_MCG"; + case XSC_CMD_OP_QUERY_PHYPORT_STATE: + return "QUERY_PHYPORT_STATE"; - case XSC_CMD_OP_ALLOC_XRCD: - return "ALLOC_XRCD"; + case XSC_CMD_OP_QUERY_EVENT_TYPE: + return "QUERY_EVENT_TYPE"; - case XSC_CMD_OP_DEALLOC_XRCD: - return "DEALLOC_XRCD"; + case XSC_CMD_OP_QUERY_LINK_INFO: + return "QUERY_LINK_INFO"; - case XSC_CMD_OP_ACCESS_REG: - return "ACCESS_REG"; + case XSC_CMD_OP_LAG_CREATE: + return "LAG_CREATE"; + + case XSC_CMD_OP_LAG_MODIFY: + return "LAG_MODIFY"; + + case XSC_CMD_OP_LAG_DESTROY: + return "LAG_DESTROY"; + + case XSC_CMD_OP_LAG_SET_QOS: + return "LAG_SET_QOS"; + + case XSC_CMD_OP_ENABLE_MSIX: + return "ENABLE_MSIX"; + + case XSC_CMD_OP_IOCTL_FLOW: + return "CFG_FLOW_TABLE"; + + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return "SET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return "GET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return "SET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return "GET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_SET_PCP_PMT: + return "SET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + return "GET_PCP_PMT"; + + case 
XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + return "SET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + return "GET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_SET_PFC: + return "SET_PFC"; + + case XSC_CMD_OP_IOCTL_GET_PFC: + return "GET_PFC"; + + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return "SET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return "GET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_SET_SP: + return "SET_SP"; + + case XSC_CMD_OP_IOCTL_GET_SP: + return "GET_SP"; + + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return "SET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + return "GET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return "DPU_SET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return "DPU_GET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return "DPU_SET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return "DPU_GET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: + return "ENABLE_RP"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: + return "ENABLE_NP"; + + case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: + return "SET_INIT_ALPHA"; + + case XSC_CMD_OP_IOCTL_SET_G: + return "SET_G"; + + case XSC_CMD_OP_IOCTL_SET_AI: + return "SET_AI"; + + case XSC_CMD_OP_IOCTL_SET_HAI: + return "SET_HAI"; + + case XSC_CMD_OP_IOCTL_SET_TH: + return "SET_TH"; + + case XSC_CMD_OP_IOCTL_SET_BC_TH: + return "SET_BC_TH"; + + case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: + return "SET_CNP_OPCODE"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: + return "SET_CNP_BTH_B"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: + return "SET_CNP_BTH_F"; + + case XSC_CMD_OP_IOCTL_SET_CNP_ECN: + return "SET_CNP_ECN"; + + case XSC_CMD_OP_IOCTL_SET_DATA_ECN: + return "SET_DATA_ECN"; + + case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: + return "SET_CNP_TX_INTERVAL"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: + return "SET_EVT_PERIOD_RSTTIME"; + + case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: + return "SET_CNP_DSCP"; + + case XSC_CMD_OP_IOCTL_SET_CNP_PCP: + return 
"SET_CNP_PCP"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: + return "SET_EVT_PERIOD_ALPHA"; + + case XSC_CMD_OP_IOCTL_GET_CC_CFG: + return "GET_CC_CFG"; + + case XSC_CMD_OP_IOCTL_GET_CC_STAT: + return "GET_CC_STAT"; + + case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: + return "SET_CLAMP_TGT_RATE"; + + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return "SET_MAX_HAI_FACTOR"; + + case XSC_CMD_OP_IOCTL_SET_HWC: + return "SET_HWCONFIG"; + + case XSC_CMD_OP_IOCTL_GET_HWC: + return "GET_HWCONFIG"; + + case XSC_CMD_OP_SET_MTU: + return "SET_MTU"; + + case XSC_CMD_OP_QUERY_ETH_MAC: + return "QUERY_ETH_MAC"; + + case XSC_CMD_OP_QUERY_HW_STATS: + return "QUERY_HW_STATS"; + + case XSC_CMD_OP_QUERY_PAUSE_CNT: + return "QUERY_PAUSE_CNT"; + + case XSC_CMD_OP_SET_RTT_EN: + return "SET_RTT_EN"; + + case XSC_CMD_OP_GET_RTT_EN: + return "GET_RTT_EN"; + + case XSC_CMD_OP_SET_RTT_QPN: + return "SET_RTT_QPN"; + + case XSC_CMD_OP_GET_RTT_QPN: + return "GET_RTT_QPN"; + + case XSC_CMD_OP_SET_RTT_PERIOD: + return "SET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_PERIOD: + return "GET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_RESULT: + return "GET_RTT_RESULT"; + + case XSC_CMD_OP_GET_RTT_STATS: + return "ET_RTT_STATS"; + + case XSC_CMD_OP_SET_LED_STATUS: + return "SET_LED_STATUS"; + + case XSC_CMD_OP_AP_FEAT: + return "AP_FEAT"; + + case XSC_CMD_OP_PCIE_LAT_FEAT: + return "PCIE_LAT_FEAT"; + + case XSC_CMD_OP_USER_EMU_CMD: + return "USER_EMU_CMD"; default: return "unknown command opcode"; } } -#ifdef COSIM static void dump_command(struct xsc_core_device *xdev, struct xsc_cmd_mailbox *next, struct xsc_cmd_work_ent *ent, int input, int len) { u16 op = be16_to_cpu(((struct xsc_inbox_hdr *)(ent->lay->in))->opcode); - int data_only; int offset = 0; - int dump_len; - data_only = !!(xsc_debug_mask & (1 << XSC_CMD_DATA)); + if (!(xsc_debug_mask & (1 << XSC_CMD_DATA))) + return; - if (data_only) - xsc_core_dbg_mask(xdev, 1 << XSC_CMD_DATA, - "dump command data %s(0x%x) %s\n", - xsc_command_str(op), op, - 
input ? "INPUT" : "OUTPUT"); - else - xsc_core_dbg(xdev, "dump command %s(0x%x) %s\n", - xsc_command_str(op), op, - input ? "INPUT" : "OUTPUT"); - - if (data_only) { - if (input) { - dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); - offset += sizeof(ent->lay->in); - } else { - dump_buf(ent->rsp_lay->out, sizeof(ent->rsp_lay->out), 1, offset); - offset += sizeof(ent->rsp_lay->out); - } + xsc_core_dbg(xdev, "dump command %s(0x%x) %s\n", xsc_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + + if (input) { + dump_buf(ent->lay, sizeof(*ent->lay), offset); + offset += sizeof(*ent->lay); } else { - if (input) { - dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); - offset += sizeof(*ent->lay); - } else { - dump_buf(ent->rsp_lay, sizeof(*ent->rsp_lay), 0, offset); - offset += sizeof(*ent->rsp_lay); - } + dump_buf(ent->rsp_lay, sizeof(*ent->rsp_lay), offset); + offset += sizeof(*ent->rsp_lay); } while (next && offset < len) { - if (data_only) { - dump_len = min_t(int, XSC_CMD_DATA_BLOCK_SIZE, len - offset); - dump_buf(next->buf, dump_len, 1, offset); - offset += XSC_CMD_DATA_BLOCK_SIZE; - } else { - xsc_core_dbg(xdev, "command block:\n"); - dump_buf(next->buf, sizeof(struct xsc_cmd_prot_block), 0, offset); - offset += sizeof(struct xsc_cmd_prot_block); - } + xsc_core_dbg(xdev, "command block:\n"); + dump_buf(next->buf, sizeof(struct xsc_cmd_prot_block), offset); + offset += sizeof(struct xsc_cmd_prot_block); next = next->next; } - - if (data_only) - pr_debug("\n"); } -#endif static void cmd_work_handler(struct work_struct *work) { @@ -477,17 +636,13 @@ static void cmd_work_handler(struct work_struct *work) struct semaphore *sem; unsigned long flags; - sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; + sem = &cmd->sem; down(sem); - if (!ent->page_queue) { - ent->idx = alloc_ent(cmd); - if (ent->idx < 0) { - xsc_core_err(xdev, "failed to allocate command entry\n"); - up(sem); - return; - } - } else { - ent->idx = cmd->max_reg_cmds; + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + xsc_core_err(xdev, "failed to allocate command entry\n"); + up(sem); + return; } ent->token = alloc_token(cmd); @@ -511,16 +666,14 @@ static void cmd_work_handler(struct work_struct *work) set_signature(ent); else lay->sig = 0xff; -#ifdef COSIM dump_command(xdev, ent->in->next, ent, 1, ent->in->len); -#endif ktime_get_ts64(&ent->ts1); /* ring doorbell after the descriptor is valid */ wmb(); - cmd->cmd_pid = (cmd->cmd_pid + 1) % (1<log_sz); + cmd->cmd_pid = (cmd->cmd_pid + 1) % (1 << cmd->log_sz); writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); mmiowb(); spin_unlock_irqrestore(&cmd->doorbell_lock, flags); @@ -578,12 +731,12 @@ static int wait_func(struct xsc_core_device *xdev, struct xsc_cmd_work_ent *ent) err = ent->ret; if (err == -ETIMEDOUT) { - xsc_core_warn(xdev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", - xsc_command_str(msg_to_opcode(ent->in)), - msg_to_opcode(ent->in)); + xsc_core_warn(xdev, "wait for %s(0x%x) response timeout!\n", + xsc_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); } else if (err) { xsc_core_dbg(xdev, "err %d, delivery status %s(%d)\n", err, - deliv_status_to_str(ent->status), ent->status); + deliv_status_to_str(ent->status), ent->status); } return err; @@ -594,8 +747,7 @@ static int wait_func(struct xsc_core_device *xdev, struct xsc_cmd_work_ent *ent) * 2. 
page queue commands do not support asynchrous completion */ static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, - struct xsc_rsp_msg *out, xsc_cmd_cbk_t callback, - void *context, int page_queue, u8 *status) + struct xsc_rsp_msg *out, u8 *status) { struct xsc_cmd *cmd = &xdev->cmd; struct xsc_cmd_work_ent *ent; @@ -606,55 +758,46 @@ static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, u16 op; struct semaphore *sem; - if (callback && page_queue) - return -EINVAL; - - ent = alloc_cmd(cmd, in, out, callback, context, page_queue); + ent = alloc_cmd(cmd, in, out); if (IS_ERR(ent)) return PTR_ERR(ent); - if (!callback) - init_completion(&ent->done); - + init_completion(&ent->done); INIT_WORK(&ent->work, cmd_work_handler); - if (page_queue) { - cmd_work_handler(&ent->work); - } else if (!queue_work(cmd->wq, &ent->work)) { + if (!queue_work(cmd->wq, &ent->work)) { xsc_core_warn(xdev, "failed to queue work\n"); err = -ENOMEM; goto out_free; } - if (!callback) { - err = wait_func(xdev, ent); - if (err == -ETIMEDOUT) - goto out; - t1 = timespec64_to_ktime(ent->ts1); - t2 = timespec64_to_ktime(ent->ts2); - delta = ktime_sub(t2, t1); - ds = ktime_to_ns(delta); - op = be16_to_cpu(((struct xsc_inbox_hdr *)in->first.data)->opcode); - if (op < ARRAY_SIZE(cmd->stats)) { - stats = &cmd->stats[op]; - spin_lock(&stats->lock); - stats->sum += ds; - ++stats->n; - spin_unlock(&stats->lock); - } - xsc_core_dbg_mask(xdev, 1 << XSC_CMD_TIME, - "fw exec time for %s is %lld nsec\n", - xsc_command_str(op), ds); - *status = ent->status; - free_cmd(ent); - } + err = wait_func(xdev, ent); + if (err == -ETIMEDOUT) + goto out; + t1 = timespec64_to_ktime(ent->ts1); + t2 = timespec64_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + op = be16_to_cpu(((struct xsc_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock(&stats->lock); + stats->sum += ds; + 
++stats->n; + spin_unlock(&stats->lock); + } + xsc_core_dbg_mask(xdev, 1 << XSC_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + xsc_command_str(op), ds); + *status = ent->status; + free_cmd(ent); return err; -out_free: - free_cmd(ent); out: - sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + sem = &cmd->sem; up(sem); +out_free: + free_cmd(ent); return err; } @@ -745,7 +888,6 @@ static int xsc_copy_from_rsp_msg(void *to, struct xsc_rsp_msg *from, int size) copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); block = next->buf; if (block->owner_status != 1) { - //xsc_core_warn(xdev, "dam buf not ready\n"); mdelay(10); continue; } @@ -760,7 +902,7 @@ static int xsc_copy_from_rsp_msg(void *to, struct xsc_rsp_msg *from, int size) } static struct xsc_cmd_mailbox *alloc_cmd_box(struct xsc_core_device *xdev, - gfp_t flags) + gfp_t flags) { struct xsc_cmd_mailbox *mailbox; @@ -789,7 +931,7 @@ static void free_cmd_box(struct xsc_core_device *xdev, } static struct xsc_cmd_msg *xsc_alloc_cmd_msg(struct xsc_core_device *xdev, - gfp_t flags, int size) + gfp_t flags, int size) { struct xsc_cmd_mailbox *tmp, *head = NULL; struct xsc_cmd_prot_block *block; @@ -836,7 +978,7 @@ static struct xsc_cmd_msg *xsc_alloc_cmd_msg(struct xsc_core_device *xdev, } static void xsc_free_cmd_msg(struct xsc_core_device *xdev, - struct xsc_cmd_msg *msg) + struct xsc_cmd_msg *msg) { struct xsc_cmd_mailbox *head = msg->next; struct xsc_cmd_mailbox *next; @@ -850,7 +992,7 @@ static void xsc_free_cmd_msg(struct xsc_core_device *xdev, } static struct xsc_rsp_msg *xsc_alloc_rsp_msg(struct xsc_core_device *xdev, - gfp_t flags, int size) + gfp_t flags, int size) { struct xsc_cmd_mailbox *tmp, *head = NULL; struct xsc_cmd_prot_block *block; @@ -897,7 +1039,7 @@ static struct xsc_rsp_msg *xsc_alloc_rsp_msg(struct xsc_core_device *xdev, } static void xsc_free_rsp_msg(struct xsc_core_device *xdev, - struct xsc_rsp_msg *msg) + struct xsc_rsp_msg *msg) { struct xsc_cmd_mailbox *head = msg->next; struct 
xsc_cmd_mailbox *next; @@ -1111,8 +1253,6 @@ void xsc_cmd_use_events(struct xsc_core_device *xdev) for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); - down(&cmd->pages_sem); - flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_EVENTS; @@ -1122,7 +1262,6 @@ void xsc_cmd_use_events(struct xsc_core_device *xdev) kthread_stop(cmd->cq_task); cmd->cq_task = NULL; - up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } @@ -1136,15 +1275,12 @@ void xsc_cmd_use_polling(struct xsc_core_device *xdev) for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); - down(&cmd->pages_sem); - flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_POLLING; cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling"); if (cmd->cq_task) wake_up_process(cmd->cq_task); - up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } @@ -1195,13 +1331,8 @@ static void free_msg(struct xsc_core_device *xdev, struct xsc_cmd_msg *msg) } } -static int is_manage_pages(struct xsc_inbox_hdr *in) -{ - return be16_to_cpu(in->opcode) == XSC_CMD_OP_MANAGE_PAGES; -} - static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, - struct xsc_rsp_msg *out, u16 dummy_cnt, u16 dummy_start_pid) + struct xsc_rsp_msg *out, u16 dummy_cnt, u16 dummy_start_pid) { struct xsc_cmd *cmd = &xdev->cmd; struct xsc_cmd_work_ent **dummy_ent_arr; @@ -1221,7 +1352,7 @@ static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, } for (i = 0; i < dummy_cnt; i++) { - dummy_ent_arr[i] = alloc_cmd(cmd, in, out, NULL, NULL, 0); + dummy_ent_arr[i] = alloc_cmd(cmd, in, out); if (IS_ERR(dummy_ent_arr[i])) { xsc_core_err(xdev, "failed to alloc cmd buffer\n"); err = -ENOMEM; @@ -1245,8 +1376,7 @@ static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, lay = get_inst(cmd, temp_pid); dummy_ent_arr[i]->lay = lay; memset(lay, 0, sizeof(*lay)); - memcpy(lay->in, dummy_ent_arr[i]->in->first.data, - sizeof(dummy_ent_arr[i]->in)); + 
memcpy(lay->in, dummy_ent_arr[i]->in->first.data, sizeof(dummy_ent_arr[i]->in)); lay->inlen = cpu_to_be32(dummy_ent_arr[i]->in->len); lay->outlen = cpu_to_be32(dummy_ent_arr[i]->out->len); lay->type = XSC_PCI_CMD_XPORT; @@ -1256,7 +1386,7 @@ static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, set_signature(dummy_ent_arr[i]); else lay->sig = 0xff; - temp_pid = (temp_pid + 1) % (1<log_sz); + temp_pid = (temp_pid + 1) % (1 << cmd->log_sz); } /* ring doorbell after the descriptor is valid */ @@ -1267,10 +1397,10 @@ static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, mmiowb(); xsc_core_dbg(xdev, "write 0x%x to command doorbell, idx %u ~ %u\n", cmd->cmd_pid, - dummy_ent_arr[0]->idx, dummy_ent_arr[dummy_cnt-1]->idx); + dummy_ent_arr[0]->idx, dummy_ent_arr[dummy_cnt - 1]->idx); if (wait_for_completion_timeout(&dummy_ent_arr[dummy_cnt - 1]->done, - msecs_to_jiffies(3000)) == 0) { + msecs_to_jiffies(3000)) == 0) { xsc_core_err(xdev, "dummy_cmd %d ent timeout, cmdq fail\n", dummy_cnt - 1); err = -ETIMEDOUT; } else { @@ -1298,7 +1428,7 @@ static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, } static int xsc_dummy_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size, u16 dmmy_cnt, u16 dummy_start) + int out_size, u16 dmmy_cnt, u16 dummy_start) { struct xsc_cmd_msg *inb; struct xsc_rsp_msg *outb; @@ -1378,18 +1508,18 @@ static int request_pid_cid_mismatch_restore(struct xsc_core_device *xdev) req_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); req_cid = readl(REG_ADDR(xdev, cmd->reg.req_cid_addr)); - if (req_pid >= (1<log_sz) || req_cid >= (1<log_sz)) { + if (req_pid >= (1 << cmd->log_sz) || req_cid >= (1 << cmd->log_sz)) { xsc_core_err(xdev, "req_pid %d, req_cid %d, out of normal range!!! max value is %d\n", - req_pid, req_cid, (1<log_sz)); + req_pid, req_cid, (1 << cmd->log_sz)); return -1; } if (req_pid == req_cid) return 0; - gap = (req_pid > req_cid) ? 
(req_pid - req_cid) : ((1<log_sz) + req_pid - req_cid); + gap = (req_pid > req_cid) ? (req_pid - req_cid) : ((1 << cmd->log_sz) + req_pid - req_cid); xsc_core_info(xdev, "Cmdq req_pid %d, req_cid %d, send %d dummy cmds\n", - req_pid, req_cid, gap); + req_pid, req_cid, gap); err = xsc_send_dummy_cmd(xdev, gap, req_cid); if (err) { @@ -1406,12 +1536,9 @@ int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out { struct xsc_cmd_msg *inb; struct xsc_rsp_msg *outb; - int pages_queue; int err; u8 status = 0; - pages_queue = is_manage_pages(in); - inb = alloc_msg(xdev, in_size); if (IS_ERR(inb)) { err = PTR_ERR(inb); @@ -1430,12 +1557,13 @@ int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out goto out_in; } - err = xsc_cmd_invoke(xdev, inb, outb, NULL, NULL, pages_queue, &status); + err = xsc_cmd_invoke(xdev, inb, outb, &status); if (err) goto out_out; if (status) { - xsc_core_err(xdev, "err %d, status %d\n", err, status); + xsc_core_err(xdev, "opcode:%#x, err %d, status %d\n", + msg_to_opcode(inb), err, status); err = status_to_err(status); goto out_out; } @@ -1511,14 +1639,10 @@ static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xs { struct xsc_cmd *cmd = &xdev->cmd; struct xsc_cmd_work_ent *ent; - xsc_cmd_cbk_t callback; - void *context; - int err; struct xsc_inbox_hdr *hdr; if (idx > cmd->max_reg_cmds || (cmd->bitmask & (1 << idx))) { - xsc_core_err(xdev, "idx[%d] exceed max cmds, or has no relative request.\n", - idx); + xsc_core_err(xdev, "idx[%d] exceed max cmds, or has no relative request.\n", idx); return; } ent = cmd->ent_arr[idx]; @@ -1526,37 +1650,21 @@ static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xs ktime_get_ts64(&ent->ts2); memcpy(ent->out->first.data, ent->rsp_lay->out, sizeof(ent->rsp_lay->out)); -#ifdef COSIM dump_command(xdev, ent->out->next, ent, 0, ent->out->len); -#endif if (!cmd->checksum_disabled) ent->ret = verify_signature(ent); else 
ent->ret = 0; ent->status = 0; -// ent->status = ((struct xsc_outbox_hdr *)ent->rsp_lay->out)->status; hdr = (struct xsc_inbox_hdr *)ent->in->first.data; -#ifdef XSC_DEBUG xsc_core_dbg(xdev, "delivery status:%s(%d), rsp status=%d, opcode %#x, idx:%d,%d, ret=%d\n", - deliv_status_to_str(ent->status), ent->status, - ((struct xsc_outbox_hdr *)ent->rsp_lay->out)->status, - __be16_to_cpu(hdr->opcode), idx, ent->lay->idx, ent->ret); -#endif + deliv_status_to_str(ent->status), ent->status, + ((struct xsc_outbox_hdr *)ent->rsp_lay->out)->status, + __be16_to_cpu(hdr->opcode), idx, ent->lay->idx, ent->ret); free_ent(cmd, ent->idx); - if (ent->callback) { - callback = ent->callback; - context = ent->context; - err = ent->ret; - free_cmd(ent); - callback(err, context); - } else { - complete(&ent->done); - } - if (ent->page_queue) - up(&cmd->pages_sem); - else - up(&cmd->sem); + complete(&ent->done); + up(&cmd->sem); } static int cmd_cq_polling(void *data) @@ -1586,13 +1694,13 @@ static int cmd_cq_polling(void *data) if (cmd->owner_bit != rsp->owner_bit) { //hw update cq doorbell but buf may not ready xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", - cmd->cq_cid, cq_pid); + cmd->cq_cid, cq_pid); continue; } xsc_cmd_comp_handler(xdev, rsp->idx, rsp); - cmd->cq_cid = (cmd->cq_cid + 1) % (1<log_sz); + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); if (cmd->cq_cid == 0) @@ -1667,7 +1775,7 @@ void xsc_cmd_resp_handler(struct xsc_core_device *xdev) } if (cmd->owner_bit != rsp->owner_bit) { xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", - cmd->cq_cid, cq_pid); + cmd->cq_cid, cq_pid); return; } @@ -1707,14 +1815,6 @@ int xsc_cmd_init(struct xsc_core_device *xdev) int err; int i; -// cmd_if_rev = cmdif_rev(dev); -// if (cmd_if_rev != CMD_IF_REV) { -// dev_err(&dev->pdev->dev, -// "Driver cmdif rev(%d) differs from firmware's(%d)\n", -// CMD_IF_REV, cmd_if_rev); -// return 
-EINVAL; -// } - //sriov need adapt for this process. //now there is 544 cmdq resource, soc using from id 514 if (xsc_core_is_pf(xdev)) { @@ -1768,7 +1868,7 @@ int xsc_cmd_init(struct xsc_core_device *xdev) } cmd->cq_dma = dma_map_single(&xdev->pdev->dev, cmd->cq_buf, PAGE_SIZE, - DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); if (dma_mapping_error(&xdev->pdev->dev, cmd->cq_dma)) { err = -ENOMEM; goto err_map_cmd; @@ -1785,7 +1885,7 @@ int xsc_cmd_init(struct xsc_core_device *xdev) cmd->log_sz = Q_DEPTH_LOG; cmd->log_stride = readl(REG_ADDR(xdev, cmd->reg.element_sz_addr)); - writel(1<log_sz, REG_ADDR(xdev, cmd->reg.q_depth_addr)); + writel(1 << cmd->log_sz, REG_ADDR(xdev, cmd->reg.q_depth_addr)); if (cmd->log_stride != ELEMENT_SIZE_LOG) { dev_err(&xdev->pdev->dev, "firmware failed to init cmdq, log_stride=(%d, %d)\n", cmd->log_stride, ELEMENT_SIZE_LOG); @@ -1810,12 +1910,6 @@ int xsc_cmd_init(struct xsc_core_device *xdev) cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; -// cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; -// if (cmd->cmdif_rev > CMD_IF_REV) { -// err = -ENOTSUPP; -// goto err_map; -// } - spin_lock_init(&cmd->alloc_lock); spin_lock_init(&cmd->token_lock); spin_lock_init(&cmd->doorbell_lock); @@ -1823,7 +1917,6 @@ int xsc_cmd_init(struct xsc_core_device *xdev) spin_lock_init(&cmd->stats[i].lock); sema_init(&cmd->sem, cmd->max_reg_cmds); - sema_init(&cmd->pages_sem, 1); cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); @@ -1833,10 +1926,6 @@ int xsc_cmd_init(struct xsc_core_device *xdev) goto err_map; } -// iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); -// iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); -// u32 *ptr = (u32 *)&cmd->dma; -// IA_WRITE(xdev, CMDQ_PA_REG_ADDR, ptr, sizeof(u64) / sizeof(u32)); writel(cmd_h, REG_ADDR(xdev, cmd->reg.req_buf_h_addr)); writel(cmd_l, REG_ADDR(xdev, cmd->reg.req_buf_l_addr)); @@ -1854,7 +1943,7 @@ int xsc_cmd_init(struct xsc_core_device *xdev) 
wmb(); xsc_core_dbg(xdev, "descriptor at dma 0x%llx 0x%llx\n", - (unsigned long long)(cmd->dma), (unsigned long long)(cmd->cq_dma)); + (unsigned long long)(cmd->dma), (unsigned long long)(cmd->cq_dma)); cmd->mode = CMD_MODE_POLLING; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c index 712524130295dac14e5f37ad02ad1bfa1f5040dc..49a00f759b5fdecf9ac82c70c64f92d18081b6b4 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c @@ -1,13 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include #include -#include -#include +#include "common/driver.h" +#include "common/cq.h" #include void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type) @@ -35,7 +34,7 @@ void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type) } int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, - struct xsc_create_cq_mbox_in *in, int inlen) + struct xsc_create_cq_mbox_in *in, int inlen) { int err; struct xsc_cq_table *table = &dev->dev_res->cq_table; @@ -70,8 +69,7 @@ int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, cq->pid = current->pid; err = xsc_debug_cq_add(dev, cq); if (err) - xsc_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", - cq->cqn); + xsc_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", cq->cqn); return 0; @@ -115,8 +113,6 @@ int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq) if (out.hdr.status) return xsc_cmd_status_to_err(&out.hdr); - //synchronize_irq(cq->irqn); - xsc_debug_cq_remove(dev, cq); if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); @@ -127,7 +123,7 @@ int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq) 
EXPORT_SYMBOL(xsc_core_destroy_cq); int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, - struct xsc_query_cq_mbox_out *out) + struct xsc_query_cq_mbox_out *out) { struct xsc_query_cq_mbox_in in; int err; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c index 5c31712f01ca7b5c821f04bd2cf7de59bf6bd5e1..66f0ae2cc65121e00917685a57c28e12803993e8 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c @@ -1,16 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include #include -#include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/qp.h" +#include "common/cq.h" #include "fw/xsc_tbm.h" enum { @@ -131,12 +130,11 @@ static ssize_t xsc_debugfs_reg_write(struct file *filp, offset += 5; num = 0; while (1) { - cnt = sscanf(&xsc_debugfs_reg_buf[offset], "%x %n", - &value, &tmp); + cnt = sscanf(&xsc_debugfs_reg_buf[offset], "%x %n", &value, &tmp); if (cnt < 2) break; xsc_core_info(xdev, "write: 0x%llx = 0x%x\n", - (reg + sizeof(int) * num), value); + (reg + sizeof(int) * num), value); offset += tmp; buf[num++] = value; if (num == 8) @@ -145,8 +143,9 @@ static ssize_t xsc_debugfs_reg_write(struct file *filp, if (num > 1) { ptr = &buf[0]; IA_WRITE(xdev, reg, ptr, num); - } else if (num == 1) + } else if (num == 1) { REG_WR32(xdev, reg, buf[0]); + } } else { xsc_core_err(xdev, "write \n"); } @@ -165,7 +164,7 @@ static ssize_t xsc_debugfs_reg_write(struct file *filp, xsc_core_info(xdev, "read: 0x%llx num:%d\n", reg, num); for (i = 0; i < num; i++) xsc_core_info(xdev, "read:0x%llx = %#x\n", - (reg + sizeof(int) * i), buf[i]); + (reg + sizeof(int) * i), buf[i]); } else if 
(cnt == 1) { int value = REG_RD32(xdev, reg); @@ -190,7 +189,7 @@ static const struct file_operations xsc_debugfs_reg_fops = { }; static ssize_t xsc_debugfs_vlan_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { char *buf; int len; @@ -217,8 +216,8 @@ static ssize_t xsc_debugfs_vlan_read(struct file *filp, char __user *buffer, } static ssize_t xsc_debugfs_vlan_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) + const char __user *buffer, + size_t count, loff_t *ppos) { struct xsc_core_device *xdev = filp->private_data; struct xsc_vlan_config config; @@ -235,8 +234,8 @@ static ssize_t xsc_debugfs_vlan_write(struct file *filp, return -ENOSPC; len = simple_write_to_buffer(xsc_debugfs_vlan_buf, - sizeof(xsc_debugfs_vlan_buf) - 1, - ppos, buffer, count); + sizeof(xsc_debugfs_vlan_buf) - 1, + ppos, buffer, count); if (len < 0) return len; @@ -269,9 +268,9 @@ static ssize_t xsc_debugfs_vlan_write(struct file *filp, } cnt = sscanf(&xsc_debugfs_vlan_buf[off], "%u %u %u %s %u %u", - &config.pvid, &config.vid_allow_base, - &config.vid_allow_num, proto, - &config.prio, &config.smac_filter_en); + &config.pvid, &config.vid_allow_base, + &config.vid_allow_num, proto, + &config.prio, &config.smac_filter_en); if (cnt < 3) { xsc_core_err(xdev, "error arguments: \n"); return 0; @@ -290,9 +289,9 @@ static ssize_t xsc_debugfs_vlan_write(struct file *filp, } xsc_core_info(xdev, "%s: vlan_mode=%d vid=%d vlan_allow=%d/%d proto=0x%x prio=%d smac_en=%d", - __func__, config.mode, config.pvid, config.vid_allow_base, - config.vid_allow_num, config.proto, config.prio, - config.smac_filter_en); + __func__, config.mode, config.pvid, config.vid_allow_base, + config.vid_allow_num, config.proto, config.prio, + config.smac_filter_en); return count; } @@ -310,12 +309,10 @@ int xsc_vlan_debugfs_init(struct xsc_core_device *dev) if (dev->dev_res->dbg_root) { pfile = debugfs_create_file("vlan", 0644, - 
dev->dev_res->dbg_root, dev, - &xsc_debugfs_vlan_fops); + dev->dev_res->dbg_root, dev, + &xsc_debugfs_vlan_fops); if (!pfile) xsc_core_err(dev, "failed to create vlan debugfs\n"); - else - xsc_core_info(dev, "create vlan debugfs ok\n"); } return 0; @@ -335,15 +332,13 @@ int xsc_debugfs_init(struct xsc_core_device *dev) dev->dev_res->dbg_root, dev, &xsc_debugfs_reg_fops); if (!pfile) - xsc_core_err(dev, "failed to create debugfs ops for %s\n", - name); + xsc_core_err(dev, "failed to create debugfs ops for %s\n", name); } else { xsc_core_err(dev, "failed to create debugfs dir for %s\n", name); return -ENOMEM; } xsc_vlan_debugfs_init(dev); - xsc_core_info(dev, "%s.dir_name=%s\r\n", __func__, name); return 0; } @@ -483,8 +478,7 @@ int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev) if (strcmp(namep, "unknown command opcode")) { stats->root = debugfs_create_dir(namep, *cmdif_debugfs); if (!stats->root) { - xsc_core_warn(xdev, "failed adding command %d\n", - i); + xsc_core_warn(xdev, "failed adding command %d\n", i); err = -ENOMEM; goto out; } @@ -815,15 +809,66 @@ void xsc_debug_qp_remove(struct xsc_core_device *dev, struct xsc_core_qp *qp) rem_res_tree(qp->dbg); } +static int set_udp_sport(u32 qpn, u32 sport, struct xsc_core_device *xdev, struct xsc_qp_trace *t) +{ + int err; + struct xsc_ap_feat_mbox_in in; + struct xsc_ap_feat_mbox_out out; + struct timespec64 ts; + struct xsc_qpt_update_msg msg; + + ktime_get_boottime_ts64(&ts); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_AP_FEAT); + in.xsc_ap_feature_opcode = __cpu_to_be16(XSC_AP_FEAT_SET_UDP_SPORT); + in.ap.set_udp_sport.qpn = __cpu_to_be32(qpn); + in.ap.set_udp_sport.udp_sport = __cpu_to_be32(sport); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to set udp_sport, err(%u), status(%u)\n", err, + out.hdr.status); + return -EINVAL; + } + + msg.main_ver = 
YS_QPTRACE_VER_MAJOR; + msg.sub_ver = YS_QPTRACE_VER_MINOR; + msg.type = YS_QPTRACE_UPDATE_TYPE_SPORT; + msg.data.timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC + + ts.tv_nsec / NSEC_PER_MSEC; + msg.data.qpn = qpn; + msg.data.bus = xdev->bus_num; + msg.data.dev = xdev->dev_num; + msg.data.fun = xdev->func_id; + msg.data.update.sport.port_old = t->s_port; + msg.data.update.sport.port_new = __cpu_to_be16(sport); + t->s_port = msg.data.update.sport.port_new; + + qpts_write_one_msg(&msg); + + xsc_core_info(xdev, "Set qpn(%u) udp_sport(%u)\n", qpn, sport); + + return 0; +} + static ssize_t trace_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { - struct xsc_qp_trace *trace_info = filp->private_data; + struct xsc_core_qp *qp = filp->private_data; + struct xsc_qp_trace *trace_info; int err; int len; if (*pos) return 0; + if (!qp || !qp->trace_info) + return -EIO; + + trace_info = qp->trace_info; + len = sizeof(struct xsc_qp_trace); err = copy_to_user(buf, trace_info, len); if (err) @@ -833,10 +878,78 @@ static ssize_t trace_read(struct file *filp, char __user *buf, size_t count, lof return len; } +static ssize_t trace_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) +{ + struct xsc_core_qp *qp = filp->private_data; + struct xsc_qp_trace *trace_info; + struct xsc_core_device *xdev; + int ret = 0, len; + u32 sport; + char tmp_buf[256] = ""; + + ret = -EIO; + if (!qp || !qp->dbg || !qp->dbg->xdev || !qp->trace_info) { + pr_err("%s error null pointer!\n", __func__); + goto trace_write_out; + } + + trace_info = qp->trace_info; + xdev = qp->dbg->xdev; + + ret = 0; + /* don't allow partial writes */ + if (*pos != 0) { + xsc_core_err(xdev, "Don't allow partial writes!\n"); + goto trace_write_out; + } + + ret = -ENOSPC; + if (count >= sizeof(tmp_buf)) { + xsc_core_err(xdev, "Count out of size of buffer!\n"); + goto trace_write_out; + } + + len = simple_write_to_buffer(tmp_buf, sizeof(tmp_buf) - 1, + pos, buf, count); + ret = len; + 
if (len < 0) { + xsc_core_err(xdev, "Write to buffer error(%d)!\n", len); + goto trace_write_out; + } + + tmp_buf[len] = '\0'; + + // + // sport 10000 + if (strncmp(tmp_buf, "sport", 5) == 0) { + ret = kstrtouint(&tmp_buf[6], 0, &sport); + if (ret != 0) { + xsc_core_err(xdev, "error arguments: \n"); + ret = -EINVAL; + goto trace_write_out; + } + ret = set_udp_sport(trace_info->lqpn, sport, xdev, trace_info); + if (ret) { + ret = -EIO; + goto trace_write_out; + } + } else { + xsc_core_err(xdev, "invalid arguments: %s\n", tmp_buf); + ret = -EOPNOTSUPP; + goto trace_write_out; + } + + return count; + +trace_write_out: + return ret; +} + static const struct file_operations fops_trace = { .owner = THIS_MODULE, .open = simple_open, .read = trace_read, + .write = trace_write, }; int xsc_create_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) @@ -848,8 +961,8 @@ int xsc_create_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) snprintf(name, sizeof(name), "%d", qp->qpn); - qp->trace = debugfs_create_file(name, 0400, dev->dev_res->qptrace_debugfs, - (void *)qp->trace_info, &fops_trace); + qp->trace = debugfs_create_file(name, 0644, dev->dev_res->qptrace_debugfs, + (void *)qp, &fops_trace); if (!qp->trace) return -1; @@ -913,4 +1026,3 @@ void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq) if (cq->dbg) rem_res_tree(cq->dbg); } - diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c index 8493d61617b930c98b951d1d75dfcde1adffe067..aa6fe1e17919605ca1d362382f1725ea257eb72c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c @@ -1,10 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ -#include +#include "common/xsc_core.h" #include "devlink.h" #ifdef CONFIG_XSC_ESWITCH #include "eswitch.h" @@ -30,8 +29,11 @@ void xsc_devlink_free(struct devlink *devlink) int xsc_devlink_register(struct devlink *devlink, struct device *dev) { int err = 0; - +#ifdef HAVE_DEVLINK_ALLOC_DEV_PARM + err = devlink_register(devlink); +#else err = devlink_register(devlink, dev); +#endif return err; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h index 0dbbadd87f85dd2d7e228761f96b21eb57596729..287e8019e73f0b64d05da5faf131d1ebdd91b1bb 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c index 3a6d17fb7d9e9f25c13237d76bf34a75c6c5b348..b5c2c235b108fa65dd4bd6360d4e2d24a1f752e1 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c @@ -1,16 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ - #include #include -#include -#include +#include "common/driver.h" +#include "common/cq.h" #include "fw/xsc_fw.h" #include "wq.h" -#include +#include "common/xsc_core.h" enum { XSC_EQE_SIZE = sizeof(struct xsc_eqe), @@ -66,17 +64,6 @@ static struct xsc_eqe *next_eqe_sw(struct xsc_eq *eq) return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; } -#ifdef XSC_DEBUG -static const char *eqe_type_str(u8 type) -{ - switch (type) { - case XSC_EVENT_TYPE_COMP: - return "XSC_EVENT_TYPE_COMP"; - default: - return "Unrecognized event"; - } -} -#endif static void eq_update_ci(struct xsc_eq *eq, int arm) { union xsc_eq_doorbell db; @@ -86,8 +73,7 @@ static void eq_update_ci(struct xsc_eq *eq, int arm) db.eq_next_cid = eq->cons_index; db.eq_id = eq->eqn; #ifdef XSC_DEBUG - xsc_core_dbg(eq->dev, "ARM EQ %d ci 0x%x arm %d\n", - eq->eqn, eq->cons_index, arm); + xsc_core_dbg(eq->dev, "ARM EQ %d ci 0x%x arm %d\n", eq->eqn, eq->cons_index, arm); #endif writel(db.val, REG_ADDR(eq->dev, eq->doorbell)); /* We still want ordering, just not swabbing, so add a barrier */ @@ -106,7 +92,7 @@ void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) rcu_read_unlock(); if (!cq) { - xsc_core_err(dev, "Completion event for bogus CQ 0x%x\n", cqn); + xsc_core_err(dev, "Completion event for bogus CQ, cqn=%d\n", cqn); return; } @@ -133,7 +119,7 @@ void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) spin_unlock(&table->lock); if (unlikely(!cq)) { - xsc_core_err(dev, "Async event for bogus CQ 0x%x\n", cqn); + xsc_core_err(dev, "Async event for bogus CQ, cqn=%d\n", cqn); return; } @@ -141,7 +127,6 @@ void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); - } static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) @@ -157,8 +142,8 @@ static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) */ rmb(); #ifdef XSC_DEBUG - xsc_core_dbg(eq->dev, "eqn %d, eqe type %s cqn/qpn: %d\n", - eq->eqn, eqe_type_str(eqe->type), eqe->queue_id); + xsc_core_dbg(eq->dev, "eqn=%d, eqe_type=%d, cqn/qpn=%d\n", + eq->eqn, eqe->type, eqe->queue_id); #endif switch (eqe->type) { case XSC_EVENT_TYPE_COMP: @@ -182,7 +167,7 @@ static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) xsc_qp_event(dev, qpn, eqe->type); 
break; default: - xsc_core_warn(dev, "Unhandle event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); + xsc_core_warn(dev, "Unhandle event %d on EQ %d\n", eqe->type, eq->eqn); break; } @@ -198,7 +183,7 @@ static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) */ if (unlikely(set_ci >= XSC_NUM_SPARE_EQE)) { xsc_core_dbg(dev, "EQ%d eq_num=%d qpn=%d, db_noarm\n", - eq->eqn, set_ci, eqe->queue_id); + eq->eqn, set_ci, eqe->queue_id); eq_update_ci(eq, 0); set_ci = 0; } @@ -207,7 +192,7 @@ static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) eq_update_ci(eq, 1); #ifdef XSC_DEBUG xsc_core_dbg(dev, "EQ%d eq_num=%d qpn=%d, db_arm\n", - eq->eqn, set_ci, (eqe?eqe->queue_id:0)); + eq->eqn, set_ci, (eqe ? eqe->queue_id : 0)); #endif return eqes_found; @@ -238,7 +223,7 @@ static void init_eq_buf(struct xsc_eq *eq) } int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, - int nent, const char *name) + int nent, const char *name) { struct xsc_dev_resource *dev_res = dev->dev_res; u16 msix_vec_offset = dev->msix_vec_base + vecidx; @@ -249,8 +234,7 @@ int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, int hw_npages; eq->nent = roundup_pow_of_two(roundup(nent, XSC_NUM_SPARE_EQE)); - err = xsc_buf_alloc(dev, eq->nent * XSC_EQE_SIZE, PAGE_SIZE, - &eq->buf); + err = xsc_buf_alloc(dev, eq->nent * XSC_EQE_SIZE, PAGE_SIZE, &eq->buf); if (err) return err; @@ -279,7 +263,6 @@ int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, goto err_in; if (out.hdr.status) { - //err = xsc_cmd_status_to_err(&out.hdr); err = -ENOSPC; goto err_in; } @@ -293,7 +276,7 @@ int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, eq->doorbell = dev->regs.event_db; eq->index = vecidx; xsc_core_dbg(dev, "msix%d request vector%d eq%d irq%d\n", - vecidx, msix_vec_offset, eq->eqn, eq->irqn); + vecidx, msix_vec_offset, eq->eqn, eq->irqn); err = request_irq(eq->irqn, xsc_msix_handler, 0, 
dev_res->irq_info[vecidx].name, eq); @@ -326,7 +309,7 @@ int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq) err = xsc_cmd_destroy_eq(dev, eq->eqn); if (err) xsc_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", - eq->eqn); + eq->eqn); xsc_buf_free(dev, &eq->buf); return err; @@ -357,7 +340,7 @@ int xsc_start_eqs(struct xsc_core_device *dev) int err; err = xsc_create_map_eq(dev, &table->async_eq, XSC_EQ_VEC_ASYNC, - XSC_NUM_ASYNC_EQE, "xsc_async_eq"); + XSC_NUM_ASYNC_EQE, "xsc_async_eq"); if (err) xsc_core_warn(dev, "failed to create async EQ %d\n", err); @@ -373,7 +356,7 @@ void xsc_stop_eqs(struct xsc_core_device *dev) } int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, - struct xsc_query_eq_mbox_out *out, int outlen) + struct xsc_query_eq_mbox_out *out, int outlen) { struct xsc_query_eq_mbox_in in; int err = 0; @@ -383,10 +366,6 @@ int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EQ); in.eqn = eq->eqn; - //err = xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); - //if (err) - // return err; - if (out->hdr.status) err = xsc_cmd_status_to_err(&out->hdr); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c index a9f37f7a81ee2274b82992d084341e6d59034b7c..442cf9703cdf4c818f50107680f342a9c2f9e15f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c @@ -1,22 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include #include #include -#include +#include "common/vport.h" #include "fw/xsc_tbm.h" #include "eswitch.h" -#include - -u8 xsc_eswitch_mode(struct xsc_eswitch *esw) -{ - return ESW_ALLOWED(esw) ? 
esw->mode : XSC_ESWITCH_NONE; -} -EXPORT_SYMBOL_GPL(xsc_eswitch_mode); +#include "common/xsc_lag.h" static int xsc_eswitch_check(const struct xsc_core_device *dev) { @@ -40,7 +33,7 @@ xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num) if (idx > esw->total_vports - 1) { xsc_core_dbg(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n", - vport_num, idx); + vport_num, idx); return ERR_PTR(-EINVAL); } @@ -84,9 +77,7 @@ static int esw_mode_to_devlink(u16 xsc_mode, u16 *mode) return 0; } -int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode - , struct netlink_ext_ack *extack - ) +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { struct xsc_core_device *dev = devlink_priv(devlink); struct xsc_eswitch *esw = dev->priv.eswitch; @@ -110,7 +101,10 @@ int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode if (cur_xsc_mode == xsc_mode) goto done; - if (xsc_mode == XSC_ESWITCH_OFFLOADS && cur_xsc_mode != XSC_ESWITCH_LEGACY) { + if ((cur_xsc_mode != XSC_ESWITCH_LEGACY && xsc_mode == XSC_ESWITCH_OFFLOADS) || + (cur_xsc_mode == XSC_ESWITCH_OFFLOADS && xsc_mode == XSC_ESWITCH_LEGACY)) { + xsc_core_err(dev, "%s failed: do not set mode %d to mode %d\n", + __func__, cur_xsc_mode, xsc_mode); mutex_unlock(&esw->mode_lock); return -EOPNOTSUPP; } @@ -120,7 +114,7 @@ int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode xsc_lag_enable(dev); if (esw->mode == XSC_ESWITCH_OFFLOADS) - xsc_set_vf_pp_status(dev, false); + xsc_cmd_modify_hca(dev); done: mutex_unlock(&esw->mode_lock); @@ -147,7 +141,6 @@ int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) static void esw_vport_change_handle_locked(struct xsc_vport *vport) { struct xsc_core_device *dev = vport->dev; - //struct xsc_eswitch *esw = dev->priv.eswitch; u8 mac[ETH_ALEN]; xsc_query_other_nic_vport_mac_address(dev, vport->vport, mac); @@ -165,11 +158,9 @@ static void esw_vport_change_handler(struct work_struct *work) } 
void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, - struct xsc_vport *vport, - enum xsc_eswitch_vport_event enabled_events) + struct xsc_vport *vport, + enum xsc_eswitch_vport_event enabled_events) { - u16 vport_num = vport->vport; - mutex_lock(&esw->state_lock); if (vport->enabled) goto unlock_out; @@ -182,22 +173,13 @@ void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, vport->enabled_events = enabled_events; vport->enabled = true; - /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well - * in smartNIC as it's a vport group manager. - */ - //if (is_esw_manager_vport(esw, vport_num)) - // vport->info.trusted = true; - -// esw_vport_change_handle_locked(vport); - esw->enabled_vports++; - xsc_core_dbg(esw->dev, "Enabled VPORT(%d)\n", vport_num); unlock_out: mutex_unlock(&esw->state_lock); } void xsc_eswitch_disable_vport(struct xsc_eswitch *esw, - struct xsc_vport *vport) + struct xsc_vport *vport) { u16 vport_num = vport->vport; @@ -223,7 +205,7 @@ void xsc_eswitch_disable_vport(struct xsc_eswitch *esw, } void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw, - enum xsc_eswitch_vport_event enabled_events) + enum xsc_eswitch_vport_event enabled_events) { struct xsc_vport *vport; int i; @@ -257,11 +239,6 @@ int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs) { int err; - //if (!ESW_ALLOWED(esw) || !esw->dev.caps.ft_support) { - // xsc_core_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); - // return -EOPNOTSUPP; - //} - lockdep_assert_held(&esw->mode_lock); esw->num_vfs = num_vfs; @@ -278,11 +255,9 @@ int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs) esw->mode = mode; - //xsc_eswitch_event_handlers_register(esw); - xsc_core_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", - mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", - num_vfs, esw->enabled_vports); + mode == XSC_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", + num_vfs, esw->enabled_vports); return 0; @@ -310,17 +285,11 @@ void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf) return; xsc_core_info(esw->dev, "Disable: mode(%s)\n", - esw->mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS"); + esw->mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS"); -// xsc_eswitch_event_handlers_unregister(esw); - - //if (esw->mode == XSC_ESWITCH_LEGACY) - // esw_legacy_disable(esw); old_mode = esw->mode; esw->mode = XSC_ESWITCH_NONE; - //if (clear_vf) - // xsc_eswitch_clear_vf_vports_info(esw); esw->num_vfs = 0; } @@ -341,7 +310,8 @@ int xsc_eswitch_init(struct xsc_core_device *dev) int i, total_vports, err; if (!XSC_VPORT_MANAGER(dev)) { - xsc_core_info(dev, "%s XSC_VPORT_MANAGER check fail\n", __func__); + if (xsc_core_is_pf(dev)) + xsc_core_err(dev, "%s XSC_VPORT_MANAGER check fail\n", __func__); return 0; } @@ -377,6 +347,7 @@ int xsc_eswitch_init(struct xsc_core_device *dev) vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO; vport->info.vlan_proto = htons(ETH_P_8021Q); vport->info.roce = true; + vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); @@ -407,6 +378,7 @@ void xsc_eswitch_cleanup(struct xsc_core_device *dev) kfree(dev->priv.eswitch); } +#ifdef XSC_ESW_GUID_ENABLE static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) { ((u8 *)node_guid)[7] = mac[0]; @@ -418,16 +390,21 @@ static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) ((u8 *)node_guid)[1] = mac[4]; ((u8 *)node_guid)[0] = mac[5]; } +#endif int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]) + u16 vport, u8 mac[ETH_ALEN]) { struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); - u64 node_guid; int err = 0; +#ifdef XSC_ESW_GUID_ENABLE + u64 node_guid; +#endif + if (IS_ERR(evport)) return PTR_ERR(evport); + if (is_multicast_ether_addr(mac)) return -EINVAL; @@ -435,26 +412,28 @@ int xsc_eswitch_set_vport_mac(struct 
xsc_eswitch *esw, if (evport->info.spoofchk && !is_valid_ether_addr(mac)) xsc_core_warn(esw->dev, - "Set invalid MAC while spoofchk is on, vport(%d)\n", - vport); + "Set invalid MAC while spoofchk is on, vport(%d)\n", + vport); - err = xsc_modify_other_nic_vport_mac_address(esw->dev, vport, mac); + err = xsc_modify_other_nic_vport_mac_address(esw->dev, vport, mac, false); if (err) { - xsc_core_warn(esw->dev, - "Failed to xsc_modify_nic_vport_mac vport(%d) err=(%d)\n", - vport, err); + xsc_core_err(esw->dev, + "Failed to xsc_modify_nic_vport_mac vport(%d) err=(%d)\n", + vport, err); goto unlock; } + ether_addr_copy(evport->info.mac, mac); + +#ifdef XSC_ESW_GUID_ENABLE node_guid_gen_from_mac(&node_guid, mac); err = xsc_modify_other_nic_vport_node_guid(esw->dev, vport, node_guid); if (err) - xsc_core_warn(esw->dev, - "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", - vport, err); - - ether_addr_copy(evport->info.mac, mac); + xsc_core_err(esw->dev, + "Failed to set vport %d node guid, err = %d. 
RDMA_CM will not function properly for this VF.\n", + vport, err); evport->info.node_guid = node_guid; +#endif #ifdef XSC_ESW_FDB_ENABLE if (evport->enabled && esw->mode == XSC_ESWITCH_LEGACY) @@ -465,9 +444,10 @@ int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, mutex_unlock(&esw->state_lock); return err; } +EXPORT_SYMBOL(xsc_eswitch_set_vport_mac); int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, - u16 vport, u8 *mac) + u16 vport, u8 *mac) { struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); @@ -481,13 +461,13 @@ int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, } int __xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, u16 vlan, - u8 qos, __be16 proto, u8 set_flags) + u8 qos, __be16 proto, u8 set_flags) { return 0; } int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, - u16 vlan, u8 qos, __be16 vlan_proto) + u16 vlan, u8 qos, __be16 vlan_proto) { u8 set_flags = 0; int err = 0; @@ -523,7 +503,7 @@ int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, } int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, - u16 vport, bool spoofchk) + u16 vport, u8 spoofchk) { struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); bool pschk; @@ -542,11 +522,9 @@ int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; if (pschk && !is_valid_ether_addr(evport->info.mac)) - xsc_core_warn(esw->dev, - "Spoofchk in set while MAC is invalid, vport(%d)\n", - evport->vport); - //if (evport->enabled && esw->mode == XSC_ESWITCH_LEGACY) - // err = __xsc_eswitch_set_spoofchk(esw, evport); + xsc_core_warn(esw->dev, "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); + if (err) evport->info.spoofchk = pschk; @@ -556,8 +534,8 @@ int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, } static int xsc_eswitch_update_vport_trunk(struct xsc_eswitch *esw, - struct xsc_vport *evport, - unsigned long *old_trunk) + struct xsc_vport *evport, + 
unsigned long *old_trunk) { DECLARE_BITMAP(diff_vlan_bm, VLAN_N_VID); int err = 0; @@ -567,19 +545,14 @@ static int xsc_eswitch_update_vport_trunk(struct xsc_eswitch *esw, if (!bitmap_weight(diff_vlan_bm, VLAN_N_VID)) return err; - if (err) { - bitmap_copy(evport->info.vlan_trunk_8021q_bitmap, old_trunk, - VLAN_N_VID); - //esw_update_acl_trunk_bitmap(esw, evport->vport); - //esw_acl_egress_lgcy_setup(esw, evport); - //esw_acl_ingress_lgcy_setup(esw, evport); - } + if (err) + bitmap_copy(evport->info.vlan_trunk_8021q_bitmap, old_trunk, VLAN_N_VID); return err; } int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, - int vport, u16 start_vlan, u16 end_vlan) + int vport, u16 start_vlan, u16 end_vlan) { DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID); struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); @@ -598,8 +571,8 @@ int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, if (evport->info.vlan || evport->info.qos) { err = -EPERM; xsc_core_warn(esw->dev, - "VGT+ is not allowed when operating in VST mode vport(%d)\n", - vport); + "VGT+ is not allowed when operating in VST mode vport(%d)\n", + vport); goto unlock; } @@ -616,7 +589,7 @@ int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, } int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, - int vport, u16 start_vlan, u16 end_vlan) + int vport, u16 start_vlan, u16 end_vlan) { DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID); struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); @@ -658,8 +631,6 @@ int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, goto unlock; } evport->info.trusted = setting; -// if (evport->enabled) -// esw_vport_change_handle_locked(evport); unlock: mutex_unlock(&esw->state_lock); @@ -667,13 +638,13 @@ int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, } int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, - u32 max_rate, u32 min_rate) + u32 max_rate, u32 min_rate) { return 0; } int xsc_eswitch_get_vport_config(struct 
xsc_eswitch *esw, - u16 vport, struct ifla_vf_info *ivi) + u16 vport, struct ifla_vf_info *ivi) { struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); @@ -685,45 +656,39 @@ int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, mutex_lock(&esw->state_lock); ether_addr_copy(ivi->mac, evport->info.mac); - ivi->linkstate = evport->info.link_state; - ivi->vlan = evport->info.vlan; - ivi->qos = evport->info.qos; - ivi->vlan_proto = evport->info.vlan_proto; - ivi->spoofchk = evport->info.spoofchk; - ivi->trusted = evport->info.trusted; - ivi->min_tx_rate = evport->qos.min_rate; - ivi->max_tx_rate = evport->qos.max_rate; + mutex_unlock(&esw->state_lock); return 0; } +EXPORT_SYMBOL(xsc_eswitch_get_vport_config); int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, - u32 group_id) + u32 group_id) { return 0; } int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, - u32 max_rate) + u32 max_rate) { return 0; } int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, - u32 max_rate) + u32 max_rate) { return 0; } int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, - u32 min_rate) + u32 min_rate) { return 0; } int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, - bool other_vport, void *in, int inlen) + bool other_vport, void *in, int inlen) { return 0; } @@ -741,9 +706,8 @@ int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, } int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, - struct xsc_vport *vport, - struct xsc_vport_drop_stats *stats) + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats) { return 0; } - diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h index 50442a0c758706e2d4b646e998c5e00f90a06fb3..a0326058b4b77759581d23ac5c658e00e706c231 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h @@ 
-1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -13,8 +12,8 @@ #include #include #include -#include -#include +#include "common/xsc_core.h" +#include "common/vport.h" struct xsc_vport_drop_stats { u64 rx_dropped; @@ -28,55 +27,56 @@ int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs); void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf); void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf); -int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mod - , struct netlink_ext_ack *extack); +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mod, struct netlink_ext_ack *extack); int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); struct xsc_vport *__must_check xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num); +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi); int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]); + u16 vport, u8 mac[ETH_ALEN]); int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, - u16 vport, u8 *mac); + u16 vport, u8 *mac); int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, - u16 vlan, u8 qos, __be16 vlan_proto); + u16 vlan, u8 qos, __be16 vlan_proto); int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, - u16 vport, int link_state); + u16 vport, int link_state); int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, - u16 vport, bool spoofchk); + u16 vport, u8 spoofchk); int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, - u16 vport_num, bool setting); + u16 vport_num, bool setting); int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, - u32 max_rate, u32 min_rate); + u32 max_rate, u32 min_rate); int xsc_eswitch_vport_update_group(struct xsc_eswitch 
*esw, int vport_num, - u32 group_id); + u32 group_id); int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, - u32 max_rate); + u32 max_rate); int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, - u32 max_rate); + u32 max_rate); int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, - u32 min_rate); + u32 min_rate); int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, - int vport, u16 start_vlan, u16 end_vlan); + int vport, u16 start_vlan, u16 end_vlan); int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, - int vport, u16 start_vlan, u16 end_vlan); + int vport, u16 start_vlan, u16 end_vlan); int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, - bool other_vport, - void *in, int inlen); -int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, bool other_vport, - void *out, int outlen); + void *in, int inlen); +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *out, int outlen); int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, - u16 vport, - struct ifla_vf_stats *vf_stats); + u16 vport, + struct ifla_vf_stats *vf_stats); int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, - struct xsc_vport *vport, - struct xsc_vport_drop_stats *stats); + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats); #define xsc_esw_for_all_vports(esw, i, vport) \ for ((i) = XSC_VPORT_PF; \ (vport) = &(esw)->vports[(i)], \ - (i) <= (esw)->total_vports; (i)++) + (i) < (esw)->total_vports; (i)++) #define xsc_esw_for_each_vf_vport(esw, i, vport, nvfs) \ for ((i) = XSC_VPORT_FIRST_VF; \ @@ -95,7 +95,7 @@ static inline int xsc_eswitch_ecpf_idx(struct xsc_eswitch *esw) } static inline int xsc_eswitch_vport_num_to_index(struct xsc_eswitch *esw, - u16 vport_num) + u16 vport_num) { if (vport_num == XSC_VPORT_ECPF) { if (!xsc_ecpf_vport_exists(esw->dev) && @@ -112,7 +112,7 @@ static inline int 
xsc_eswitch_vport_num_to_index(struct xsc_eswitch *esw, } static inline u16 xsc_eswitch_index_to_vport_num(struct xsc_eswitch *esw, - int index) + int index) { if (index == xsc_eswitch_uplink_idx(esw)) return XSC_VPORT_UPLINK; @@ -131,5 +131,18 @@ static inline u16 xsc_eswitch_first_host_vport_num(struct xsc_core_device *dev) XSC_VPORT_PF : XSC_VPORT_FIRST_VF; } +static inline u8 xsc_get_eswitch_mode(struct xsc_core_device *dev) +{ + struct xsc_eswitch *esw = dev->priv.eswitch; + + return ESW_ALLOWED(esw) ? esw->mode : XSC_ESWITCH_NONE; +} + +static inline bool xsc_get_pp_bypass_res(struct xsc_core_device *dev) +{ + return (xsc_get_eswitch_mode(dev) == XSC_ESWITCH_OFFLOADS) || + (dev->device_id == XSC_MF_HOST_PF_DEV_ID); +} + #endif /* ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c index 34d7021d77ecfdacff4f28f5200a6ebebeaaa057..d45be76b8d0e6b65719100051bb41eb06689282f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c @@ -1,15 +1,66 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ -#include +#include "common/driver.h" #include -#include +#include "eswitch.h" + +#ifdef RUN_WITH_PSV +int xsc_cmd_query_psv_funcid(struct xsc_core_device *dev, + struct xsc_caps *caps) +{ + struct xsc_cmd_query_hca_cap_mbox_out *out; + struct xsc_cmd_query_hca_cap_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_PSV_FUNCID); + in.hdr.opmod = cpu_to_be16(0x1); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + /* accordence to xsc_core.h funcid[n] order must be: + * 0: pcie0_vf_begin + * 1: pcie0_vf_end + * 2: pcie0_pf_begin + * 3: pcie0_pf_end + * 4: pcie1_vf_begin + * 5: pcie1_vf_end + * 6: pcie1_pf_begin + * 7: pcie1_pf_end + */ + caps->funcid[0] = be16_to_cpu(out->hca_cap.funcid[0]);//pcie0_vf_begin + caps->funcid[1] = be16_to_cpu(out->hca_cap.funcid[1]);//pcie0_vf_end + caps->funcid[2] = be16_to_cpu(out->hca_cap.funcid[2]);//pcie0_pf_begin + caps->funcid[3] = be16_to_cpu(out->hca_cap.funcid[3]);//pcie0_pf_end + caps->funcid[4] = be16_to_cpu(out->hca_cap.funcid[4]);//pcie1_vf_begin + caps->funcid[5] = be16_to_cpu(out->hca_cap.funcid[5]);//pcie1_vf_end + caps->funcid[6] = be16_to_cpu(out->hca_cap.funcid[6]);//pcie1_pf_begin + caps->funcid[7] = be16_to_cpu(out->hca_cap.funcid[7]);//pcie1_pf_end + caps->funcid_valid = 1; + +out_out: + kfree(out); + + return err; +} +#endif int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, - struct xsc_caps *caps) + struct xsc_caps *caps) { struct xsc_cmd_query_hca_cap_mbox_out *out; struct xsc_cmd_query_hca_cap_mbox_in in; @@ -38,6 +89,37 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, } dev->glb_func_id = be32_to_cpu(out->hca_cap.glb_func_id); + + /* accordence to xsc_core.h funcid[n] order must be: + * 0: pcie0_vf_begin + * 1: pcie0_vf_end + * 2: 
pcie0_pf_begin + * 3: pcie0_pf_end + * 4: pcie1_vf_begin + * 5: pcie1_vf_end + * 6: pcie1_pf_begin + * 7: pcie1_pf_end + */ + caps->funcid[0] = be16_to_cpu(out->hca_cap.funcid[0]);//pcie0_vf_begin + caps->funcid[1] = be16_to_cpu(out->hca_cap.funcid[1]);//pcie0_vf_end + caps->funcid[2] = be16_to_cpu(out->hca_cap.funcid[2]);//pcie0_pf_begin + caps->funcid[3] = be16_to_cpu(out->hca_cap.funcid[3]);//pcie0_pf_end + caps->funcid[4] = be16_to_cpu(out->hca_cap.funcid[4]);//pcie1_vf_begin + caps->funcid[5] = be16_to_cpu(out->hca_cap.funcid[5]);//pcie1_vf_end + caps->funcid[6] = be16_to_cpu(out->hca_cap.funcid[6]);//pcie1_pf_begin + caps->funcid[7] = be16_to_cpu(out->hca_cap.funcid[7]);//pcie1_pf_end + caps->funcid_valid = 1; + if (xsc_core_is_pf(dev)) { + xsc_core_dbg(dev, "pcie0_vf_range=(%4u, %4u), pcie0_pf_begin=(%4u, %4u)\n", + caps->funcid[0], caps->funcid[1], + caps->funcid[2], caps->funcid[3]); + xsc_core_dbg(dev, "pcie1_vf_range=(%4u, %4u), pcie1_pf_begin=(%4u, %4u)\n", + caps->funcid[4], caps->funcid[5], + caps->funcid[6], caps->funcid[7]); + } + caps->nif_port_num = out->hca_cap.nif_port_num; + caps->hw_feature_flag = be32_to_cpu(out->hca_cap.hw_feature_flag); + caps->raweth_qp_id_base = be16_to_cpu(out->hca_cap.raweth_qp_id_base); caps->raweth_qp_id_end = be16_to_cpu(out->hca_cap.raweth_qp_id_end); caps->raweth_rss_qp_id_base = be16_to_cpu(out->hca_cap.raweth_rss_qp_id_base); @@ -57,7 +139,7 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, caps->mac_port = out->hca_cap.mac_port & 0xff; if (caps->num_ports > XSC_MAX_FW_PORTS) { xsc_core_err(dev, "device has %d ports while the driver supports max %d ports\n", - caps->num_ports, XSC_MAX_FW_PORTS); + caps->num_ports, XSC_MAX_FW_PORTS); err = -EINVAL; goto out_out; } @@ -121,7 +203,7 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, caps->qos = 1; caps->ets = 1; caps->dscp = 1; - caps->max_tc = PRI_NUM; + caps->max_tc = out->hca_cap.max_tc; caps->log_max_qp_depth = out->hca_cap.log_max_qp_depth & 
0xff; dev->chip_ver_h = be32_to_cpu(out->hca_cap.chip_ver_h); @@ -139,18 +221,6 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, dev->regs.complete_reg = be64_to_cpu(out->hca_cap.complete_reg); dev->regs.event_db = be64_to_cpu(out->hca_cap.event_db); } - //memset(&ctx_in, 0, sizeof(ctx_in)); - //memset(&ctx_out, 0, sizeof(ctx_out)); - //ctx_in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS); - //err = xsc_cmd_exec(dev, &ctx_in, sizeof(ctx_in), - // &ctx_out, sizeof(ctx_out)); - //if (err) - // goto out_out; - - //if (ctx_out.hdr.status) - // err = xsc_cmd_status_to_err(&ctx_out.hdr); - - //caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey); out_out: kfree(out); @@ -158,80 +228,77 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, return err; } -int xsc_cmd_init_hca(struct xsc_core_device *dev) +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix) { - struct xsc_cmd_init_hca_mbox_in in; - struct xsc_cmd_init_hca_mbox_out out; + struct xsc_cmd_enable_hca_mbox_in in; + struct xsc_cmd_enable_hca_mbox_out out; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_INIT_HCA); - err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = xsc_cmd_status_to_err(&out.hdr); - - return err; -} + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_HCA); + in.pf = dev->pf; + in.pcie = g_xsc_pcie_no; + in.pf_id = dev->pf_id; -int xsc_cmd_teardown_hca(struct xsc_core_device *dev) -{ - struct xsc_cmd_teardown_hca_mbox_in in; - struct xsc_cmd_teardown_hca_mbox_out out; - int err; + in.vf_num = cpu_to_be16(vf_num); + in.max_msix_vec = cpu_to_be16(max_msix); + in.cpu_num = cpu_to_be16(num_online_cpus()); + in.pp_bypass = xsc_get_pp_bypass_res(dev); + in.esw_mode = xsc_get_eswitch_mode(dev); - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_TEARDOWN_HCA); err = 
xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = xsc_cmd_status_to_err(&out.hdr); + if (err || out.hdr.status) { + xsc_core_err(dev, + "cpu's msix vec(%u) not enough for all %u vfs, err=%d, status=%d\n", + max_msix, vf_num, err, out.hdr.status); + return -EINVAL; + } return err; } -int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_idx) +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num) { - struct xsc_cmd_enable_hca_mbox_in in; - struct xsc_cmd_enable_hca_mbox_out out; + struct xsc_cmd_disable_hca_mbox_in in; + struct xsc_cmd_disable_hca_mbox_out out; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_HCA); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_HCA); in.pf = dev->pf; - in.pcie = dev->pcie; + in.pcie = g_xsc_pcie_no; in.pf_id = dev->pf_id; - in.vf_id = cpu_to_be16(vf_idx); - err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; + in.vf_num = cpu_to_be16(vf_num); + in.pp_bypass = xsc_get_pp_bypass_res(dev); + in.esw_mode = xsc_get_eswitch_mode(dev); - if (out.hdr.status) - err = xsc_cmd_status_to_err(&out.hdr); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to disable hca, err=%d, status=%d\n", + err, out.hdr.status); + return -EINVAL; + } return err; } -int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_idx) +int xsc_cmd_modify_hca(struct xsc_core_device *dev) { - struct xsc_cmd_disable_hca_mbox_in in; - struct xsc_cmd_disable_hca_mbox_out out; + struct xsc_cmd_modify_hca_mbox_in in; + struct xsc_cmd_modify_hca_mbox_out out; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_HCA); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_HCA); in.pf = dev->pf; - in.pcie = dev->pcie; + in.pcie = g_xsc_pcie_no; in.pf_id = dev->pf_id; - 
in.vf_id = cpu_to_be16(vf_idx); + in.pp_bypass = xsc_get_pp_bypass_res(dev); + in.esw_mode = xsc_get_eswitch_mode(dev); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; @@ -250,7 +317,7 @@ int xsc_get_board_id(struct xsc_core_device *dev) int i = 0; xsc_core_info(dev, "board_sn=%s, current_board_num=%d\n", - dev->board_sn, xsc_board_num); + dev->board_sn, xsc_board_num); if (strnlen(dev->board_sn, XSC_BOARD_SN_LEN) == 0) return 0; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h index 0e6f193a6f8e95b47267a42d607d76e9f60ce7be..71c7098c3aeecadb8449b529cad68ac78cc70383 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -10,7 +9,7 @@ #include #include -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) #define round_down(x, y) ((x) & ~__round_mask(x, y)) diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c index 7a54bcf1f054017d885d1880452240b507bdbc70..4ea8f4e8eddf96e71e2473bca52a9e44be18e0c4 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c @@ -1,13 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ -#include -#include -#include -#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" #include "xsc_reg_struct.h" #include "xsc_fw.h" @@ -97,13 +96,13 @@ static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void *out) u32 mem_size = be32_to_cpu(req->req.len); u32 pdn = be32_to_cpu(req->req.pdn); u32 key = be32_to_cpu(req->req.mkey); - int pages_num = be32_to_cpu(req->req.pa_num); + int pa_num = be32_to_cpu(req->req.pa_num); u32 *ptr; u64 reg_addr; int i; int reg_stride; - if (pages_num && alloc_mtt_entry(dev, pages_num, &mtt_base)) + if (pa_num && alloc_mtt_entry(dev, pa_num, &mtt_base)) return -EINVAL; mpt_idx = xsc_mkey_to_idx(key); @@ -120,7 +119,7 @@ static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void *out) get_xsc_res(dev)->mpt_entry[mpt_idx].va = va; get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base = mtt_base; - get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = pages_num; + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = pa_num; ptr = (u32 *)&mpt_ent; reg_stride = REG_WIDTH_TO_STRIDE(MMC_MPT_TBL_MEM_WIDTH); @@ -131,11 +130,12 @@ static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void *out) xsc_get_iae_idx(dev)); xsc_core_dbg(dev, "reg mr, write mpt[%u]: va=%llx, mem_size=%u, pdn=%u\n", - mpt_idx, va, mpt_ent.mem_size, mpt_ent.pdn); + mpt_idx, va, mpt_ent.mem_size, mpt_ent.pdn); xsc_core_dbg(dev, "key=%u, mtt_base=%u, acc=%u, page_mode=%u, mem_map_en=%u\n", - mpt_ent.key, mpt_ent.mtt_base, mpt_ent.acc, mpt_ent.page_mode, mpt_ent.mem_map_en); + mpt_ent.key, mpt_ent.mtt_base, mpt_ent.acc, + mpt_ent.page_mode, mpt_ent.mem_map_en); - for (i = 0; i < pages_num; i++) { + for (i = 0; i < pa_num; i++) { u64 pa = req->req.pas[i]; pa = be64_to_cpu(pa); @@ -203,7 +203,7 @@ int xsc_dereg_mr(struct xsc_core_device *xdev, void *in, void *out) } static int xsc_cmd_exec_ioctl_flow(struct xsc_core_device *dev, - void *in, void *out) + void *in, void 
*out) { struct xsc_ioctl_mbox_in *req; struct xsc_ioctl_mbox_out *resp; @@ -233,7 +233,7 @@ static int xsc_cmd_exec_ioctl_flow(struct xsc_core_device *dev, switch (opmod) { case XSC_IOCTL_OP_ADD: - ret = xsc_flow_add(dev, table, length, tl+1); + ret = xsc_flow_add(dev, table, length, tl + 1); break; default: ret = -EINVAL; @@ -249,7 +249,7 @@ static int xsc_cmd_exec_ioctl_flow(struct xsc_core_device *dev, } int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, - int out_size, int func_id) + int out_size, int func_id) { int opcode, ret = 0; unsigned long flags; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h index 15fc8f1c706ec1471326c6ca229b2b769300cfa0..e4246cb59093acdaeb51d8d77bd11c241d8bec80 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c index cb04ca2351820dc3d29804e9c4c4e2a3f7a53bbd..838f65401586045e0c79bbb0e9fa739f627c4968 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include -#include +#include "common/xsc_core.h" void xsc_lock_init(struct xsc_lock *lock) { @@ -32,7 +31,7 @@ void xsc_mmiowb(void) void xsc_wmb(void) { - /* keep order */ + /* mem barrier for xsc operation */ wmb(); } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h index 922a4467c7059a96c0e0b624bf3950c98e06fcf8..82861d94c0d2aff7e12e409f21a37e95098ce76a 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h @@ -1,13 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #ifndef OSDEP_H #define OSDEP_H -#include +#include "common/xsc_core.h" #define xsc_print printk diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h index 9c59657139a6c8ba4f342801e51ad206d811991c..44a1b78489024369aad1e090de06435018374f07 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c index 6218f8b21dfee319d61ecde7fa11fcb29ca45d57..55cba1b038900b31278ecf8506a4ddf1b50241fd 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. 
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ -#include -#include -#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" #include "xsc_flow.h" @@ -50,12 +49,12 @@ static inline void xsc_dma_wr_success_get(struct xsc_core_device *xdev, u32 *suc u32 *ptr = NULL; ptr = success; - IA_READ(xdev, CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR, ptr, (size/sizeof(u32))); + IA_READ(xdev, CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR, ptr, (size / sizeof(u32))); } int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, - const struct tdi_dma_write_key_bits *key, - const struct tdi_dma_write_action_bits *action) + const struct tdi_dma_write_key_bits *key, + const struct tdi_dma_write_action_bits *action) { u32 i = 0; u32 busy = 0; @@ -75,7 +74,6 @@ int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, dma_wr_num = ((action->entry_num + (XSC_DMA_WR_MAX - 1)) / XSC_DMA_WR_MAX); for (i = 0; i < dma_wr_num; i++) { - if ((action->entry_num % XSC_DMA_WR_MAX) && (i == (dma_wr_num - 1))) data_len = ((action->entry_num % XSC_DMA_WR_MAX) * XSC_DMA_LEN); else @@ -107,7 +105,7 @@ int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, memset(success, 0, sizeof(success)); xsc_dma_wr_success_get(xdev, (u32 *)&success, sizeof(success)); xsc_core_err(xdev, "DMA write time %d status 0x%lx%lx fail.\n", i, - (unsigned long)success[1], (unsigned long)success[0]); + (unsigned long)success[1], (unsigned long)success[0]); return -1; } } @@ -121,8 +119,8 @@ void xsc_dma_read_done_complete(void) } int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, - const struct tdi_dma_read_key_bits *key, - const struct tdi_dma_read_action_bits *action) + const struct tdi_dma_read_key_bits *key, + const struct tdi_dma_read_action_bits *action) { u32 busy = 0; u32 value = 0; @@ -171,7 +169,7 @@ int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, } int xsc_flow_add(struct xsc_core_device *xdev, - int 
table, int length, void *data) + int table, int length, void *data) { int ret = -EINVAL; struct xsc_flow_dma_write_add *dma_wr; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h index 1b3455e5a4fe6e8a12b1e4d43a96070daf6247a9..ec7c7a2c39597be0378975e823cf10d53c84e21f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -15,26 +14,26 @@ /* key */ struct tdi_dma_write_key_bits { - u8 host_id:1; - u16 func_id:11; + uint8_t host_id:1; + uint16_t func_id:11; } __packed; struct tdi_dma_read_key_bits { - u16 tbl_start_addr:16; - u8 tbl_id:7; - u8 host_id:1; - u16 func_id:11; + uint16_t tbl_start_addr:16; + uint8_t tbl_id:7; + uint8_t host_id:1; + uint16_t func_id:11; } __packed; /* action */ struct tdi_dma_write_action_bits { - u32 entry_num:32; - u64 data_addr:64; + uint32_t entry_num:32; + uint64_t data_addr:64; } __packed; struct tdi_dma_read_action_bits { - u16 burst_num:16; - u64 data_addr:64; + uint16_t burst_num:16; + uint64_t data_addr:64; } __packed; /* ioctl data - add */ @@ -60,7 +59,7 @@ struct xsc_logic_in_port_cfg_reg { }; int xsc_flow_add(struct xsc_core_device *xdev, - int table, int length, void *data); + int table, int length, void *data); void xsc_dma_read_done_complete(void); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h index 2c2a0de06cb7ea4d2382ef0bd7f7007baf2d7b8f..3f58d01cbfe92ce99a39e60f4a84b9e8b26edc11 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 
- 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -9,8 +8,8 @@ #include "osdep.h" -#include -#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" struct xsc_free_list { struct list_head list; @@ -53,10 +52,10 @@ int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max); int xsc_dealloc_res(u32 *res, u64 *res_tbl); int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, - u32 base_align); + u32 base_align); int release_to_free_list(struct xsc_free_list_wl *list, u32 release, - u32 num_released); + u32 num_released); int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c index 59b75c3edb3401b79844ad193ca088d207bad495..f037cee206bbd00be19ac21aa0a21e48bb158a24 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c @@ -1,10 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ -#include +#include "common/xsc_core.h" void *xsc_malloc(unsigned int size) { diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h index fa3f51f49d0f433d2a426710590c08cdedb02773..8eab3e6803a3272e5be79218e920a10018a00e61 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c index a3ba44e9fd323460597e9997fcec957d5a156e20..419838e0f54e6797c6890a16c24a16a1b74ae958 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ @@ -88,8 +87,8 @@ int xsc_alloc_continuous_msix_vec(struct xsc_core_device *dev, u16 vec_num) } } xsc_core_warn(dev, - "failed to alloc msix vec, vec_range=%d~%d, vec_num=%d\n", - start, end, vec_num); + "failed to alloc msix vec, vec_range=%d~%d, vec_num=%d\n", + start, end, vec_num); return -EINVAL; } @@ -107,10 +106,10 @@ int xsc_free_continuous_msix_vec(struct xsc_core_device *dev) end = xres->msix_vec_end; if (vec_base < start || vec_base > end || - (vec_base + vec_num - 1) > end) { + (vec_base + vec_num - 1) > end) { xsc_core_warn(dev, - "failed to free msix vec, vec_base=%d, vec_num=%d, range=%d~%d\n", - vec_base, vec_num, start, end); + "failed to free msix vec, vec_base=%d, vec_num=%d, range=%d~%d\n", + vec_base, vec_num, start, end); return -EINVAL; } @@ -188,7 +187,6 @@ int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max) clear_bit(bit_num, (unsigned long *)res_tbl); *res = bit_num; return 0; - } int xsc_dealloc_res(u32 *res, u64 *res_tbl) @@ -201,7 +199,7 @@ int xsc_dealloc_res(u32 *res, u64 *res_tbl) } int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, - u32 base_align) + u32 base_align) { struct xsc_free_list *free_node; struct xsc_free_list *next; @@ -224,7 +222,7 @@ int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc new_node->start = free_node->start; new_node->end = start - 1; __list_add(&new_node->list, free_node->list.prev, - 
&free_node->list); + &free_node->list); } *alloc = start; free_node->start = start + required; @@ -248,8 +246,8 @@ int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc return 0; } -int release_to_free_list(struct xsc_free_list_wl *list, u32 release, - u32 num_released) +int release_to_free_list(struct xsc_free_list_wl *list, uint32_t release, + uint32_t num_released) { struct xsc_free_list *free_node = NULL; struct xsc_free_list *next, *prev; @@ -306,7 +304,7 @@ int release_to_free_list(struct xsc_free_list_wl *list, u32 release, new_node->start = release; new_node->end = release + num_released - 1; __list_add(&new_node->list, free_node->list.prev, - &free_node->list); + &free_node->list); } ret: xsc_release_lock(&list->lock, flags); @@ -339,7 +337,7 @@ int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base) int ret = alloc_from_free_list(&xres->mtt_list, pages_num, mtt_base, 1); xsc_core_dbg(dev, "alloc mtt for %d pages start from %d\n", - pages_num, *mtt_base); + pages_num, *mtt_base); return ret; } @@ -350,7 +348,7 @@ int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base) int ret = release_to_free_list(&xres->mtt_list, mtt_base, pages_num); xsc_core_dbg(dev, "mtt release %d pages start from %d\n", - pages_num, mtt_base); + pages_num, mtt_base); return ret; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_tbm.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_tbm.h index c513d572b25b7a6f6d9afa1cd69fa7d1d9841eb1..818cb056c82376f0e9ab22e153a4002a6f202bb6 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_tbm.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_tbm.h @@ -1,24 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #ifndef XSC_TBM_H #define XSC_TBM_H -#include +#include "common/xsc_core.h" #define XSC_LAG_NUM_MAX 0x30 -#ifndef PCIE0_PF_NUM -#define PCIE0_PF_NUM 2 -#endif - -#ifndef PCIE1_PF_NUM -#define PCIE1_PF_NUM 0 -#endif - enum { XSC_VLAN_MODE_NONE = 0, XSC_VLAN_MODE_TRUNK, @@ -71,15 +62,5 @@ int xsc_tbm_vlan_config(struct xsc_core_device *dev, struct xsc_logic_port_info *info, struct xsc_vlan_config *config); -static inline bool xsc_vf_pp_init_enable(const struct xsc_core_device *dev) -{ - return dev->vf_pp_init; -} - -static inline void xsc_set_vf_pp_status(struct xsc_core_device *dev, bool status) -{ - dev->vf_pp_init = status; -} - #endif /* XSC_TBM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c index 2177d4b1948a4a751c50fc4a118681ab1298058b..3eb381274538377dda96a7b33304822217aab4ec 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c @@ -1,10 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ -#include +#include "common/xsc_core.h" static void xsc_add_device(struct xsc_interface *intf, struct xsc_priv *priv) { @@ -215,7 +214,7 @@ void xsc_unregister_device(struct xsc_core_device *dev) struct xsc_interface *intf; mutex_lock(&xsc_intf_mutex); - list_for_each_entry(intf, &intf_list, list) + list_for_each_entry_reverse(intf, &intf_list, list) xsc_remove_device(intf, priv); list_del(&priv->dev_list); mutex_unlock(&xsc_intf_mutex); @@ -268,8 +267,8 @@ void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol) EXPORT_SYMBOL(xsc_remove_dev_by_protocol); void xsc_reload_interfaces(struct xsc_core_device *dev, - int protocol1, int protocol2, - bool valid1, bool valid2) + int protocol1, int protocol2, + bool valid1, bool valid2) { bool reload1; bool reload2; @@ -323,7 +322,7 @@ struct xsc_core_device *xsc_get_next_phys_dev(struct xsc_core_device *dev) if (!xsc_core_is_pf(tmp_dev)) continue; - if ((dev != tmp_dev) && (xsc_gen_pci_id(tmp_dev) == pci_id)) { + if (dev != tmp_dev && (xsc_gen_pci_id(tmp_dev) == pci_id)) { res = tmp_dev; break; } @@ -332,4 +331,3 @@ struct xsc_core_device *xsc_get_next_phys_dev(struct xsc_core_device *dev) return res; } EXPORT_SYMBOL(xsc_get_next_phys_dev); - diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/mad.c b/drivers/net/ethernet/yunsilicon/xsc/pci/mad.c index 4ec070c13d852e7dd94aae9eb4c8145c0d785d5f..e920929e078eb45d0701c8694f28252aa27412fc 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/mad.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/mad.c @@ -1,15 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include #include -#include +#include "common/driver.h" int xsc_core_mad_ifc(struct xsc_core_device *xdev, void *inb, void *outb, - u16 opmod, int port) + u16 opmod, int port) { struct xsc_mad_ifc_mbox_in *in = NULL; struct xsc_mad_ifc_mbox_out *out = NULL; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c index dbad66b4285c5f4c80e5a3851b7c3f4c2ab1468c..3fcf209a4ef017de04389dff15977d17d209cdf3 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c @@ -1,15 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ -#include -#include -#include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" #ifdef CONFIG_XSC_ESWITCH #include "devlink.h" #include "eswitch.h" @@ -19,50 +18,57 @@ #include "xsc_pci_ctrl.h" #ifdef RUN_WITH_PSV -#include "../../../../xscale-fw/include/xscale-fw.h" +#include "xscale-fw.h" #endif unsigned int xsc_debug_mask; module_param_named(debug_mask, xsc_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, - "debug mask: 1=dump cmd data, 2=dump cmd exec time, 3=both. Default=0"); + "debug mask: 1=dump cmd data, 2=dump cmd exec time, 3=both. Default=0"); -bool hw_init = 1; -int pcie_no; -module_param_named(hw_init, hw_init, bool, 0644); -module_param_named(pcie_no, pcie_no, int, 0644); +unsigned int xsc_log_level = XSC_LOG_LEVEL_WARN; +module_param_named(log_level, xsc_log_level, uint, 0644); +MODULE_PARM_DESC(log_level, + "lowest log level to print: 0=debug, 1=info, 2=warning, 3=error. 
Default=1"); +EXPORT_SYMBOL(xsc_log_level); static bool probe_vf = 1; module_param_named(probe_vf, probe_vf, bool, 0644); MODULE_PARM_DESC(probe_vf, "probe VFs or not, 0 = not probe, 1 = probe. Default = 1"); static bool xsc_hw_reset; +u8 g_xsc_pcie_no = XSC_PCIE_NO_UNSET; +EXPORT_SYMBOL(g_xsc_pcie_no); #define DRIVER_NAME "xsc_pci" #define DRIVER_VERSION "0.1.0" -#define XSC_PCI_VENDOR_ID_OBSOLETE 0x1172 -#define XSC_PCI_VENDOR_ID 0x1f67 -#define XSC_PF1_DEVICE_ID 0x0001 -#define XSC_PF1_VF_DEVICE_ID 0x0002 - -#ifdef USE_VIRTIO -static const struct pci_device_id xsc_pci_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) }, - { 0 } -}; -#else static const struct pci_device_id xsc_pci_id_table[] = { - { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PF1_DEVICE_ID) }, - { PCI_DEVICE(XSC_PCI_VENDOR_ID_OBSOLETE, XSC_PF1_DEVICE_ID) }, - { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PF1_VF_DEVICE_ID), + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PF1_DEVICE_ID_OBSOLETE) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID_OBSOLETE, XSC_PF1_DEVICE_ID_OBSOLETE) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PF1_VF_DEVICE_ID_OBSOLETE), .driver_data = XSC_PCI_DEV_IS_VF}, /* PF1's VF */ + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_SOC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_SOC_PF_DEV_ID) }, { 0 } }; -#endif /* !USE_VIRTIO */ + MODULE_DEVICE_TABLE(pci, xsc_pci_id_table); -#ifndef RUN_WITH_PSV +#define 
IS_VIRT_FUNCTION(id) ((id)->driver_data == XSC_PCI_DEV_IS_VF) + static bool need_write_reg_directly(void *in) { struct xsc_inbox_hdr *hdr; @@ -83,7 +89,7 @@ static bool need_write_reg_directly(void *in) return false; } -#else +#ifdef RUN_WITH_PSV static u8 phyport_num; static u32 xsc_get_glb_func_id(struct xsc_core_device *dev) @@ -93,22 +99,22 @@ static u32 xsc_get_glb_func_id(struct xsc_core_device *dev) struct xsc_core_device *pf_xdev; if (xsc_core_is_pf(dev)) { - if (xsc_get_pcie_no() == 0) - return XSC_PCIE0_PF_N_FUNC_ID(phyport_num++); + if (g_xsc_pcie_no == XSC_PCIE_NO_HOST) + return pf_index_to_pcie0_funcid(&dev->caps, phyport_num++); else - return XSC_PCIE1_PF_N_FUNC_ID(phyport_num++); + return pf_index_to_pcie1_funcid(&dev->caps, phyport_num++); } else { pf_xdev = pci_get_drvdata(pdev->physfn); vf_bdf = (pdev->bus->number << 8) | pdev->devfn; - if (xsc_get_pcie_no() == 0 && vf_bdf >= pf_xdev->priv.sriov.vf_bdf_base) { + if (g_xsc_pcie_no == XSC_PCIE_NO_HOST && + vf_bdf >= pf_xdev->priv.sriov.vf_bdf_base) { vf_id = vf_bdf - pf_xdev->priv.sriov.vf_bdf_base; if (pf_xdev->pf_id == 0) - return XSC_PCIE0_PF0_VF_N_FUNC_ID(vf_id); + return vf_index_to_pcie0_funcid(&dev->caps, vf_id, 0); else - return XSC_PCIE0_PF1_VF_N_FUNC_ID(vf_id); + return vf_index_to_pcie0_funcid(&dev->caps, vf_id, 1); } } - return 0; } #endif @@ -116,42 +122,52 @@ static u32 xsc_get_glb_func_id(struct xsc_core_device *dev) int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, void *out, int out_size) { -#ifndef RUN_WITH_PSV - int ret; - if (need_write_reg_directly(in)) - ret = xsc_cmd_write_reg_directly(dev, in, in_size, out, out_size, dev->glb_func_id); - else - ret = _xsc_cmd_exec(dev, in, in_size, out, out_size); - - return ret; + return xsc_cmd_write_reg_directly(dev, in, in_size, out, + out_size, dev->glb_func_id); +#ifndef RUN_WITH_PSV + return _xsc_cmd_exec(dev, in, in_size, out, out_size); #else return xsc_cmd_exec_psv(dev, in, in_size, out, out_size, 
dev->glb_func_id); #endif } EXPORT_SYMBOL(xsc_cmd_exec); -int xsc_get_pcie_no(void) -{ +u8 xsc_devid_to_pcie_no(int dev_id) +{ + u8 pcie_no; + + switch (dev_id) { + case XSC_MC_PF_DEV_ID: + case XSC_MC_VF_DEV_ID: + case XSC_MF_HOST_PF_DEV_ID: + case XSC_MF_HOST_VF_DEV_ID: + case XSC_MS_PF_DEV_ID: + case XSC_MS_VF_DEV_ID: + case XSC_MV_HOST_PF_DEV_ID: + case XSC_MV_HOST_VF_DEV_ID: + pcie_no = 0; + break; + case XSC_MF_SOC_PF_DEV_ID: + case XSC_MV_SOC_PF_DEV_ID: + pcie_no = 1; + break; + default: + pcie_no = 0; + break; + } return pcie_no; } -EXPORT_SYMBOL(xsc_get_pcie_no); static int set_dma_caps(struct pci_dev *pdev) { int err = 0; err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { + if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - } else { - err = dma_set_coherent_mask(&pdev->dev, -#ifdef USE_VIRTIO - DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT)); -#else - DMA_BIT_MASK(64)); -#endif /* !USE_VIRTIO */ - } + else + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!err) dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); @@ -163,76 +179,82 @@ static void xsc_pci_get_bdf(struct xsc_core_device *dev) { struct pci_dev *pci_dev = dev->pdev; - dev->bus_id = pci_dev->bus->number; - dev->dev_id = PCI_SLOT(pci_dev->devfn); + dev->bus_num = pci_dev->bus->number; + dev->dev_num = PCI_SLOT(pci_dev->devfn); dev->func_id = PCI_FUNC(pci_dev->devfn); + dev->device_id = pci_dev->device; - xsc_core_info(dev, "%s: bdf=%04x.%04x.%04x\n", - __func__, dev->bus_id, dev->dev_id, dev->func_id); + xsc_core_info(dev, "%s: bdf=%04x.%04x.%04x, device_id=0x%04x\n", + __func__, dev->bus_num, dev->dev_num, dev->func_id, dev->device_id); } -static void xsc_pci_calc_pf_port(struct xsc_core_device *dev) +static int xsc_pci_calc_pf_port(struct xsc_core_device *dev) { - dev->pf = xsc_cal_pf_vf_id(dev->glb_func_id, &dev->pf_id, - &dev->pcie, &dev->vf_id) ? 
1 : 0; - if (dev->pcie == 0) { - dev->pcie_port = XSC_PHY_PORT_PCIE_N(0); - dev->logic_port = XSC_PCIE0_PF_N_LOGIC_PORT(dev->pf_id); + u8 pcie_no; + if (!funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, + &dev->pf, &dev->pf_id, &pcie_no, &dev->vf_id)) + return -EINVAL; + + if (pcie_no == 0) { + dev->pcie_port = XSC_PHY_PORT_PCIE_N(0); + dev->logic_port = pf_index_to_pcie0_xscport(&dev->caps, dev->pf_id); } else { dev->pcie_port = XSC_PHY_PORT_PCIE_N(1); - dev->logic_port = XSC_PCIE1_PF_N_LOGIC_PORT(dev->pf_id); + dev->logic_port = pf_index_to_pcie1_xscport(&dev->caps, dev->pf_id); } dev->pf_logic_port = dev->logic_port; - dev->mac_logic_port = dev->mac_port = dev->caps.mac_port; + dev->mac_port = dev->caps.mac_port; + dev->mac_logic_port = dev->mac_port; xsc_core_dbg(dev, - "glb_func=%d, pcie_port=%d, pf_logic_port=%d, mac_port=%d, board_id=%d\n", - dev->glb_func_id, dev->pcie_port, dev->logic_port, - dev->mac_port, dev->board_id); + "glb_func=%d, pcie_port=%d, pf_logic_port=%d, mac_port=%d, board_id=%d\n", + dev->glb_func_id, dev->pcie_port, dev->logic_port, + dev->mac_port, dev->board_id); + + return 0; } void xsc_pci_get_vf_info(struct xsc_core_device *dev, struct xsc_vf_info *info) { + if (!dev || !info || !check_caps_funcid_valid(&dev->caps)) + xsc_core_err(dev, "%s input err\n", __func__); + if (info->phy_port == 0) { - if (info->pf_id == 0) { - info->logic_port = XSC_PCIE0_PF0_VF_N_LOGIC_PORT(info->vf_id); - info->func_id = XSC_PCIE0_PF0_VF_N_FUNC_ID(info->vf_id); - } else { - info->logic_port = XSC_PCIE0_PF1_VF_N_LOGIC_PORT(info->vf_id); - info->func_id = XSC_PCIE0_PF1_VF_N_FUNC_ID(info->vf_id); - } + info->logic_port = vf_index_to_pcie0_xscport(&dev->caps, info->vf_id, info->pf_id); + info->func_id = vf_index_to_pcie0_funcid(&dev->caps, info->vf_id, info->pf_id); } else { - if (info->pf_id == 0) { - info->logic_port = XSC_PCIE1_PF0_VF_N_LOGIC_PORT(info->vf_id); - info->func_id = XSC_PCIE1_PF0_VF_N_FUNC_ID(info->vf_id); - } else { - info->logic_port = 
XSC_PCIE1_PF1_VF_N_LOGIC_PORT(info->vf_id); - info->func_id = XSC_PCIE1_PF1_VF_N_FUNC_ID(info->vf_id); - } + info->logic_port = vf_index_to_pcie1_xscport(&dev->caps, info->vf_id, info->pf_id); + info->func_id = vf_index_to_pcie1_funcid(&dev->caps, info->vf_id, info->pf_id); } } EXPORT_SYMBOL(xsc_pci_get_vf_info); -static void xsc_pci_calc_vf_port(struct xsc_core_device *dev) +static int xsc_pci_calc_vf_port(struct xsc_core_device *dev) { - dev->pf = xsc_cal_pf_vf_id(dev->glb_func_id, &dev->pf_id, &dev->pcie, &dev->vf_id); - if (unlikely(dev->pcie == 1)) - return; + u8 pcie_no; - if (dev->pf_id == 0) - dev->logic_port = XSC_PCIE0_PF0_VF_N_LOGIC_PORT(dev->vf_id); - else - dev->logic_port = XSC_PCIE0_PF1_VF_N_LOGIC_PORT(dev->vf_id); + if (!funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, + &dev->pf, &dev->pf_id, &pcie_no, &dev->vf_id)) + return -EINVAL; + if (unlikely(pcie_no == 1)) + return -EINVAL; + + dev->logic_port = vf_index_to_pcie0_xscport(&dev->caps, dev->vf_id, dev->pf_id); dev->pcie_port = XSC_PHY_PORT_PCIE_N(0); - dev->pf_logic_port = XSC_PCIE0_PF_N_LOGIC_PORT(dev->pf_id); - dev->mac_logic_port = dev->mac_port = dev->caps.mac_port; + dev->pf_logic_port = pf_index_to_pcie0_xscport(&dev->caps, dev->pf_id); + dev->mac_port = dev->caps.mac_port; + dev->mac_logic_port = dev->mac_port; xsc_core_dbg(dev, - "vf%d_logic_port=%d, glb_func_id=%d, pf%d_logic_port=%d, mac_logic_port=%d, board_id=%d\n", - dev->vf_id, dev->logic_port, dev->glb_func_id, dev->pf_id, - dev->pf_logic_port, dev->mac_logic_port, dev->board_id); + "vf%d_logic_port=%d, glb_func_id=%d, pf%d_logic_port=%d\n", + dev->vf_id, dev->logic_port, dev->glb_func_id, dev->pf_id, + dev->pf_logic_port); + xsc_core_dbg(dev, "mac_logic_port=%d, board_id=%d\n", + dev->mac_logic_port, dev->board_id); + + return 0; } static int xsc_pci_enable_device(struct xsc_core_device *dev) @@ -347,18 +369,10 @@ int xsc_dev_init(struct xsc_core_device *dev) goto err_debugfs_init; } - err = xsc_pagealloc_init(dev); - if 
(err) { - xsc_core_err(dev, "xsc_pagealloc_init failed %d\n", err); - goto err_pagealloc_init; - } - xsc_init_reg_addr(dev); return 0; -err_pagealloc_init: - xsc_debugfs_fini(dev); err_debugfs_init: xsc_dev_res_cleanup(dev); err_res_init: @@ -367,9 +381,6 @@ int xsc_dev_init(struct xsc_core_device *dev) void xsc_dev_cleanup(struct xsc_core_device *dev) { -// xsc_pagealloc_stop(dev); -// xsc_reclaim_startup_pages(dev); - xsc_pagealloc_cleanup(dev); // iounmap(dev->iseg); xsc_debugfs_fini(dev); xsc_dev_res_cleanup(dev); @@ -383,13 +394,9 @@ static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id void __iomem *bar_base = NULL; char name[16]; -#ifdef USE_VIRTIO - snprintf(name, sizeof(name), "%s", "xsc-virtio"); -#else snprintf(name, sizeof(name), "%s", "xsc-pci"); if (id->vendor == XSC_PCI_VENDOR_ID_OBSOLETE) bar_num = 2; -#endif mutex_init(&dev->pci_status_mutex); dev->priv.numa_node = dev_to_node(&pdev->dev); @@ -406,7 +413,7 @@ static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id err = pci_request_region(pdev, bar_num, name); if (err) { xsc_core_err(dev, "failed to request %s pci_region=%d: err=%d\n", - name, bar_num, err); + name, bar_num, err); goto err_disable; } @@ -424,7 +431,7 @@ static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id goto err_clr_master; } else { xsc_core_info(dev, "ioremap bar%d base address=0x%llx\n", bar_num, - (unsigned long long)bar_base); + (unsigned long long)bar_base); } err = pci_save_state(pdev); @@ -434,11 +441,7 @@ static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id } dev->bar_num = bar_num; -#ifdef USE_VIRTIO - dev->bar0 = bar_base; -#else dev->bar2 = bar_base; -#endif /* !USE_VIRTIO */ xsc_pci_get_bdf(dev); xsc_init_reg_addr(dev); @@ -461,12 +464,12 @@ static void xsc_pci_fini(struct xsc_core_device *dev) struct pci_dev *pdev = dev->pdev; void __iomem *bar_base = NULL; -#ifdef USE_VIRTIO - bar_base = dev->bar0; -#else - 
bar_base = dev->bar2; +#ifdef RUN_WITH_PSV + xsc_stop_fw(dev); #endif + bar_base = dev->bar2; + if (bar_base) pci_iounmap(pdev, bar_base); pci_clear_master(pdev); @@ -501,7 +504,7 @@ static int xsc_check_cmdq_version(struct xsc_core_device *dev) if (be16_to_cpu(out->cmdq_ver) != CMDQ_VERSION) { xsc_core_err(dev, "cmdq version check failed, expecting version %d, actual version %d\n", - CMDQ_VERSION, be16_to_cpu(out->cmdq_ver)); + CMDQ_VERSION, be16_to_cpu(out->cmdq_ver)); err = -EINVAL; goto out_out; } @@ -550,13 +553,20 @@ static int xsc_init_once(struct xsc_core_device *dev) { int err; +#ifndef COSIM err = xsc_cmd_init(dev); if (err) { xsc_core_err(dev, "Failed initializing command interface, aborting\n"); goto err_cmd_init; } - +#endif #ifdef RUN_WITH_PSV + err = xsc_cmd_query_psv_funcid(dev, &dev->caps); + if (err) { + xsc_core_err(dev, "Failed to query psv funcid, err=%d\n", err); + goto err_cmdq_ver_chk; + } + dev->glb_func_id = xsc_get_glb_func_id(dev); #endif @@ -584,14 +594,29 @@ static int xsc_init_once(struct xsc_core_device *dev) goto err_cmdq_ver_chk; } if (xsc_core_is_pf(dev)) { - xsc_pci_calc_pf_port(dev); + err = xsc_pci_calc_pf_port(dev); + if (err) { + xsc_core_err(dev, "Failed to xsc_pci_calc_pf_port\n"); + goto err_cmdq_ver_chk; + } err = xsc_create_res(dev); if (err) { xsc_core_err(dev, "Failed to create resource, err=%d\n", err); goto err_cmdq_ver_chk; } } else { - xsc_pci_calc_vf_port(dev); + err = xsc_pci_calc_vf_port(dev); + if (err) { + xsc_core_err(dev, "Failed to xsc_pci_calc_vf_port\n"); + goto err_cmdq_ver_chk; + } + if (!dev->pdev->physfn) { + err = xsc_create_res(dev); + if (err) { + xsc_core_err(dev, "Failed to create resource, err=%d\n", err); + goto err_cmdq_ver_chk; + } + } } xsc_init_cq_table(dev); @@ -614,21 +639,23 @@ static int xsc_init_once(struct xsc_core_device *dev) #endif return 0; +#ifdef CONFIG_XSC_SRIOV #ifdef CONFIG_XSC_ESWITCH err_eswitch_init: xsc_sriov_cleanup(dev); #endif -#ifdef CONFIG_XSC_SRIOV err_sriov_init: 
-#endif xsc_eq_cleanup(dev); xsc_cleanup_qp_table(dev); xsc_cleanup_cq_table(dev); if (xsc_core_is_pf(dev)) xsc_destroy_res(dev); +#endif err_cmdq_ver_chk: +#ifndef COSIM xsc_cmd_cleanup(dev); err_cmd_init: +#endif return err; } @@ -645,7 +672,9 @@ static int xsc_cleanup_once(struct xsc_core_device *dev) xsc_cleanup_cq_table(dev); if (xsc_core_is_pf(dev)) xsc_destroy_res(dev); +#ifndef COSIM xsc_cmd_cleanup(dev); +#endif return 0; } @@ -653,8 +682,6 @@ static int xsc_load(struct xsc_core_device *dev) { int err; - xsc_pagealloc_start(dev); - err = xsc_irq_eq_create(dev); if (err) { xsc_core_err(dev, "xsc_irq_eq_create failed %d\n", err); @@ -668,7 +695,6 @@ static int xsc_load(struct xsc_core_device *dev) goto err_sriov; } #endif - return 0; #ifdef CONFIG_XSC_SRIOV @@ -676,7 +702,6 @@ static int xsc_load(struct xsc_core_device *dev) xsc_irq_eq_destroy(dev); #endif err_irq_eq_create: - xsc_pagealloc_stop(dev); return err; } @@ -687,7 +712,6 @@ static int xsc_unload(struct xsc_core_device *dev) xsc_sriov_detach(dev); #endif xsc_irq_eq_destroy(dev); - xsc_pagealloc_stop(dev); return 0; } @@ -785,7 +809,7 @@ int xsc_unload_one(struct xsc_core_device *dev, bool cleanup) mutex_lock(&dev->intf_state_mutex); if (!test_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state)) { xsc_core_warn(dev, "%s: interface is down, NOP\n", - __func__); + __func__); if (cleanup) xsc_cleanup_once(dev); goto out; @@ -814,7 +838,6 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, int err; #ifdef CONFIG_XSC_ESWITCH struct devlink *devlink; - devlink = xsc_devlink_alloc(); if (!devlink) { dev_err(&pci_dev->dev, "devlink alloc failed\n"); @@ -823,22 +846,24 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, xdev = devlink_priv(devlink); #else /* allocate core structure and fill it out */ - xdev = kzalloc(sizeof(struct xsc_core_device), GFP_KERNEL); + xdev = kzalloc(sizeof(*xdev), GFP_KERNEL); if (!xdev) return -ENOMEM; #endif xdev->pdev = pci_dev; xdev->device = &pci_dev->dev; + if (g_xsc_pcie_no == 
XSC_PCIE_NO_UNSET) + g_xsc_pcie_no = xsc_devid_to_pcie_no(pci_dev->device); priv = &xdev->priv; - xdev->coredev_type = pci_dev->is_virtfn ? + xdev->coredev_type = (IS_VIRT_FUNCTION(id)) ? XSC_COREDEV_VF : XSC_COREDEV_PF; xsc_core_info(xdev, "%s: dev_type=%d is_vf=%d\n", - __func__, xdev->coredev_type, pci_dev->is_virtfn); + __func__, xdev->coredev_type, pci_dev->is_virtfn); #ifdef CONFIG_XSC_SRIOV priv->sriov.probe_vf = probe_vf; - if (pci_dev->is_virtfn && !probe_vf) { + if ((IS_VIRT_FUNCTION(id)) && !probe_vf) { xsc_core_err(xdev, "VFs are not binded to xsc driver\n"); return 0; } @@ -853,13 +878,10 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, } #ifdef RUN_WITH_PSV - if (!xsc_hw_reset) { - xsc_hw_reset = true; - err = xsc_start_fw(xdev); - if (err) { - xsc_core_err(xdev, "PSV: failed to start fw.\n"); - goto err_start_fw; - } + err = xsc_start_fw(xdev); + if (err) { + xsc_core_err(xdev, "PSV: failed to start fw.\n"); + goto err_start_fw; } #endif @@ -871,7 +893,7 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, if (xsc_fpga_not_supported(xdev)) { err = -EOPNOTSUPP; - goto err_dev_init; + goto err_version_check; } err = xsc_load_one(xdev, true); @@ -883,9 +905,11 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, return 0; err_load: +err_version_check: xsc_dev_cleanup(xdev); err_dev_init: #ifdef RUN_WITH_PSV + xsc_stop_fw(xdev); err_start_fw: #endif xsc_pci_fini(xdev); @@ -935,6 +959,8 @@ static int __init xsc_init(void) xsc_register_debugfs(); + qpts_init(); + err = xsc_port_ctrl_init(); if (err) { pr_err("failed to initialize port control\n"); @@ -954,6 +980,7 @@ static int __init xsc_init(void) goto err_register; } + xsc_init_delayed_release(); return 0; err_register: @@ -962,20 +989,22 @@ static int __init xsc_init(void) xsc_port_ctrl_fini(); err_port_ctrl: xsc_unregister_debugfs(); + qpts_fini(); return err; } static void __exit xsc_fini(void) { + xsc_stop_delayed_release(); pci_unregister_driver(&xsc_pci_driver); xsc_pci_ctrl_fini(); 
xsc_port_ctrl_fini(); xsc_unregister_debugfs(); + qpts_fini(); } module_init(xsc_init); module_exit(xsc_fini); -MODULE_DESCRIPTION("Yunsilicon XSC PCI driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); + diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c index 64317142fa9a3e0317f3b23ddd4070284bafee82..a8a8d44c8a8bad0b493f936926f1764db3d06a02 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c @@ -1,13 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include #include -#include -#include +#include "common/driver.h" +#include "common/xsc_cmd.h" int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) { @@ -27,12 +26,12 @@ int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) err = xsc_create_mkey(dev, &in, &out); #endif if (err) { - xsc_core_dbg(dev, "cmd exec faile %d\n", err); + xsc_core_err(dev, "cmd exec faile %d\n", err); return err; } if (out.hdr.status) { - xsc_core_dbg(dev, "status %d\n", out.hdr.status); + xsc_core_err(dev, "status %d\n", out.hdr.status); return xsc_cmd_status_to_err(&out.hdr); } @@ -69,8 +68,133 @@ int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) } EXPORT_SYMBOL(xsc_core_destroy_mkey); +#ifdef REG_MR_VIA_CMDQ +int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, + u32 *mtt_base) +{ + struct xsc_set_mpt_mbox_in *in; + struct xsc_set_mpt_mbox_out out; + struct xsc_register_mr_request *req = &in_cmd->req; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + return err; + } + in->mpt_item.pdn = req->pdn; + in->mpt_item.pa_num = req->pa_num; + in->mpt_item.len = req->len; + in->mpt_item.mkey = req->mkey; + 
in->mpt_item.acc = req->acc; + in->mpt_item.page_mode = req->page_mode; + in->mpt_item.map_en = req->map_en; + in->mpt_item.va_base = req->va_base; + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MPT); + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err) { + xsc_core_err(dev, "set mpt failed\n"); + kfree(in); + return err; + } + *mtt_base = be32_to_cpu(out.mtt_base); + kfree(in); + return 0; +} + +int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, + u32 mtt_base) +{ +#define PA_NUM_PER_CMD 1024 + struct xsc_set_mtt_mbox_in *seg_in; + struct xsc_set_mtt_mbox_out seg_out; + struct xsc_register_mr_request *req = &in_cmd->req; + int tot_pg_num = be32_to_cpu(req->pa_num); + int seg_idx, tot_seg_num, seg_pa_num; + int pa_idx_base = 0; + int i; + int in_len; + int err; + + tot_seg_num = (tot_pg_num & 0x7FF) ? ((tot_pg_num >> 10) + 1) : + (tot_pg_num >> 10); + for (seg_idx = 0; seg_idx < tot_seg_num; seg_idx++) { + seg_pa_num = (seg_idx != tot_seg_num - 1) ? 
PA_NUM_PER_CMD : + (tot_pg_num - ((tot_seg_num - 1) << 10)); + in_len = (seg_pa_num << 3) + sizeof(*seg_in); + seg_in = kzalloc(in_len, GFP_KERNEL); + if (!seg_in) { + err = -ENOMEM; + return err; + } + seg_in->mtt_setting.mtt_base = cpu_to_be32(mtt_base); + seg_in->mtt_setting.pa_num = cpu_to_be32(seg_pa_num); + for (i = 0; i < seg_pa_num; i++) + seg_in->mtt_setting.pas[i] = req->pas[pa_idx_base + i]; + seg_in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTT); + + memset(&seg_out, 0, sizeof(seg_out)); + xsc_core_dbg(dev, "set mtt seg %d, pa_num %d, pa_idx_base %d, tot_seg %d\n", + seg_idx, seg_pa_num, pa_idx_base, tot_seg_num); + err = xsc_cmd_exec(dev, seg_in, in_len, &seg_out, sizeof(seg_out)); + if (err) { + xsc_core_err(dev, "set mtt seg %d failed\n", seg_idx); + kfree(seg_in); + return err; + } + kfree(seg_in); + pa_idx_base += seg_pa_num; + mtt_base += seg_pa_num; + } + return 0; +} + +int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = in_cmd->req.mkey; + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + return 0; +} + +int xsc_reg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in) +{ + u32 mtt_base; + int err; + + err = xsc_set_mpt_via_cmdq(dev, in, &mtt_base); + if (err) { + xsc_core_err(dev, "set mpt via cmdq failed\n"); + return err; + } + + err = xsc_set_mtt_via_cmdq(dev, in, mtt_base); + if (err) { + xsc_core_err(dev, "set mtt via cmdq failed\n"); + goto set_mtt_err; + } + return 0; + +set_mtt_err: + err = xsc_dereg_mr_via_cmdq(dev, in); + if (err) + xsc_core_err(dev, "dereg error mr failed\n"); + return err; +} +#endif + int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, - struct 
xsc_register_mr_mbox_in *in, int inlen) + struct xsc_register_mr_mbox_in *in, int inlen) { struct xsc_register_mr_mbox_out out; int err; @@ -78,16 +202,16 @@ int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_REG_MR); #ifdef REG_MR_VIA_CMDQ - err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + err = xsc_reg_mr_via_cmdq(dev, in); #else err = xsc_reg_mr(dev, in, &out); #endif if (err) { - xsc_core_dbg(dev, "cmd exec failed %d\n", err); + xsc_core_err(dev, "cmd exec failed %d\n", err); return err; } if (out.hdr.status) { - xsc_core_dbg(dev, "status %d\n", out.hdr.status); + xsc_core_err(dev, "status %d\n", out.hdr.status); return xsc_cmd_status_to_err(&out.hdr); } @@ -110,11 +234,11 @@ int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr) err = xsc_dereg_mr(dev, &in, &out); #endif if (err) { - xsc_core_dbg(dev, "cmd exec failed %d\n", err); + xsc_core_err(dev, "cmd exec failed %d\n", err); return err; } if (out.hdr.status) { - xsc_core_dbg(dev, "status %d\n", out.hdr.status); + xsc_core_err(dev, "status %d\n", out.hdr.status); return xsc_cmd_status_to_err(&out.hdr); } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pagealloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pagealloc.c deleted file mode 100644 index e393670cffe4c7dc2f7e4b704ba000c925bb5186..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/pagealloc.c +++ /dev/null @@ -1,410 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. 
- */ - -#include -#include -#include - -enum { - XSC_PAGES_CANT_GIVE = 0, - XSC_PAGES_GIVE = 1, - XSC_PAGES_TAKE = 2 -}; - -struct xsc_pages_req { - struct xsc_core_device *xdev; - u32 func_id; - s16 npages; - struct work_struct work; -}; - -struct fw_page { - struct rb_node rb_node; - u64 addr; - struct page *page; - u16 func_id; -}; - -struct xsc_query_pages_inbox { - struct xsc_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct xsc_query_pages_outbox { - struct xsc_outbox_hdr hdr; - u8 reserved[2]; - __be16 func_id; - __be16 init_pages; - __be16 num_pages; -}; - -struct xsc_manage_pages_inbox { - struct xsc_inbox_hdr hdr; - __be16 rsvd0; - __be16 func_id; - __be16 rsvd1; - __be16 num_entries; - u8 rsvd2[16]; - __be64 pas[0]; -}; - -struct xsc_manage_pages_outbox { - struct xsc_outbox_hdr hdr; - u8 rsvd0[2]; - __be16 num_entries; - u8 rsvd1[20]; - __be64 pas[0]; -}; - -static int insert_page(struct xsc_core_device *xdev, u64 addr, struct page *page, u16 func_id) -{ - struct rb_root *root = &xdev->dev_res->page_root; - struct rb_node **new = &root->rb_node; - struct rb_node *parent = NULL; - struct fw_page *nfp; - struct fw_page *tfp; - - while (*new) { - parent = *new; - tfp = rb_entry(parent, struct fw_page, rb_node); - if (tfp->addr < addr) - new = &parent->rb_left; - else if (tfp->addr > addr) - new = &parent->rb_right; - else - return -EEXIST; - } - - nfp = kmalloc(sizeof(*nfp), GFP_KERNEL); - if (!nfp) - return -ENOMEM; - - nfp->addr = addr; - nfp->page = page; - nfp->func_id = func_id; - - rb_link_node(&nfp->rb_node, parent, new); - rb_insert_color(&nfp->rb_node, root); - - return 0; -} - -static struct page *remove_page(struct xsc_core_device *xdev, u64 addr) -{ - struct rb_root *root = &xdev->dev_res->page_root; - struct rb_node *tmp = root->rb_node; - struct page *result = NULL; - struct fw_page *tfp; - - while (tmp) { - tfp = rb_entry(tmp, struct fw_page, rb_node); - if (tfp->addr < addr) { - tmp = tmp->rb_left; - } else if (tfp->addr > addr) { - tmp = 
tmp->rb_right; - } else { - rb_erase(&tfp->rb_node, root); - result = tfp->page; - kfree(tfp); - break; - } - } - - return result; -} - -static int xsc_cmd_query_pages(struct xsc_core_device *xdev, u16 *func_id, - s16 *pages, s16 *init_pages) -{ - struct xsc_query_pages_inbox in; - struct xsc_query_pages_outbox out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_PAGES); - err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - //return xsc_cmd_status_to_err(&out.hdr); - return -EIO; - - if (pages) - *pages = be16_to_cpu(out.num_pages); - if (init_pages) - *init_pages = be16_to_cpu(out.init_pages); - *func_id = be16_to_cpu(out.func_id); - - return err; -} - -static int give_pages(struct xsc_core_device *xdev, u16 func_id, int npages, - int notify_fail) -{ - struct xsc_manage_pages_inbox *in; - struct xsc_manage_pages_outbox out; - struct page *page; - int inlen; - u64 addr; - int err; - int i; - - inlen = sizeof(*in) + npages * sizeof(in->pas[0]); - in = xsc_vzalloc(inlen); - if (!in) { - xsc_core_warn(xdev, "vzalloc failed %d\n", inlen); - return -ENOMEM; - } - memset(&out, 0, sizeof(out)); - - for (i = 0; i < npages; i++) { - page = alloc_page(GFP_HIGHUSER); - if (!page) { - err = -ENOMEM; - xsc_core_warn(xdev, "failed to allocate page\n"); - goto out_alloc; - } - addr = dma_map_page(&xdev->pdev->dev, page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&xdev->pdev->dev, addr)) { - xsc_core_warn(xdev, "failed dma mapping page\n"); - __free_page(page); - err = -ENOMEM; - goto out_alloc; - } - err = insert_page(xdev, addr, page, func_id); - if (err) { - xsc_core_err(xdev, "failed to track allocated page\n"); - dma_unmap_page(&xdev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - err = -ENOMEM; - goto out_alloc; - } - in->pas[i] = cpu_to_be64(addr); - } - - in->hdr.opcode = 
cpu_to_be16(XSC_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(XSC_PAGES_GIVE); - in->func_id = cpu_to_be16(func_id); - in->num_entries = cpu_to_be16(npages); - err = xsc_cmd_exec(xdev, in, inlen, &out, sizeof(out)); - xsc_core_dbg(xdev, "err %d\n", err); - if (err) { - xsc_core_warn(xdev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); - goto out_alloc; - } - xdev->dev_res->fw_pages += npages; - - if (out.hdr.status) { - //err = xsc_cmd_status_to_err(&out.hdr); - err = -EIO; - if (err) { - xsc_core_warn(xdev, - "func_id 0x%x, npages %d, status %d\n", - func_id, npages, out.hdr.status); - goto out_alloc; - } - } - - xsc_core_dbg(xdev, "err %d\n", err); - - goto out_free; - -out_alloc: - if (notify_fail) { - memset(in, 0, inlen); - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(XSC_PAGES_CANT_GIVE); - if (xsc_cmd_exec(xdev, in, sizeof(*in), &out, sizeof(out))) - xsc_core_warn(xdev, "\n"); - } - for (i--; i >= 0; i--) { - addr = be64_to_cpu(in->pas[i]); - page = remove_page(xdev, addr); - if (!page) { - xsc_core_err(xdev, "BUG: can't remove page at addr 0x%llx\n", - addr); - continue; - } - dma_unmap_page(&xdev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - } - -out_free: - xsc_vfree(in); - return err; -} - -static int reclaim_pages(struct xsc_core_device *xdev, u32 func_id, int npages, - int *nclaimed) -{ - struct xsc_manage_pages_inbox in; - struct xsc_manage_pages_outbox *out; - struct page *page; - int num_claimed; - int outlen; - u64 addr; - int err; - int i; - - memset(&in, 0, sizeof(in)); - outlen = sizeof(*out) + npages * sizeof(out->pas[0]); - out = xsc_vzalloc(outlen); - if (!out) - return -ENOMEM; - - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MANAGE_PAGES); - in.hdr.opmod = cpu_to_be16(XSC_PAGES_TAKE); - in.func_id = cpu_to_be16(func_id); - in.num_entries = cpu_to_be16(npages); - xsc_core_dbg(xdev, "npages %d, outlen %d\n", npages, outlen); - err = 
xsc_cmd_exec(xdev, &in, sizeof(in), out, outlen); - if (err) { - xsc_core_err(xdev, "failed recliaming pages\n"); - goto out_free; - } - xdev->dev_res->fw_pages -= npages; - - if (out->hdr.status) { - //err = xsc_cmd_status_to_err(&out->hdr); - err = -EIO; - goto out_free; - } - - num_claimed = be16_to_cpu(out->num_entries); - if (nclaimed) - *nclaimed = num_claimed; - - for (i = 0; i < num_claimed; i++) { - addr = be64_to_cpu(out->pas[i]); - page = remove_page(xdev, addr); - if (!page) { - xsc_core_warn(xdev, "FW reported unknown DMA address 0x%llx\n", addr); - } else { - dma_unmap_page(&xdev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - } - } - -out_free: - xsc_vfree(out); - return err; -} - -static void pages_work_handler(struct work_struct *work) -{ - struct xsc_pages_req *req = container_of(work, struct xsc_pages_req, work); - struct xsc_core_device *xdev = req->xdev; - int err = 0; - - if (req->npages < 0) - err = reclaim_pages(xdev, req->func_id, -1 * req->npages, NULL); - else if (req->npages > 0) - err = give_pages(xdev, req->func_id, req->npages, 1); - - if (err) - xsc_core_warn(xdev, "%s fail %d\n", req->npages < 0 ? 
- "reclaim" : "give", err); - - kfree(req); -} - -void xsc_core_req_pages_handler(struct xsc_core_device *xdev, u16 func_id, - s16 npages) -{ - struct xsc_pages_req *req; - - req = kzalloc(sizeof(*req), GFP_ATOMIC); - if (!req) - return; - - req->xdev = xdev; - req->func_id = func_id; - req->npages = npages; - INIT_WORK(&req->work, pages_work_handler); - queue_work(xdev->dev_res->pg_wq, &req->work); -} - -int xsc_satisfy_startup_pages(struct xsc_core_device *xdev) -{ - s16 init_pages; - u16 func_id; - int err; - - err = xsc_cmd_query_pages(xdev, &func_id, NULL, &init_pages); - if (err) - return err; - - xsc_core_dbg(xdev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id); - - return give_pages(xdev, func_id, init_pages, 0); -} - -static int optimal_reclaimed_pages(void) -{ - struct xsc_cmd_prot_block *block; - struct xsc_cmd_layout *lay; - int ret; - - ret = (sizeof(lay->in) + sizeof(block->data) - - sizeof(struct xsc_manage_pages_outbox)) / 8; - - return ret; -} - -int xsc_reclaim_startup_pages(struct xsc_core_device *xdev) -{ - unsigned long end = jiffies + msecs_to_jiffies(5000); - struct fw_page *fwp; - struct rb_node *p; - int err; - - do { - p = rb_first(&xdev->dev_res->page_root); - if (p) { - fwp = rb_entry(p, struct fw_page, rb_node); - err = reclaim_pages(xdev, fwp->func_id, optimal_reclaimed_pages(), NULL); - if (err) { - xsc_core_warn(xdev, "failed reclaiming pages (%d)\n", err); - return err; - } - } - if (time_after(jiffies, end)) { - xsc_core_warn(xdev, "FW did not return all pages. 
giving up...\n"); - break; - } - } while (p); - - return 0; -} - -int xsc_pagealloc_init(struct xsc_core_device *xdev) -{ - xdev->dev_res->page_root = RB_ROOT; - xdev->dev_res->pg_wq = create_singlethread_workqueue("xsc_page_allocator"); - if (!xdev->dev_res->pg_wq) - return -ENOMEM; - - return 0; -} - -void xsc_pagealloc_cleanup(struct xsc_core_device *xdev) -{ - destroy_workqueue(xdev->dev_res->pg_wq); -} - -int xsc_pagealloc_start(struct xsc_core_device *xdev) -{ - return 0; -} - -void xsc_pagealloc_stop(struct xsc_core_device *xdev) -{ - flush_workqueue(xdev->dev_res->pg_wq); -} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c index 8b667bf13d7184678128a87b4a59bd776266f371..ed3a459a8498e6c28fdde35dc58018eceeebac8b 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c @@ -1,16 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ +#include #include #include #include #include -#include -#include -#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" #ifdef CONFIG_RFS_ACCEL #include #endif @@ -65,10 +65,10 @@ static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) int vecid = 0; snprintf(dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", - name, pci_name(xdev->pdev)); + name, pci_name(xdev->pdev)); irqn = pci_irq_vector(xdev->pdev, XSC_DMA_READ_DONE_VEC); err = request_irq(irqn, xsc_dma_read_msix_handler, 0, - dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, (void *)xdev); + dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, (void *)xdev); vecid = (xdev->msix_vec_base + XSC_DMA_READ_DONE_VEC); value = ((1 << 12) | (vecid & 0xfff)); @@ -79,7 +79,7 @@ static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) static void xsc_dma_read_msix_fini(struct xsc_core_device *xdev) { - if ((xdev->caps.msix_enable) && xsc_core_is_pf(xdev)) + if (xdev->caps.msix_enable && xsc_core_is_pf(xdev)) free_irq(pci_irq_vector(xdev->pdev, XSC_DMA_READ_DONE_VEC), xdev); } @@ -107,8 +107,8 @@ static void xsc_msix_ctrl_tbl_func_init(struct xsc_core_device *xdev, u32 func_i } static int xsc_msix_mask_tbl_bit_write(struct xsc_core_device *xdev, - u32 vector_id, - u32 mask_or_unmask) + u32 vector_id, + u32 mask_or_unmask) { u32 v; struct xsc_core_device *pf_xdev; @@ -153,8 +153,8 @@ static void xsc_msix_check_vtr_tbl_idle(struct xsc_core_device *xdev, u32 *idle) } int xsc_msix_vector_tbl_write(struct xsc_core_device *xdev, u32 vector_id, - u32 laddr, u32 uaddr, u32 data, - u32 func_id, u32 vector_en) + u32 laddr, u32 uaddr, u32 data, + u32 func_id, u32 vector_en) { int ret = 0; u32 idle = 0; @@ -167,9 +167,11 @@ int xsc_msix_vector_tbl_write(struct xsc_core_device *xdev, u32 vector_id, pf_xdev = xdev; if (vector_id >= BIT(xdev->caps.log_max_msix) || - func_id > XSC_FUNC_ID_END || vector_en > 1) { + !check_caps_funcid_valid(&xdev->caps) || + 
func_id >= get_xsc_funcid_end(&xdev->caps) || + vector_en > 1) { xsc_core_err(xdev, "%s: invalid input params: func_id=%d\n", - __func__, func_id); + __func__, func_id); return -EINVAL; } @@ -216,7 +218,8 @@ int xsc_read_msix_tbl_info(struct xsc_core_device *xdev, u16 index, struct msi_m static int xsc_msix_ctrl_tbl_init(struct xsc_core_device *xdev) { - if (xdev->glb_func_id >= XSC_FUNC_ID_END) + if (!check_caps_funcid_valid(&xdev->caps) || + xdev->glb_func_id >= get_xsc_funcid_end(&xdev->caps)) return -1; xsc_msix_ctrl_tbl_func_init(xdev, xdev->glb_func_id); @@ -227,7 +230,8 @@ static int xsc_msix_ctrl_tbl_init(struct xsc_core_device *xdev) static int xsc_msix_ctrl_tbl_fini(struct xsc_core_device *xdev) { - if (xdev->glb_func_id >= XSC_FUNC_ID_END) + if (!check_caps_funcid_valid(&xdev->caps) || + xdev->glb_func_id >= get_xsc_funcid_end(&xdev->caps)) return -1; xsc_msix_ctrl_tbl_func_fini(xdev, xdev->glb_func_id); @@ -306,8 +310,11 @@ static int xsc_msix_vector_tbl_ops(struct xsc_core_device *xdev, u32 vector_en) index = 0; for_each_pci_msi_entry(entry, pdev) { err = xsc_msix_vector_tbl_write(xdev, i, - msgs[index].address_lo, msgs[index].address_hi, msgs[index].data, - xdev->glb_func_id, vector_en); + msgs[index].address_lo, + msgs[index].address_hi, + msgs[index].data, + xdev->glb_func_id, + vector_en); i++; index++; } @@ -501,7 +508,7 @@ xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector) { struct xsc_eq *eq = xsc_eq_get(dev, vector); - if (!unlikely(eq)) + if (unlikely(!eq)) return NULL; return eq->mask; @@ -541,8 +548,9 @@ static int xsc_alloc_irq_vectors(struct xsc_core_device *dev) table->eq_vec_comp_base = nvec_base; table->num_comp_vectors = nvec - nvec_base; dev->msix_vec_base = dev->caps.msix_base; - xsc_core_info(dev, "alloc msix_vec_num=%d, vec_base_num=%d, max_msix_num=%d, msix_vec_base=%d\n", - nvec, nvec_base, dev->caps.msix_num, dev->msix_vec_base); + xsc_core_info(dev, + "alloc msix_vec_num=%d, vec_base_num=%d, 
max_msix_num=%d, msix_vec_base=%d\n", + nvec, nvec_base, dev->caps.msix_num, dev->msix_vec_base); return 0; @@ -589,7 +597,7 @@ static int xsc_alloc_irq_vectors(struct xsc_core_device *dev) table->eq_vec_comp_base = nvec_base; table->num_comp_vectors = nvec - nvec_base; xsc_core_info(dev, "alloc irq vector=%d, vec_base=%d, max_eq_nums=%d, log_max_eq=%d\n", - nvec, nvec_base, dev->caps.max_num_eqs, dev->caps.log_max_eq); + nvec, nvec_base, dev->caps.max_num_eqs, dev->caps.log_max_eq); return 0; @@ -610,7 +618,7 @@ static void xsc_free_irq_vectors(struct xsc_core_device *dev) } int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, - unsigned int *irqn) + unsigned int *irqn) { struct xsc_eq_table *table = &dev->dev_res->eq_table; struct xsc_eq *eq, *n; @@ -674,7 +682,7 @@ static int alloc_comp_eqs(struct xsc_core_device *dev) snprintf(name, XSC_MAX_IRQ_NAME, "xsc_comp%d", i); err = xsc_create_map_eq(dev, eq, - i + table->eq_vec_comp_base, nent, name); + i + table->eq_vec_comp_base, nent, name); if (err) { kfree(eq); goto clean; @@ -717,7 +725,7 @@ int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) writel(dev->msix_vec_base + vecidx, REG_ADDR(dev, dev->cmd.reg.msix_vec_addr)); snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", - "xsc_cmd", pci_name(dev->pdev)); + "xsc_cmd", pci_name(dev->pdev)); dev->cmd.irqn = pci_irq_vector(dev->pdev, vecidx); return request_irq(dev->cmd.irqn, xsc_cmd_handler, 0, dev_res->irq_info[vecidx].name, dev); @@ -750,7 +758,7 @@ int xsc_request_irq_for_event(struct xsc_core_device *dev) struct xsc_dev_resource *dev_res = dev->dev_res; snprintf(dev_res->irq_info[XSC_VEC_CMD_EVENT].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", - "xsc_eth_event", pci_name(dev->pdev)); + "xsc_eth_event", pci_name(dev->pdev)); return request_irq(pci_irq_vector(dev->pdev, XSC_VEC_CMD_EVENT), xsc_event_handler, 0, dev_res->irq_info[XSC_VEC_CMD_EVENT].name, dev); @@ -831,7 +839,7 @@ int xsc_irq_eq_create(struct 
xsc_core_device *dev) goto err_request_event_irq; } - if ((dev->caps.msix_enable) && xsc_core_is_pf(dev)) { + if (dev->caps.msix_enable && xsc_core_is_pf(dev)) { err = xsc_dma_read_msix_init(dev); if (err) { xsc_core_err(dev, "dma read msix init failed %d.\n", err); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c index a997bcbe785617360ac1b779635b1da90c556ada..37db01d1742f8c6bf91d0e965df3491e971ed2aa 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include #include -#include +#include "common/driver.h" int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn) { diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/port.c b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c index 2beb815b5f6a24416e440250139176e85b9561c4..5689d5c2366313c5a782d37e7c69faa2eebd9305 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/port.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c @@ -1,16 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include -#include -#include +#include "common/driver.h" +#include "common/port.h" int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, - int size_in, void *data_out, int size_out, - u16 reg_num, int arg, int write) + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) { struct xsc_access_reg_mbox_in *in = NULL; struct xsc_access_reg_mbox_out *out = NULL; @@ -30,7 +29,7 @@ int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, in->arg = cpu_to_be32(arg); in->register_id = cpu_to_be16(reg_num); err = xsc_cmd_exec(xdev, in, sizeof(*in) + size_in, out, - sizeof(*out) + size_out); + sizeof(*out) + size_out); if (err) goto ex2; @@ -69,7 +68,7 @@ int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps) in.port_num = port_num; err = xsc_core_access_reg(xdev, &in, sizeof(in), &out, - sizeof(out), XSC_REG_PCAP, 0, 1); + sizeof(out), XSC_REG_PCAP, 0, 1); return err; } @@ -82,7 +81,7 @@ static int xsc_query_module_num(struct xsc_core_device *dev, int *module_num) } static int xsc_query_module_id(struct xsc_core_device *dev, int module_num, - u8 *module_id) + u8 *module_id) { struct xsc_reg_mcia in; struct xsc_reg_mcia out; @@ -96,14 +95,14 @@ static int xsc_query_module_id(struct xsc_core_device *dev, int module_num, in.size = 1; err = xsc_core_access_reg(dev, &in, sizeof(in), &out, - sizeof(out), XSC_REG_MCIA, 0, 0); + sizeof(out), XSC_REG_MCIA, 0, 0); if (err) return err; status = out.status; if (status) { xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", - status); + status); return -EIO; } ptr = out.dword_0; @@ -156,7 +155,7 @@ static void xsc_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) } static int xsc_query_mcia(struct xsc_core_device *dev, - struct xsc_module_eeprom_query_params *params, u8 *data) + struct xsc_module_eeprom_query_params *params, u8 *data) { struct xsc_reg_mcia in; struct xsc_reg_mcia out; @@ -173,14 +172,14 @@ static int xsc_query_mcia(struct 
xsc_core_device *dev, in.size = size; err = xsc_core_access_reg(dev, &in, sizeof(in), &out, - sizeof(out), XSC_REG_MCIA, 0, 0); + sizeof(out), XSC_REG_MCIA, 0, 0); if (err) return err; status = out.status; if (status) { xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", - status); + status); return -EIO; } @@ -191,7 +190,7 @@ static int xsc_query_mcia(struct xsc_core_device *dev, } int xsc_query_module_eeprom(struct xsc_core_device *dev, - u16 offset, u16 size, u8 *data) + u16 offset, u16 size, u8 *data) { struct xsc_module_eeprom_query_params query = {0}; u8 module_id; @@ -229,4 +228,3 @@ int xsc_query_module_eeprom(struct xsc_core_device *dev, return xsc_query_mcia(dev, &query, data); } EXPORT_SYMBOL_GPL(xsc_query_module_eeprom); - diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c index 37d4460adbec641797b3c75b8fa7923d299ae727..42788be7ae90914ac84831bb4c26393586ccf79c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c @@ -1,15 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include #include #include -#include -#include -#include +#include "common/qp.h" +#include "common/driver.h" +#include +#include "common/xsc_core.h" #define GROUP_DESTROY_FLAG_SHFIT 15 #define GROUP_DESTROY_FLAG_MASK (1 << (GROUP_DESTROY_FLAG_SHFIT)) @@ -22,8 +22,118 @@ enum { GROUP_MODE_PER_DEST_IP, }; +struct xsc_qp_rsc { + struct list_head node; + struct xsc_core_qp *qp; + struct xsc_core_device *xdev; +}; + +struct { + struct list_head head; + spinlock_t lock; /* protect delayed_release_list */ + struct task_struct *poll_task; + struct wait_queue_head wq; + int wait_flag; +} delayed_release_list; + +enum { + SLEEP, + WAKEUP, + EXIT, +}; + +static bool xsc_qp_flush_finished(struct xsc_core_device *xdev, u32 qpn) +{ + struct xsc_query_qp_flush_status_mbox_in in; + struct xsc_query_qp_flush_status_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP_FLUSH_STATUS); + in.qpn = cpu_to_be32(qpn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status != 0) { + xsc_core_dbg(xdev, "qp[%d] flush incomplete.\n", qpn); + return false; + } + + return true; +} + +static int xsc_qp_flush_check(void *arg) +{ + struct xsc_qp_rsc *entry; + + while (!kthread_should_stop()) { + if (need_resched()) + schedule(); + + spin_lock(&delayed_release_list.lock); + entry = list_first_entry_or_null(&delayed_release_list.head, + struct xsc_qp_rsc, node); + if (!entry) { + spin_unlock(&delayed_release_list.lock); + wait_event_interruptible(delayed_release_list.wq, + delayed_release_list.wait_flag != SLEEP); + if (delayed_release_list.wait_flag == EXIT) + break; + delayed_release_list.wait_flag = SLEEP; + continue; + } + list_del(&entry->node); + spin_unlock(&delayed_release_list.lock); + + if (!xsc_qp_flush_finished(entry->xdev, entry->qp->qpn)) { + spin_lock(&delayed_release_list.lock); + list_add_tail(&entry->node, &delayed_release_list.head); + 
spin_unlock(&delayed_release_list.lock); + } else { + complete(&entry->qp->delayed_release); + kfree(entry); + } + } + + return 0; +} + +void xsc_init_delayed_release(void) +{ + INIT_LIST_HEAD(&delayed_release_list.head); + spin_lock_init(&delayed_release_list.lock); + init_waitqueue_head(&delayed_release_list.wq); + delayed_release_list.wait_flag = SLEEP; + delayed_release_list.poll_task = kthread_create(xsc_qp_flush_check, NULL, "qp flush check"); + if (delayed_release_list.poll_task) + wake_up_process(delayed_release_list.poll_task); +} + +void xsc_stop_delayed_release(void) +{ + delayed_release_list.wait_flag = EXIT; + wake_up(&delayed_release_list.wq); + if (delayed_release_list.poll_task) + kthread_stop(delayed_release_list.poll_task); +} + +void xsc_add_to_delayed_release_list(struct xsc_core_device *xdev, struct xsc_core_qp *qp) +{ + struct xsc_qp_rsc *qp_rsc; + + qp_rsc = kzalloc(sizeof(*qp_rsc), GFP_KERNEL); + if (!qp_rsc) + return; + qp_rsc->qp = qp; + qp_rsc->xdev = xdev; + spin_lock(&delayed_release_list.lock); + list_add_tail(&qp_rsc->node, &delayed_release_list.head); + spin_unlock(&delayed_release_list.lock); + delayed_release_list.wait_flag = WAKEUP; + wake_up(&delayed_release_list.wq); +} + int create_resource_common(struct xsc_core_device *xdev, - struct xsc_core_qp *qp) + struct xsc_core_qp *qp) { struct xsc_qp_table *table = &xdev->dev_res->qp_table; int err; @@ -43,7 +153,7 @@ int create_resource_common(struct xsc_core_device *xdev, EXPORT_SYMBOL_GPL(create_resource_common); void destroy_resource_common(struct xsc_core_device *xdev, - struct xsc_core_qp *qp) + struct xsc_core_qp *qp) { struct xsc_qp_table *table = &xdev->dev_res->qp_table; unsigned long flags; @@ -83,9 +193,9 @@ void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type) } int xsc_core_create_qp(struct xsc_core_device *xdev, - struct xsc_core_qp *qp, - struct xsc_create_qp_mbox_in *in, - int inlen) + struct xsc_core_qp *qp, + struct xsc_create_qp_mbox_in *in, + 
int inlen) { struct xsc_create_qp_mbox_out out; struct xsc_destroy_qp_mbox_in din; @@ -95,17 +205,17 @@ int xsc_core_create_qp(struct xsc_core_device *xdev, int exec = 1; ktime_get_boottime_ts64(&ts); - memset(&dout, 0, sizeof(dout)); in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); -#ifdef XSC_CHIP_RDMA_UNSUPPORTED - if ((in->req.qp_type == XSC_QUEUE_TYPE_RDMA_MAD) || - (in->req.qp_type == XSC_QUEUE_TYPE_RDMA_RC)) { - exec = 0; - qp->qpn = 0; + if (!is_support_rdma(xdev)) { + if (in->req.qp_type == XSC_QUEUE_TYPE_RDMA_MAD || + in->req.qp_type == XSC_QUEUE_TYPE_RDMA_RC) { + exec = 0; + qp->qpn = 0; + } } -#endif + if (exec) { err = xsc_cmd_exec(xdev, in, inlen, &out, sizeof(out)); if (err) { @@ -121,7 +231,7 @@ int xsc_core_create_qp(struct xsc_core_device *xdev, xsc_core_dbg(xdev, "qpn = %x\n", qp->qpn); } - qp->trace_info = kzalloc(sizeof(struct xsc_qp_trace), GFP_KERNEL); + qp->trace_info = kzalloc(sizeof(*qp->trace_info), GFP_KERNEL); if (!qp->trace_info) { err = -ENOMEM; goto err_cmd; @@ -139,7 +249,7 @@ int xsc_core_create_qp(struct xsc_core_device *xdev, err = xsc_debug_qp_add(xdev, qp); if (err) xsc_core_dbg(xdev, "failed adding QP 0x%x to debug file system\n", - qp->qpn); + qp->qpn); atomic_inc(&xdev->num_qps); return 0; @@ -157,7 +267,7 @@ int xsc_core_create_qp(struct xsc_core_device *xdev, EXPORT_SYMBOL_GPL(xsc_core_create_qp); int xsc_core_destroy_qp(struct xsc_core_device *xdev, - struct xsc_core_qp *qp) + struct xsc_core_qp *qp) { struct xsc_destroy_qp_mbox_in in; struct xsc_destroy_qp_mbox_out out; @@ -174,12 +284,14 @@ int xsc_core_destroy_qp(struct xsc_core_device *xdev, memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); in.qpn = cpu_to_be32(qp->qpn); -#ifdef XSC_CHIP_RDMA_UNSUPPORTED - if ((qp->qp_type == XSC_QUEUE_TYPE_RDMA_MAD) || - (qp->qp_type == XSC_QUEUE_TYPE_RDMA_RC)) { - exec = 0; + + if (!is_support_rdma(xdev)) { + if (qp->qp_type == XSC_QUEUE_TYPE_RDMA_MAD || + qp->qp_type == XSC_QUEUE_TYPE_RDMA_RC) { + 
exec = 0; + } } -#endif + if (exec) { err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); if (err) @@ -194,9 +306,9 @@ int xsc_core_destroy_qp(struct xsc_core_device *xdev, EXPORT_SYMBOL_GPL(xsc_core_destroy_qp); int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, - enum xsc_qp_state new_state, - struct xsc_modify_qp_mbox_in *in, int sqd_event, - struct xsc_core_qp *qp) + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp) { static const u16 optab[XSC_QP_NUM_STATE][XSC_QP_NUM_STATE] = { [XSC_QP_STATE_RST] = { @@ -241,6 +353,7 @@ int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state struct xsc_modify_qp_mbox_out out; int err = 0; u16 op; + u8 pf_id; if (cur_state >= XSC_QP_NUM_STATE || new_state >= XSC_QP_NUM_STATE || !optab[cur_state][new_state]) @@ -250,20 +363,24 @@ int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state op = optab[cur_state][new_state]; in->hdr.opcode = cpu_to_be16(op); in->qpn = cpu_to_be32(qp->qpn); - // TODO not support host2soc qp group + in->no_need_wait = 1; if (new_state == XSC_QP_STATE_RTR) { if (qp->qp_type_internal == XSC_QUEUE_TYPE_RDMA_RC && - ((in->ctx.ip_type == 0 && in->ctx.dip[0] == in->ctx.sip[0]) || - (in->ctx.ip_type != 0 && - memcmp(in->ctx.dip, in->ctx.sip, sizeof(in->ctx.sip)) == 0))) - in->ctx.qp_out_port = NIF_PORT_NUM + xsc_get_pcie_no(); - else if (in->ctx.lag_sel_en == 0) - in->ctx.qp_out_port = XSC_PF_VF_GET_PF_ID(xdev->glb_func_id); - else + ((in->ctx.ip_type == 0 && in->ctx.dip[0] == in->ctx.sip[0]) || + (in->ctx.ip_type != 0 && + memcmp(in->ctx.dip, in->ctx.sip, sizeof(in->ctx.sip)) == 0))) { + in->ctx.qp_out_port = xdev->caps.nif_port_num + g_xsc_pcie_no; + } else if (in->ctx.lag_sel_en == 0) { + if (funcid_to_pf_index(&xdev->caps, xdev->glb_func_id, &pf_id)) + in->ctx.qp_out_port = pf_id; + else + return -EINVAL; + } else { in->ctx.qp_out_port = in->ctx.lag_sel; + } - 
in->ctx.pcie_no = xsc_get_pcie_no(); + in->ctx.pcie_no = g_xsc_pcie_no; in->ctx.func_id = cpu_to_be16(xdev->glb_func_id); } @@ -271,9 +388,20 @@ int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state if (err) return err; + if ((op == XSC_CMD_OP_2RST_QP || op == XSC_CMD_OP_2ERR_QP) && out.hdr.status) { + xsc_core_dbg(xdev, "qp %d flush incomplete in fw.\n", qp->qpn); + init_completion(&qp->delayed_release); + xsc_add_to_delayed_release_list(xdev, qp); + out.hdr.status = 0; + while ((err = wait_for_completion_interruptible(&qp->delayed_release)) + == -ERESTARTSYS) + xsc_core_dbg(xdev, "qp %d wait for completion is interrupted, err = %d\n", + qp->qpn, err); + } + if (new_state == XSC_QP_STATE_RTR) { - qp->trace_info->main_ver = 1; - qp->trace_info->sub_ver = 0; + qp->trace_info->main_ver = YS_QPTRACE_VER_MAJOR; + qp->trace_info->sub_ver = YS_QPTRACE_VER_MINOR; qp->trace_info->qp_type = qp->qp_type; qp->trace_info->s_port = in->ctx.src_udp_port; qp->trace_info->d_port = cpu_to_be16(4791); @@ -287,15 +415,14 @@ int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state qp->trace_info->d_addr.d_addr4 = in->ctx.dip[0]; } else { memcpy(qp->trace_info->s_addr.s_addr6, in->ctx.sip, - sizeof(qp->trace_info->s_addr.s_addr6)); + sizeof(qp->trace_info->s_addr.s_addr6)); memcpy(qp->trace_info->d_addr.d_addr6, in->ctx.dip, - sizeof(qp->trace_info->d_addr.d_addr6)); + sizeof(qp->trace_info->d_addr.d_addr6)); } err = xsc_create_qptrace(xdev, qp); if (err) return err; - } return xsc_cmd_status_to_err(&out.hdr); @@ -303,7 +430,7 @@ int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state EXPORT_SYMBOL_GPL(xsc_core_qp_modify); int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, - struct xsc_query_qp_mbox_out *out, int outlen) + struct xsc_query_qp_mbox_out *out, int outlen) { struct xsc_query_qp_mbox_in in; int err; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c 
b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c new file mode 100644 index 0000000000000000000000000000000000000000..6adc1345f91c185e3336b86aae09edb9c0e8309e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" + +#define QPTS_ELEMENT_MAX_NUM 0x4000 //16384 = 16k + +static struct proc_dir_entry *g_entry; +static DECLARE_WAIT_QUEUE_HEAD(g_ring_buff_wait); +static struct xsc_qpt_update_msg *g_ring_buff; +static spinlock_t g_ring_buff_lock; + +static unsigned long R; +static unsigned long R_cur; +static unsigned long W; +static unsigned long open_once_flag; + +static int read_buff(struct xsc_qpt_update_msg *msg) +{ + spin_lock_irq(&g_ring_buff_lock); + if (R_cur == W) { + spin_unlock_irq(&g_ring_buff_lock); + return 0; + } + + *msg = g_ring_buff[R_cur]; + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + spin_unlock_irq(&g_ring_buff_lock); + + return 1; +} + +static void write_buff(struct xsc_qpt_update_msg *msg) +{ + spin_lock_irq(&g_ring_buff_lock); + g_ring_buff[W] = *msg; + W = (W + 1) % QPTS_ELEMENT_MAX_NUM; + if (R == W) + R = (R + 1) % QPTS_ELEMENT_MAX_NUM; + + if (R_cur == W) + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + + spin_unlock_irq(&g_ring_buff_lock); + + wake_up_interruptible(&g_ring_buff_wait); +} + +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg) +{ + if (!msg) + return -1; + + write_buff(msg); + + return 0; +} +EXPORT_SYMBOL(qpts_write_one_msg); + +static int qpts_open(struct inode *inode, struct file *file) +{ + spin_lock_irq(&g_ring_buff_lock); + if (open_once_flag) { + spin_unlock_irq(&g_ring_buff_lock); + return -1; + } + open_once_flag = 1; + R_cur = R; + spin_unlock_irq(&g_ring_buff_lock); + + return 0; +} + +static int qpts_release(struct inode *inode, 
struct file *file) +{ + spin_lock_irq(&g_ring_buff_lock); + open_once_flag = 0; + spin_unlock_irq(&g_ring_buff_lock); + + wake_up_poll(&g_ring_buff_wait, EPOLLHUP); + + return 0; +} + +static ssize_t qpts_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + int error = -EINVAL, i = 0; + struct xsc_qpt_update_msg qpt_msg = {0}; + + if ((file->f_flags & O_NONBLOCK) && R_cur == W) + goto out; + + if (!buf || !count) { + pr_err("%s error, null buffer or count!\n", __func__); + goto out; + } + + error = wait_event_interruptible(g_ring_buff_wait, (R_cur != W)); + if (error) + goto out; + + while (!error && i < count && read_buff(&qpt_msg)) { + error = copy_to_user(buf, &qpt_msg, sizeof(qpt_msg)); + buf += sizeof(qpt_msg); + i += sizeof(qpt_msg); + } + + if (!error) + error = i; + +out: + return error; +} + +static __poll_t qpts_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &g_ring_buff_wait, wait); + + if (R_cur != W) + return EPOLLIN | EPOLLRDNORM; + + return 0; +} + +const struct proc_ops qpts_ops = { + .proc_open = qpts_open, + .proc_read = qpts_read, + .proc_poll = qpts_poll, + .proc_release = qpts_release, +}; + +int qpts_init(void) +{ + g_ring_buff = kcalloc(QPTS_ELEMENT_MAX_NUM, sizeof(struct xsc_qpt_update_msg), GFP_KERNEL); + if (!g_ring_buff) + return -ENOMEM; + + spin_lock_init(&g_ring_buff_lock); + + g_entry = proc_create_data("qpts_kmsg", 0400, NULL, &qpts_ops, NULL); + if (!g_entry) { + pr_err("Could not create /proc/qpts_kmsg file!\n"); + goto error_qpts_init; + } + + return 0; + +error_qpts_init: + kfree(g_ring_buff); + g_ring_buff = NULL; + return -1; +} + +void qpts_fini(void) +{ + remove_proc_entry("qpts_kmsg", NULL); + + kfree(g_ring_buff); + g_ring_buff = NULL; + g_entry = NULL; +} + diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..495371b2193c6d418718cc2aa82ff670bf555015 --- 
/dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/res_obj.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" + +static int xsc_alloc_obj(struct xsc_res_obj *obj, struct xsc_bdf_file *file, + void (*release_func)(void *), unsigned long key, + char *data, unsigned int datalen) +{ + obj->release_method = release_func; + obj->file = file; + obj->datalen = datalen; + if (datalen) { + obj->data = kmalloc(datalen, GFP_KERNEL); + if (!obj->data) + return -ENOMEM; + memcpy(obj->data, data, datalen); + } + + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->obj_lock); + radix_tree_insert(&file->obj_tree, key, (void *)obj); + spin_unlock(&file->obj_lock); + radix_tree_preload_end(); + + return 0; +} + +static inline void xsc_free_obj(struct xsc_bdf_file *file, unsigned long key, + struct xsc_res_obj **obj) +{ + *obj = radix_tree_delete(&file->obj_tree, key); + if (!*obj) + return; + if ((*obj)->datalen) + kfree((*obj)->data); +} + +static void xsc_send_cmd_dealloc_pd(struct xsc_core_device *xdev, unsigned int pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dealloc pd %d\n", pdn); +} + +static void xsc_free_pd_obj(void *obj) +{ + struct xsc_pd_obj *pd_obj = container_of(obj, struct xsc_pd_obj, obj); + struct xsc_bdf_file *file = pd_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_dealloc_pd(file->xdev, pd_obj->pdn); + key = xsc_idx_to_key(RES_OBJ_PD, pd_obj->pdn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(pd_obj->obj.file->xdev, "free pd obj: %d\n", 
pd_obj->pdn); + kfree(pd_obj); +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, + unsigned int pdn, char *data, unsigned int datalen) +{ + struct xsc_pd_obj *pd_obj; + unsigned long key; + int ret; + + pd_obj = kzalloc(sizeof(*pd_obj), GFP_KERNEL); + if (!pd_obj) + return -ENOMEM; + + pd_obj->pdn = pdn; + key = xsc_idx_to_key(RES_OBJ_PD, pdn); + ret = xsc_alloc_obj(&pd_obj->obj, file, xsc_free_pd_obj, key, data, datalen); + if (ret) { + kfree(pd_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc pd %d obj\n", pdn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pd_obj); + +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn) +{ + struct xsc_pd_obj *pd_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PD, pdn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pd_obj = container_of(obj, struct xsc_pd_obj, obj); + kfree(pd_obj); + xsc_core_dbg(file->xdev, "destroy pd %d obj\n", pdn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pd_obj); + +static void xsc_send_cmd_destroy_mkey(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_destroy_mkey_mbox_in in; + struct xsc_destroy_mkey_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mkey); +#ifdef REG_MR_VIA_CMDQ + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); +#else + ret = xsc_destroy_mkey(xdev, &in, &out); +#endif + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy mkey %d\n", mkey); +} + +static void xsc_send_cmd_dereg_mr(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(mkey); +#ifdef REG_MR_VIA_CMDQ + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); +#else + ret = xsc_dereg_mr(xdev, &in, &out); +#endif + if (ret || out.hdr.status != 0) + 
xsc_core_err(xdev, "failed to dereg mr %d\n", mkey); +} + +static void xsc_free_mr_obj(void *obj) +{ + struct xsc_mr_obj *mr_obj = container_of(obj, struct xsc_mr_obj, obj); + struct xsc_bdf_file *file = mr_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mr_obj->mkey); + struct xsc_res_obj *_obj; + + xsc_send_cmd_destroy_mkey(file->xdev, mr_obj->mkey); + xsc_send_cmd_dereg_mr(file->xdev, mr_obj->mkey); + + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free mr obj: %d\n", mr_obj->mkey); + kfree(mr_obj); +} + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, + unsigned int mkey, char *data, unsigned int datalen) +{ + struct xsc_mr_obj *mr_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + int ret; + + mr_obj = kzalloc(sizeof(*mr_obj), GFP_KERNEL); + if (!mr_obj) + return -ENOMEM; + + mr_obj->mkey = mkey; + ret = xsc_alloc_obj(&mr_obj->obj, file, xsc_free_mr_obj, key, data, datalen); + if (ret) { + kfree(mr_obj); + return ret; + } + + xsc_core_dbg(file->xdev, "alloc mr %d obj\n", mkey); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_mr_obj); + +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey) +{ + struct xsc_mr_obj *mr_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + mr_obj = container_of(obj, struct xsc_mr_obj, obj); + kfree(mr_obj); + xsc_core_dbg(file->xdev, "destroy mr %d obj\n", mkey); +} +EXPORT_SYMBOL_GPL(xsc_destroy_mr_obj); + +static void xsc_send_cmd_destroy_cq(struct xsc_core_device *xdev, unsigned int cqn) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cqn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy cq %d\n", cqn); +} + +static void 
xsc_free_cq_obj(void *obj) +{ + struct xsc_cq_obj *cq_obj = container_of(obj, struct xsc_cq_obj, obj); + struct xsc_bdf_file *file = cq_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cq_obj->cqn); + struct xsc_res_obj *_obj; + + xsc_send_cmd_destroy_cq(file->xdev, cq_obj->cqn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free cq obj: %d\n", cq_obj->cqn); + kfree(cq_obj); +} + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen) +{ + struct xsc_cq_obj *cq_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + int ret; + + cq_obj = kzalloc(sizeof(*cq_obj), GFP_KERNEL); + if (!cq_obj) + return -ENOMEM; + + cq_obj->cqn = cqn; + ret = xsc_alloc_obj(&cq_obj->obj, file, xsc_free_cq_obj, key, data, datalen); + if (ret) { + kfree(cq_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc cq %d obj\n", cqn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_cq_obj); + +void xsc_destroy_cq_obj(struct xsc_bdf_file *file, unsigned int cqn) +{ + struct xsc_cq_obj *cq_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + cq_obj = container_of(obj, struct xsc_cq_obj, obj); + kfree(cq_obj); + xsc_core_dbg(file->xdev, "destroy cq %d obj\n", cqn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_cq_obj); + +static void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_2RST_QP); + in.qpn = cpu_to_be32(qpn); + in.no_need_wait = 0; + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to modify qp %d to rst\n", qpn); +} + +static void xsc_send_cmd_destroy_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_destroy_qp_mbox_in in; + struct 
xsc_destroy_qp_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qpn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy qp %d\n", qpn); +} + +static void xsc_free_qp_obj(void *obj) +{ + struct xsc_qp_obj *qp_obj = container_of(obj, struct xsc_qp_obj, obj); + struct xsc_bdf_file *file = qp_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_2rst_qp(file->xdev, qp_obj->qpn); + xsc_send_cmd_destroy_qp(file->xdev, qp_obj->qpn); + + key = xsc_idx_to_key(RES_OBJ_QP, qp_obj->qpn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free qp obj: %d\n", qp_obj->qpn); + kfree(qp_obj); +} + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen) +{ + struct xsc_qp_obj *qp_obj; + unsigned long key; + int ret; + + qp_obj = kzalloc(sizeof(*qp_obj), GFP_KERNEL); + if (!qp_obj) + return -ENOMEM; + + qp_obj->qpn = qpn; + key = xsc_idx_to_key(RES_OBJ_QP, qpn); + ret = xsc_alloc_obj(&qp_obj->obj, file, xsc_free_qp_obj, key, data, datalen); + if (ret) { + kfree(qp_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc qp %d obj\n", qpn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_qp_obj); + +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn) +{ + struct xsc_qp_obj *qp_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_QP, qpn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + qp_obj = container_of(obj, struct xsc_qp_obj, obj); + kfree(qp_obj); + xsc_core_dbg(file->xdev, "destroy qp %d obj\n", qpn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_qp_obj); + +static void xsc_send_cmd_del_pct(struct xsc_core_device *xdev, + unsigned int priority) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct xsc_ioctl_data_tl *tl; + struct xsc_flow_pct_v4_del 
*pct_v4; + unsigned int inlen; + unsigned int outlen; + int ret; + + inlen = sizeof(struct xsc_ioctl_mbox_in) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return; + + outlen = sizeof(struct xsc_ioctl_mbox_out) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + out = kzalloc(outlen, GFP_KERNEL); + if (!out) { + kfree(in); + return; + } + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_FLOW); + in->len = sizeof(struct xsc_ioctl_data_tl) + sizeof(struct xsc_flow_pct_v4_del); + in->len = cpu_to_be16(in->len); + tl = (struct xsc_ioctl_data_tl *)in->data; + tl->opmod = XSC_IOCTL_OP_DEL; + tl->table = XSC_FLOW_TBL_PCT_V4; + tl->length = sizeof(struct xsc_flow_pct_v4_del); + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + pct_v4->priority = priority; + out->len = in->len; + ret = xsc_cmd_exec(xdev, in, inlen, out, outlen); + if (ret || out->hdr.status != 0) + xsc_core_err(xdev, "failed to del pct %d\n", priority); + + kfree(in); + kfree(out); +} + +static void xsc_free_pct_obj(void *obj) +{ + struct xsc_pct_obj *pct_obj = container_of(obj, struct xsc_pct_obj, obj); + struct xsc_bdf_file *file = pct_obj->obj.file; + struct xsc_res_obj *_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, pct_obj->pct_idx); + + xsc_send_cmd_del_pct(file->xdev, pct_obj->pct_idx); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free pct obj, priority:%d\n", pct_obj->pct_idx); + kfree(pct_obj); +} + +/* both pct4 and pct6 are allocated in the same tcam table, so we can delete pct6 + * by pct4 method + */ +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen) +{ + struct xsc_pct_obj *pct_obj; + int ret; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + pct_obj = kzalloc(sizeof(*pct_obj), GFP_KERNEL); + if (!pct_obj) + return -ENOMEM; + + pct_obj->pct_idx = priority; + ret = xsc_alloc_obj(&pct_obj->obj, 
file, xsc_free_pct_obj, key, data, datalen); + if (ret) + kfree(pct_obj); + xsc_core_dbg(file->xdev, "alloc pct %d obj\n", priority); + return ret; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pct_obj); + +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority) +{ + struct xsc_pct_obj *pct_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pct_obj = container_of(obj, struct xsc_pct_obj, obj); + kfree(pct_obj); + xsc_core_dbg(file->xdev, "destroy pct %d obj\n", priority); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pct_obj); + +void xsc_close_bdf_file(struct xsc_bdf_file *file) +{ + struct radix_tree_iter iter; + void **slot; + struct xsc_res_obj *obj; + + xsc_core_warn(file->xdev, "release bdf file:%lx\n", file->key); + spin_lock(&file->obj_lock); + radix_tree_for_each_slot(slot, &file->obj_tree, &iter, 0) { + obj = (struct xsc_res_obj *)(*slot); + obj->release_method(obj); + } + spin_unlock(&file->obj_lock); +} +EXPORT_SYMBOL_GPL(xsc_close_bdf_file); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c index 4011e998cb1465f7fd7671437be0c9f119ba8fe1..eb95df4eae3ec2148da0cef7183ee1bb0454853c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c @@ -1,33 +1,71 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_lag.h" +#include "common/vport.h" #ifdef CONFIG_XSC_ESWITCH #include "eswitch.h" #endif #include "fw/xsc_tbm.h" +#include "xsc_pci_ctrl.h" + +//static int sriov_restore_guids(struct xsc_core_device *dev, int vf) +//{ +// struct xsc_core_sriov *sriov = &dev->priv.sriov; +// struct xsc_hca_vport_context *in; +// int err = 0; +// +// /* Restore sriov guid and policy settings */ +// if (sriov->vfs_ctx[vf].node_guid || +// sriov->vfs_ctx[vf].port_guid || +// sriov->vfs_ctx[vf].policy != XSC_POLICY_INVALID) { +// in = kzalloc(sizeof(*in), GFP_KERNEL); +// if (!in) +// return -ENOMEM; +// +// in->node_guid = sriov->vfs_ctx[vf].node_guid; +// in->port_guid = sriov->vfs_ctx[vf].port_guid; +// in->vport_state_policy = sriov->vfs_ctx[vf].policy; +// in->field_select = +// !!(in->port_guid) * XSC_HCA_VPORT_SEL_PORT_GUID | +// !!(in->node_guid) * XSC_HCA_VPORT_SEL_NODE_GUID | +// !!(in->vport_state_policy) * XSC_HCA_VPORT_SEL_STATE_POLICY; +// +// err = xsc_modify_hca_vport_context(dev, 1, 1, vf + 1, in); +// if (err) +// xsc_core_warn(dev, "modify VF%d vport context failed\n", vf); +// +// kfree(in); +// } +// +// return err; +//} static int xsc_device_enable_sriov(struct xsc_core_device *dev, int num_vfs) { struct xsc_core_sriov *sriov = &dev->priv.sriov; - int err; u16 vf; + u16 max_msix = 0; + int err; + + max_msix = xsc_get_irq_matrix_global_available(dev); + xsc_core_info(dev, "global_available=%u\n", max_msix); + err = xsc_cmd_enable_hca(dev, num_vfs, max_msix); + if (err) + return err; if (!XSC_ESWITCH_MANAGER(dev)) goto enable_vfs; #ifdef CONFIG_XSC_ESWITCH err = xsc_eswitch_enable(dev->priv.eswitch, XSC_ESWITCH_LEGACY, - num_vfs); + num_vfs); if (err) { - xsc_core_warn(dev, - "failed to enable eswitch SRIOV (%d)\n", err); + xsc_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); return err; } #endif @@ -44,36 +82,30 @@ static int xsc_device_enable_sriov(struct 
xsc_core_device *dev, int num_vfs) } xsc_lag_disable(dev); - for (vf = 0; vf < num_vfs; vf++) { - err = xsc_cmd_enable_hca(dev, vf); - if (err) { - xsc_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err); - continue; - } - + for (vf = 0; vf < num_vfs; vf++) sriov->vfs_ctx[vf].enabled = 1; - xsc_core_info(dev, "enabled VF%d ok\n", vf); - } xsc_lag_enable(dev); return 0; } static void xsc_device_disable_sriov(struct xsc_core_device *dev, - int num_vfs, bool clear_vf) + int num_vfs, bool clear_vf) { struct xsc_core_sriov *sriov = &dev->priv.sriov; int vf, err; + err = xsc_cmd_disable_hca(dev, (u16)num_vfs); + if (err) { + xsc_core_warn(dev, "failed to disable hca, num_vfs=%d, err=%d\n", + num_vfs, err); + return; + } + for (vf = num_vfs - 1; vf >= 0; vf--) { if (!sriov->vfs_ctx[vf].enabled) continue; - err = xsc_cmd_disable_hca(dev, (u16)vf); - if (err) { - xsc_core_warn(dev, "failed to disable VF %d\n", vf); - continue; - } sriov->vfs_ctx[vf].enabled = 0; } @@ -95,8 +127,8 @@ static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) if (num_vfs > dev->caps.max_vfs) { xsc_core_warn(dev, - "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", - num_vfs, dev->caps.max_vfs); + "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", + num_vfs, dev->caps.max_vfs); return -EINVAL; } @@ -104,9 +136,8 @@ static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) if (num_vfs == pci_num_vf(dev->pdev)) return 0; - xsc_core_warn(dev, - "VFs already enabled. Disable before enabling %d VFs\n", - num_vfs); + xsc_core_warn(dev, "VFs already enabled. 
Disable before enabling %d VFs\n", + num_vfs); return -EBUSY; } @@ -123,7 +154,7 @@ static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) xsc_core_warn(dev, "pci_enable_sriov failed : %d\n", err); xsc_device_disable_sriov(dev, num_vfs, true); } - xsc_set_vf_pp_status(dev, true); + return err; } @@ -134,8 +165,8 @@ static void xsc_sriov_disable(struct pci_dev *pdev) xsc_core_info(dev, "%s: num_vfs=%d\n", __func__, num_vfs); pci_disable_sriov(pdev); + xsc_device_disable_sriov(dev, num_vfs, true); - xsc_set_vf_pp_status(dev, true); } int xsc_core_sriov_configure(struct pci_dev *pdev, int num_vfs) @@ -145,7 +176,7 @@ int xsc_core_sriov_configure(struct pci_dev *pdev, int num_vfs) int err = 0; xsc_core_info(dev, "%s: requested num_vfs %d\n", - __func__, num_vfs); + __func__, num_vfs); if (num_vfs) err = xsc_sriov_enable(pdev, num_vfs); @@ -164,6 +195,9 @@ int xsc_sriov_attach(struct xsc_core_device *dev) struct xsc_core_sriov *sriov; if (!xsc_core_is_pf(dev)) { + if (!pdev->physfn) /*for vf passthrough vm*/ + return 0; + pf_xdev = pci_get_drvdata(pdev->physfn); sriov = &pf_xdev->priv.sriov; @@ -189,6 +223,27 @@ void xsc_sriov_detach(struct xsc_core_device *dev) static u16 xsc_get_max_vfs(struct xsc_core_device *dev) { +// u16 host_total_vfs; +// const u32 *out; + +// if (xsc_core_is_ecpf_esw_manager(dev)) { +// out = xsc_esw_query_functions(dev); + +// // Old FW doesn't support getting total_vfs from host params +// // but supports getting from pci_sriov. 
+// +// if (IS_ERR(out)) +// goto done; + +// host_total_vfs = XSC_GET(query_esw_functions_out, out, +// host_params_context.host_total_vfs); + +// kvfree(out); +// if (host_total_vfs) +// return host_total_vfs; +// } + +//done: /* In RH6.8 and lower pci_sriov_get_totalvfs might return -EINVAL * return in that case 1 */ @@ -197,7 +252,7 @@ static u16 xsc_get_max_vfs(struct xsc_core_device *dev) } static int xsc_sriov_pci_cfg_info(struct xsc_core_device *dev, - struct xsc_pci_sriov *iov) + struct xsc_pci_sriov *iov) { int pos; struct pci_dev *pdev = dev->pdev; @@ -205,7 +260,7 @@ static int xsc_sriov_pci_cfg_info(struct xsc_core_device *dev, pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (!pos) { xsc_core_err(dev, "%s: failed to find SRIOV capability in device\n", - __func__); + __func__); return -ENODEV; } @@ -239,14 +294,14 @@ int xsc_sriov_init(struct xsc_core_device *dev) err = xsc_sriov_pci_cfg_info(dev, iov); if (err) { xsc_core_warn(dev, "%s: pci not support sriov, err=%d\n", - __func__, err); + __func__, err); return 0; } total_vfs = pci_sriov_get_totalvfs(pdev); if (unlikely(iov->total_vfs == 0)) { xsc_core_warn(dev, "%s: pci not support sriov, total_vfs=%d, cur_vfs=%d\n", - __func__, iov->total_vfs, sriov->num_vfs); + __func__, iov->total_vfs, sriov->num_vfs); return 0; } sriov->max_vfs = xsc_get_max_vfs(dev); @@ -261,9 +316,9 @@ int xsc_sriov_init(struct xsc_core_device *dev) return -ENOMEM; xsc_core_info(dev, "%s: total_vfs=%d, cur_vfs=%d, vf_bdf_base=0x%02x\n", - __func__, total_vfs, sriov->num_vfs, sriov->vf_bdf_base); + __func__, total_vfs, sriov->num_vfs, sriov->vf_bdf_base); xsc_core_info(dev, "%s: vf_offset=%d, stride=%d, vf_device_id=0x%x\n", - __func__, iov->offset, iov->stride, iov->vf_device); + __func__, iov->offset, iov->stride, iov->vf_device); err = xsc_sriov_sysfs_init(dev); if (err) { xsc_core_warn(dev, "failed to init SRIOV sysfs (%d)\n", err); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c 
b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c index 545c5deae255d3cd4177ba082176ea39f93ea587..fec4474d44a8c0ee6a0d5c6442df54de6ca21ca7 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c @@ -1,14 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include #include #include -#include -#include +#include "common/xsc_core.h" +#include "common/vport.h" #ifdef CONFIG_XSC_ESWITCH #include "eswitch.h" #endif @@ -50,9 +49,9 @@ static ssize_t vf_attr_store(struct kobject *kobj, struct vf_group_attributes { struct attribute attr; - ssize_t (*show)(struct xsc_vgroup *group, struct vf_group_attributes *attr, + ssize_t (*show)(struct xsc_vgroup *g, struct vf_group_attributes *attr, char *buf); - ssize_t (*store)(struct xsc_vgroup *group, struct vf_group_attributes *attr, + ssize_t (*store)(struct xsc_vgroup *g, struct vf_group_attributes *attr, const char *buf, size_t count); }; @@ -120,7 +119,7 @@ static ssize_t port_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, return -EINVAL; for (i = 0; i < 8; i++) - guid += ((u64)tmp[i] << ((7 - i)*8)); + guid += ((u64)tmp[i] << ((7 - i) * 8)); in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) @@ -162,7 +161,7 @@ static ssize_t node_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, err = show_nic_node_guid(dev, g->vf, &guid); if (err) { xsc_core_warn(dev, "failed to query node guid for vf %d (%d)\n", - g->vf, err); + g->vf, err); return err; } @@ -194,12 +193,12 @@ static ssize_t node_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, return -EINVAL; for (i = 0; i < 8; i++) - guid += ((u64)tmp[i] << ((7 - i)*8)); + guid += ((u64)tmp[i] << ((7 - i) * 8)); err = modify_nic_node_guid(dev, g->vf, guid); if (err) { xsc_core_warn(dev, "failed to modify node guid for vf %d 
(%d)\n", - g->vf, err); + g->vf, err); return err; } @@ -231,7 +230,7 @@ static ssize_t policy_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, err = xsc_query_hca_vport_context(dev, 1, 1, g->vf, rep); if (err) { xsc_core_warn(dev, "failed to query port policy for vf %d (%d)\n", - g->vf, err); + g->vf, err); goto free; } p = policy_str(rep->vport_state_policy); @@ -324,8 +323,7 @@ static ssize_t mac_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, static ssize_t vlan_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, char *buf) { - return sprintf(buf, - "usage: write to set VF Vlan,Qos and Vlan Protocol\n"); + return sprintf(buf, ": set VF Vlan, Qos, Vlan Proto(default 802.1Q)\n"); } static ssize_t vlan_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, @@ -356,7 +354,7 @@ static ssize_t vlan_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, } err = xsc_eswitch_set_vport_vlan(dev->priv.eswitch, g->vf + 1, - vlan_id, qos, vlan_proto); + vlan_id, qos, vlan_proto); return err ? err : count; } @@ -476,12 +474,12 @@ static ssize_t max_tx_rate_store(struct xsc_sriov_vf *g, min_tx_rate = esw->vports[g->vf + 1].info.min_rate; mutex_unlock(&esw->state_lock); - err = kstrtou32(buf, 10, &max_tx_rate); + err = kstrtouint(buf, 10, &max_tx_rate); if (err != 1) return -EINVAL; err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1, - max_tx_rate, min_tx_rate); + max_tx_rate, min_tx_rate); return err ? err : count; } @@ -507,12 +505,12 @@ static ssize_t min_tx_rate_store(struct xsc_sriov_vf *g, max_tx_rate = esw->vports[g->vf + 1].info.max_rate; mutex_unlock(&esw->state_lock); - err = kstrtou32(buf, 10, &min_tx_rate); + err = kstrtouint(buf, 10, &min_tx_rate); if (err != 1) return -EINVAL; err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1, - max_tx_rate, min_tx_rate); + max_tx_rate, min_tx_rate); return err ? 
err : count; } @@ -520,8 +518,7 @@ static ssize_t min_pf_tx_rate_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, char *buf) { - return sprintf(buf, - "usage: write to set PF min rate\n"); + return sprintf(buf, "usage: write to set PF min rate\n"); } static ssize_t min_pf_tx_rate_store(struct xsc_sriov_vf *g, @@ -538,12 +535,12 @@ static ssize_t min_pf_tx_rate_store(struct xsc_sriov_vf *g, max_tx_rate = esw->vports[g->vf].info.max_rate; mutex_unlock(&esw->state_lock); - err = kstrtou32(buf, 10, &min_tx_rate); + err = kstrtouint(buf, 10, &min_tx_rate); if (err != 1) return -EINVAL; err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf, - max_tx_rate, min_tx_rate); + max_tx_rate, min_tx_rate); return err ? err : count; } @@ -564,7 +561,7 @@ static ssize_t group_store(struct xsc_sriov_vf *g, u32 group_id; int err; - err = kstrtou32(buf, 10, &group_id); + err = kstrtouint(buf, 10, &group_id); if (err != 1) return -EINVAL; @@ -593,7 +590,7 @@ static ssize_t max_tx_rate_group_store(struct xsc_vgroup *g, u32 max_rate; int err; - err = kstrtou32(buf, 10, &max_rate); + err = kstrtouint(buf, 10, &max_rate); if (err != 1) return -EINVAL; @@ -619,7 +616,7 @@ static ssize_t min_tx_rate_group_store(struct xsc_vgroup *g, u32 min_rate; int err; - err = kstrtou32(buf, 10, &min_rate); + err = kstrtouint(buf, 10, &min_rate); if (err != 1) return -EINVAL; @@ -629,8 +626,8 @@ static ssize_t min_tx_rate_group_store(struct xsc_vgroup *g, } #define _sprintf(p, buf, format, arg...) \ - ((PAGE_SIZE - (int)(p - buf)) <= 0 ? 0 : \ - scnprintf(p, PAGE_SIZE - (int)(p - buf), format, ## arg)) + ((PAGE_SIZE - (int)((p) - (buf))) <= 0 ? 
0 : \ + scnprintf((p), PAGE_SIZE - (int)((p) - (buf)), format, ## arg)) static ssize_t trunk_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, @@ -670,12 +667,12 @@ static ssize_t trunk_store(struct xsc_sriov_vf *g, if (!strcmp(op, "add")) err = xsc_eswitch_add_vport_trunk_range(dev->priv.eswitch, - g->vf + 1, - start_vid, end_vid); + g->vf + 1, + start_vid, end_vid); else if (!strcmp(op, "rem")) err = xsc_eswitch_del_vport_trunk_range(dev->priv.eswitch, - g->vf + 1, - start_vid, end_vid); + g->vf + 1, + start_vid, end_vid); else return -EINVAL; @@ -791,7 +788,7 @@ static ssize_t stats_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, #endif /* CONFIG_XSC_ESWITCH */ static ssize_t num_vfs_store(struct device *device, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { struct pci_dev *pdev = container_of(device, struct pci_dev, dev); int req_vfs; @@ -809,10 +806,10 @@ static ssize_t num_vfs_store(struct device *device, struct device_attribute *att } static ssize_t num_vfs_show(struct device *device, struct device_attribute *attr, - char *buf) + char *buf) { struct pci_dev *pdev = container_of(device, struct pci_dev, dev); - struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_device *dev = pci_get_drvdata(pdev); struct xsc_core_sriov *sriov = &dev->priv.sriov; return sprintf(buf, "%d\n", sriov->num_vfs); @@ -978,7 +975,7 @@ void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev) } int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, - u32 group_id, struct kobject *group_kobj) + u32 group_id, struct kobject *group_kobj) { #ifdef CONFIG_XSC_ESWITCH struct xsc_core_sriov *sriov = &dev->priv.sriov; @@ -996,7 +993,7 @@ int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, } void xsc_destroy_vf_group_sysfs(struct xsc_core_device *dev, - struct kobject *group_kobj) + struct kobject *group_kobj) { #ifdef CONFIG_XSC_ESWITCH kobject_put(group_kobj); diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h index d8e5e5f97034f9e9eaff7fa96ef12a06bdf3e875..38a7f5b4f7610954085d7473ec67eb6f958a1ba2 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c index 4915fe2b1a6be3d12983dca0dd54ee6b48136a3d..0edda05262d4c5ac0cdfcabaabfb10e5ce118722 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c @@ -1,17 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ #include #include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "eswitch.h" +#include "common/xsc_fs.h" static int _xsc_query_vport_state(struct xsc_core_device *dev, u8 opmod, - u16 vport, void *out, int outlen) + u16 vport, void *out, int outlen) { struct xsc_query_vport_state_in in; @@ -36,7 +36,7 @@ u8 xsc_query_vport_state(struct xsc_core_device *dev, u8 opmod, u16 vport) } int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u8 opmod, - u16 vport, u8 other_vport, u8 state) + u16 vport, u8 other_vport, u8 state) { struct xsc_modify_vport_state_in in; struct xsc_modify_vport_state_out out; @@ -53,8 +53,8 @@ int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u8 opmod, } static int __xsc_query_nic_vport_context(struct xsc_core_device *dev, - u16 vport, void *out, int outlen, - int force_other) + u16 vport, void *out, int outlen, + int force_other) { struct xsc_query_nic_vport_context_in in; @@ -68,25 +68,26 @@ static int __xsc_query_nic_vport_context(struct xsc_core_device *dev, } static int xsc_query_nic_vport_context(struct xsc_core_device *dev, u16 vport, - void *out, int outlen) + void *out, int outlen) { return __xsc_query_nic_vport_context(dev, vport, out, outlen, 0); } static int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, - int inlen) + int inlen) { struct xsc_modify_nic_vport_context_out out; struct xsc_modify_nic_vport_context_in *tmp; memset(&out, 0, sizeof(out)); tmp = (struct xsc_modify_nic_vport_context_in *)in; - tmp->hdr.opcode = XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT; + tmp->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + return xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); } int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, - u16 vport, u8 *min_inline) + u16 vport, u8 *min_inline) { struct xsc_query_nic_vport_context_out out; int err; @@ -100,12 +101,13 @@ int xsc_query_nic_vport_min_inline(struct xsc_core_device 
*dev, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_min_inline); void xsc_query_min_inline(struct xsc_core_device *dev, - u8 *min_inline_mode) + u8 *min_inline_mode) { switch (dev->caps.wqe_inline_mode) { case XSC_CAP_INLINE_MODE_VPORT_CONTEXT: if (!xsc_query_nic_vport_min_inline(dev, 0, min_inline_mode)) break; + fallthrough; case XSC_CAP_INLINE_MODE_L2: *min_inline_mode = XSC_INLINE_MODE_L2; break; @@ -117,7 +119,7 @@ void xsc_query_min_inline(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_min_inline); int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, - u16 vport, u8 min_inline) + u16 vport, u8 min_inline) { struct xsc_modify_nic_vport_context_in in; @@ -131,8 +133,8 @@ int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, } static int __xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr, - int force_other) + u16 vport, u8 *addr, + int force_other) { struct xsc_query_nic_vport_context_out out; u8 *out_addr; @@ -142,7 +144,7 @@ static int __xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, out_addr = out.nic_vport_ctx.permanent_address; err = __xsc_query_nic_vport_context(dev, vport, &out, sizeof(out), - force_other); + force_other); if (!err) ether_addr_copy(addr, out_addr); @@ -150,37 +152,50 @@ static int __xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, } int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr) + u16 vport, u8 *addr) { return __xsc_query_nic_vport_mac_address(dev, vport, addr, 1); } EXPORT_SYMBOL_GPL(xsc_query_other_nic_vport_mac_address); int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr) + u16 vport, u8 *addr) { return __xsc_query_nic_vport_mac_address(dev, vport, addr, 0); } EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_address); static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr, - int force_other) + u16 vport, u8 *addr, int force_other, bool 
perm_mac) { struct xsc_modify_nic_vport_context_in in; int err; - u8 *perm_mac; + u8 *mac_addr; + u16 caps = 0; + u16 caps_mask = 0; memset(&in, 0, sizeof(in)); - in.field_select.permanent_address = 1; - in.vport_number = vport; - if (vport || force_other) + if (perm_mac) { + in.field_select.permanent_address = 1; + mac_addr = in.nic_vport_ctx.permanent_address; + } else { + in.field_select.current_address = 1; + mac_addr = in.nic_vport_ctx.current_address; + } + + if (force_other) { in.other_vport = 1; + in.vport_number = cpu_to_be16(vport); + } - perm_mac = in.nic_vport_ctx.permanent_address; + if (xsc_get_pp_bypass_res(dev)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); + in.caps = cpu_to_be16(caps); + in.caps_mask = cpu_to_be16(caps_mask); - ether_addr_copy(perm_mac, addr); + ether_addr_copy(mac_addr, addr); err = xsc_modify_nic_vport_context(dev, &in, sizeof(in)); @@ -188,18 +203,18 @@ static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, } int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr) + u16 vport, u8 *addr, bool perm_mac) { - return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 1); + return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 1, perm_mac); } -EXPORT_SYMBOL_GPL(xsc_modify_other_nic_vport_mac_address); +EXPORT_SYMBOL(xsc_modify_other_nic_vport_mac_address); int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, - u16 vport, u8 *addr) + u16 vport, u8 *addr, bool perm_mac) { - return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 0); + return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 0, perm_mac); } -EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mac_address); +EXPORT_SYMBOL(xsc_modify_nic_vport_mac_address); int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu) { @@ -231,10 +246,10 @@ int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu) EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mtu); int 
xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, - u16 vport, - enum xsc_list_type list_type, - u8 addr_list[][ETH_ALEN], - int *list_size) + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size) { struct xsc_query_nic_vport_context_in in; struct xsc_query_nic_vport_context_out *out; @@ -252,7 +267,7 @@ int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, if (req_list_size > max_list_size) { xsc_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n", - req_list_size, max_list_size); + req_list_size, max_list_size); req_list_size = max_list_size; } @@ -287,9 +302,9 @@ int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_list); int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, - enum xsc_list_type list_type, - u8 addr_list[][ETH_ALEN], - int list_size) + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size) { struct xsc_modify_nic_vport_context_out out; struct xsc_modify_nic_vport_context_in *in; @@ -330,7 +345,7 @@ int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mac_list); int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, - unsigned long *vlans) + unsigned long *vlans) { struct xsc_query_nic_vport_context_in in; struct xsc_query_nic_vport_context_out *out; @@ -361,7 +376,7 @@ int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, req_list_size = out->nic_vport_ctx.allowed_list_size; for (i = 0; i < req_list_size; i++) { - u16 *vlan_addr = (u16 *)out->nic_vport_ctx.current_uc_mac_address[i]; + u16 *vlan_addr = (u16 *)&out->nic_vport_ctx.current_uc_mac_address[i]; bitmap_set(vlans, (*vlan_addr & 0xfff), 1); } @@ -372,48 +387,45 @@ int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_vlans); int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, - u16 vlans[], - int list_size) 
+ u16 vid, bool add) { struct xsc_modify_nic_vport_context_out out; struct xsc_modify_nic_vport_context_in *in; - int max_list_size; int in_sz; int err; - int i; - - max_list_size = 1 << dev->caps.log_max_vlan_list; + u16 *vlan_addr; - if (list_size > max_list_size) - return -ENOSPC; - - in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + - list_size * 8; + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; in = kzalloc(in_sz, GFP_KERNEL); if (!in) return -ENOMEM; - in->hdr.opcode = XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT; + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); in->field_select.addresses_list = 1; + in->nic_vport_ctx.vlan_allowed = add; in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; - in->nic_vport_ctx.allowed_list_size = list_size; - for (i = 0; i < list_size; i++) { - u16 *vlan_addr = (u16 *)in->nic_vport_ctx.current_uc_mac_address[i]; - *vlan_addr = vlans[i]; - } + vlan_addr = (u16 *)in->nic_vport_ctx.current_uc_mac_address[0]; + *vlan_addr = cpu_to_be16(vid); memset(&out, 0, sizeof(out)); err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); kfree(in); - return err; + + if (err || out.hdr.status) { + xsc_core_err(dev, "Failes to modify vlan err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + return 0; } EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_vlans); int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, - u64 *system_image_guid) + u64 *system_image_guid) { struct xsc_query_nic_vport_context_out out; @@ -427,7 +439,7 @@ int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_system_image_guid); int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, - u64 *node_guid) + u64 *node_guid) { struct xsc_query_nic_vport_context_out out; @@ -441,8 +453,8 @@ int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_node_guid); static int 
__xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, - u16 vport, u64 node_guid, - int force_other) + u16 vport, u64 node_guid, + int force_other) { struct xsc_modify_nic_vport_context_in in; int err; @@ -467,19 +479,19 @@ static int __xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, } int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, - u16 vport, u64 node_guid) + u16 vport, u64 node_guid) { return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 0); } int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, - u16 vport, u64 node_guid) + u16 vport, u64 node_guid) { return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 1); } int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, - u16 *qkey_viol_cntr) + u16 *qkey_viol_cntr) { struct xsc_query_nic_vport_context_out out; @@ -493,8 +505,8 @@ int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_qkey_viol_cntr); int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, - u8 port_num, u16 vf_num, u16 gid_index, - union ib_gid *gid) + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) { int in_sz = sizeof(struct xsc_query_hca_vport_gid_in); int out_sz = sizeof(struct xsc_query_hca_vport_gid_out); @@ -509,7 +521,7 @@ int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, is_group_manager = dev->caps.vport_group_manager; tbsz = dev->caps.port[port_num].gid_table_len; xsc_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", - vf_num, gid_index, tbsz); + vf_num, gid_index, tbsz); if (gid_index > tbsz && gid_index != 0xffff) return -EINVAL; @@ -559,8 +571,8 @@ int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, EXPORT_SYMBOL_GPL(xsc_query_hca_vport_gid); int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, - u8 port_num, u16 vf_num, u16 pkey_index, - u16 *pkey) + u8 port_num, u16 vf_num, u16 pkey_index, + u16 
*pkey) { int in_sz = sizeof(struct xsc_query_hca_vport_pkey_in); int out_sz = sizeof(struct xsc_query_hca_vport_pkey_out); @@ -584,7 +596,7 @@ int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, else nout = 1; - out_sz += nout*sizeof(*pkey); + out_sz += nout * sizeof(*pkey); in = kzalloc(in_sz, GFP_KERNEL); out = kzalloc(out_sz, GFP_KERNEL); @@ -624,9 +636,9 @@ int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, EXPORT_SYMBOL_GPL(xsc_query_hca_vport_pkey); int xsc_query_hca_vport_context(struct xsc_core_device *dev, - u8 other_vport, u8 port_num, - u16 vf_num, - struct xsc_hca_vport_context *rep) + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep) { struct xsc_query_hca_vport_context_out *out = NULL; struct xsc_query_hca_vport_context_in in; @@ -670,7 +682,7 @@ int xsc_query_hca_vport_context(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_hca_vport_context); int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, - u64 *node_guid) + u64 *node_guid) { struct xsc_hca_vport_context *rep; int err; @@ -689,10 +701,10 @@ int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_hca_vport_node_guid); int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, - u16 vport, - int *promisc_uc, - int *promisc_mc, - int *promisc_all) + u16 vport, + int *promisc_uc, + int *promisc_mc, + int *promisc_all) { struct xsc_query_nic_vport_context_out *out; int err; @@ -716,9 +728,9 @@ int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_query_nic_vport_promisc); int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, - int promisc_uc, - int promisc_mc, - int promisc_all) + int promisc_uc, + int promisc_mc, + int promisc_all) { struct xsc_modify_nic_vport_context_in *in; int err; @@ -741,8 +753,8 @@ int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_promisc); int 
xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, - int vf, u8 port_num, void *out, - size_t out_sz) + int vf, u8 port_num, void *out, + size_t out_sz) { struct xsc_query_vport_counter_in *in; int is_group_manager; @@ -777,9 +789,9 @@ int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, EXPORT_SYMBOL_GPL(xsc_query_vport_counter); int xsc_modify_hca_vport_context(struct xsc_core_device *dev, - u8 other_vport, u8 port_num, - int vf, - struct xsc_hca_vport_context *req) + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req) { struct xsc_modify_hca_vport_context_in in; struct xsc_modify_hca_vport_context_out out; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c index 7de6aacf0b6f6478c9145fe6e8a914a6d1efa54d..0a51f574ff41a7a4c66bf20240c8fe87d6ccc77f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ -#include -#include -#include +#include "common/driver.h" +#include "common/device.h" +#include "common/xsc_core.h" #include "wq.h" u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq) @@ -21,8 +20,8 @@ static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) } int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, - u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, - struct xsc_wq_ctrl *wq_ctrl) + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl) { u8 log_wq_stride = ele_log_size; u8 log_wq_sz = q_log_size; @@ -35,11 +34,10 @@ int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param } err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), - &wq_ctrl->buf, - param->buf_numa_node); + &wq_ctrl->buf, + param->buf_numa_node); if (err) { - xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", - err); + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } @@ -57,8 +55,8 @@ int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param EXPORT_SYMBOL_GPL(xsc_eth_cqwq_create); int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, - u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, - struct xsc_wq_ctrl *wq_ctrl) + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl) { u8 log_wq_stride = ele_log_size; u8 log_wq_sz = q_log_size; @@ -72,7 +70,7 @@ int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *par } err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), - &wq_ctrl->buf, param->buf_numa_node); + &wq_ctrl->buf, param->buf_numa_node); if (err) { xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h index 
c6fc5a73cd4ae3601d81e3d9cc8a027f73dcddbe..8811ef1bf0f772472c583dad59349c9ce84c90b1 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h @@ -1,14 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #ifndef __XSC_WQ_H__ #define __XSC_WQ_H__ -#include -#include +#include "common/cq.h" +#include "common/qp.h" struct xsc_wq_param { int buf_numa_node; @@ -44,7 +43,7 @@ enum xsc_res_type { u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq); int xsc_buf_alloc_node(struct xsc_core_device *dev, int size, - struct xsc_frag_buf *buf, int node); + struct xsc_frag_buf *buf, int node); /*api for eth driver*/ int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, @@ -52,14 +51,14 @@ int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param struct xsc_wq_ctrl *wq_ctrl); int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, - u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, - struct xsc_wq_ctrl *wq_ctrl); + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl); void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl); static inline void xsc_init_fbc_offset(struct xsc_buf_list *frags, - u8 log_stride, u8 log_sz, - u16 strides_offset, - struct xsc_frag_buf_ctrl *fbc) + u8 log_stride, u8 log_sz, + u16 strides_offset, + struct xsc_frag_buf_ctrl *fbc) { fbc->frags = frags; fbc->log_stride = log_stride; @@ -71,14 +70,14 @@ static inline void xsc_init_fbc_offset(struct xsc_buf_list *frags, } static inline void xsc_init_fbc(struct xsc_buf_list *frags, - u8 log_stride, u8 log_sz, - struct xsc_frag_buf_ctrl *fbc) + u8 log_stride, u8 log_sz, + struct xsc_frag_buf_ctrl *fbc) { xsc_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); } static inline void 
*xsc_frag_buf_get_wqe(struct xsc_frag_buf_ctrl *fbc, - u32 ix) + u32 ix) { unsigned int frag; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c index 89c5303ede3a4098e7e5cfbddcf39c5005cadd44..f24dfaccd646b0ba865a761893f7b8bf4f54eda3 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c @@ -1,19 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #include -#include -#include -#include +#include "common/xsc_core.h" +#include "common/driver.h" #include -#include +#include "common/xsc_lag.h" -#include -#include -#include +#include "common/xsc_hsi.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" #include #include @@ -79,45 +77,54 @@ int xsc_cmd_create_lag(struct xsc_lag *ldev, u8 flags) bool mp_lag = flags & XSC_LAG_FLAG_MULTIPATH; bool roce_lag = flags & XSC_LAG_FLAG_ROCE; bool sriov_lag = flags & XSC_LAG_FLAG_SRIOV; + bool kernel_bond = flags & XSC_BOND_FLAG_KERNEL; u16 lag_id = XSC_LAG_PORT_START; u8 lag_num = XSC_LAG_NUM_MAX; - if (!(flags & XSC_LAG_MODE_FLAGS)) + if (!(flags & XSC_LAG_MODE_FLAGS) && !kernel_bond) return -EINVAL; in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_CREATE); - in.req.mp_lag = mp_lag; - in.req.roce_lag = roce_lag; - in.req.lag_id = cpu_to_be16(lag_id); - in.req.lag_num = lag_num; - in.req.lag_start = cpu_to_be16(lag_id); - in.req.lag_sel_mode = mp_lag ? 
XSC_LAG_HASH_L34 : xsc_lag_hashtype_convert(xdev0, ldev); - in.req.remap_port1 = remap_port1; - in.req.remap_port2 = remap_port2; - - xsc_core_info(xdev0, "modify lag: lag_id = %d, mp_lag=%d, roce_lag=%d, sriov_lag=%d, lag_sel_mode = %d\n", - lag_id, mp_lag, roce_lag, sriov_lag, in.req.lag_sel_mode); + if (!kernel_bond) { + in.req.mp_lag = mp_lag; + in.req.roce_lag = roce_lag; + in.req.lag_id = cpu_to_be16(lag_id); + in.req.lag_num = lag_num; + in.req.lag_start = cpu_to_be16(lag_id); + in.req.lag_sel_mode = + mp_lag ? XSC_LAG_HASH_L34 : xsc_lag_hashtype_convert(xdev0, ldev); + in.req.remap_port1 = remap_port1; + in.req.remap_port2 = remap_port2; + + xsc_core_info(xdev0, "create lag: lag_id = %d, mp_lag=%d, roce_lag=%d, sriov_lag=%d, lag_sel_mode = %d\n", + lag_id, mp_lag, roce_lag, sriov_lag, in.req.lag_sel_mode); + } else { + in.req.kernel_bond = true; + xsc_core_info(xdev0, "create kernel bond\n"); + } memcpy(info_mac0->netdev_addr, netdev0->dev_addr, ETH_ALEN); memcpy(info_mac1->netdev_addr, netdev1->dev_addr, ETH_ALEN); memcpy(info_mac0->gw_dmac, tracker->gw_dmac0, ETH_ALEN); memcpy(info_mac1->gw_dmac, tracker->gw_dmac1, ETH_ALEN); - info_mac0->info_mac.mac_logic_port = xdev0->mac_logic_port; - info_mac0->info_mac.logic_port = xdev0->logic_port; - info_mac0->info_mac.glb_func_id = xdev0->glb_func_id; - info_mac1->info_mac.mac_logic_port = xdev1->mac_logic_port; - info_mac1->info_mac.logic_port = xdev1->logic_port; - info_mac1->info_mac.glb_func_id = xdev1->glb_func_id; + info_mac0->info_mac.mac_logic_port = cpu_to_be16(xdev0->mac_logic_port); + info_mac0->info_mac.logic_port = cpu_to_be16(xdev0->logic_port); + info_mac0->info_mac.glb_func_id = cpu_to_be16(xdev0->glb_func_id); + info_mac1->info_mac.mac_logic_port = cpu_to_be16(xdev1->mac_logic_port); + info_mac1->info_mac.logic_port = cpu_to_be16(xdev1->logic_port); + info_mac1->info_mac.glb_func_id = cpu_to_be16(xdev1->glb_func_id); ret = xsc_cmd_exec(xdev0, &in, sizeof(in), &out, sizeof(out)); - if (ret) { - 
xsc_core_err(xdev0, "%s:error\n", __func__); - return ret; + if (ret || out.hdr.status) { + xsc_core_err(xdev0, "Failed to create lag, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; } - ldev->lag_id = lag_id; + if (!kernel_bond) + ldev->lag_id = lag_id; return ret; } @@ -147,42 +154,50 @@ int xsc_cmd_modify_lag(struct xsc_lag *ldev) in.req.lag_sel_mode = mp_lag ? XSC_LAG_HASH_L34 : xsc_lag_hashtype_convert(xdev0, ldev); xsc_core_info(xdev0, "modify lag: lag_id = %d, mp_lag=%d, roce_lag=%d, sriov_lag=%d, lag_sel_mode = %d\n", - lag_id, mp_lag, roce_lag, sriov_lag, in.req.lag_sel_mode); + lag_id, mp_lag, roce_lag, sriov_lag, in.req.lag_sel_mode); ret = xsc_cmd_exec(xdev0, &in, sizeof(in), &out, sizeof(out)); - if (ret) { - xsc_core_err(xdev0, "%s:error\n", __func__); - return ret; + if (ret || out.hdr.status) { + xsc_core_err(xdev0, "Failed to modify lag, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; } return ret; } -int xsc_cmd_destroy_lag(struct xsc_lag *ldev) +int xsc_cmd_destroy_lag(struct xsc_lag *ldev, u8 bond_flags) { struct xsc_core_device *xdev0 = ldev->pf[0].xdev; u8 flags = ldev->flags; int ret = -1; + bool kernel_bond = bond_flags & XSC_BOND_FLAG_KERNEL; struct xsc_destroy_lag_mbox_in in = {}; struct xsc_destroy_lag_mbox_out out = {}; - if (!(flags & XSC_LAG_MODE_FLAGS)) + if (!(flags & XSC_LAG_MODE_FLAGS) && !kernel_bond) return -EINVAL; - xsc_core_info(xdev0, "destroy lag: lag_id = %d\n", ldev->lag_id); - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_DESTROY); - in.req.lag_id = ldev->lag_id; + if (!kernel_bond) { + in.req.lag_id = ldev->lag_id; + xsc_core_info(xdev0, "destroy lag: lag_id = %d\n", ldev->lag_id); + } else { + in.req.kernel_bond = true; + xsc_core_info(xdev0, "destroy kernel bond\n"); + } ret = xsc_cmd_exec(xdev0, &in, sizeof(in), &out, sizeof(out)); - if (ret) { - xsc_core_err(xdev0, "%s:error\n", __func__); - return ret; + if (ret || out.hdr.status) { + xsc_core_err(xdev0, "Failed to destroy lag, 
err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; } - ldev->lag_id = 0xff; + if (!kernel_bond) + ldev->lag_id = 0xff; return ret; } @@ -209,8 +224,7 @@ static int xsc_lag_set_qos(struct xsc_core_device *xdev, u16 lag_id, u8 member_b req->lag_id = cpu_to_be16(lag_id); req->member_bitmap = member_bitmap; req->lag_del = lag_del; - req->pcie_no = xsc_get_pcie_no(); - req->esw_mode = xdev->priv.eswitch->mode; + req->pcie_no = g_xsc_pcie_no; in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_SET_QOS); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); @@ -241,22 +255,22 @@ static bool xsc_lag_check_prereq(struct xsc_lag *ldev) } static void xsc_infer_tx_affinity_mapping(struct xsc_core_device *xdev0, struct xsc_lag *ldev, - u8 *port1, u8 *port2) + u8 *port1, u8 *port2) { struct lag_tracker *tracker = &ldev->tracker; *port1 = MAC_INVALID; *port2 = MAC_INVALID; if (tracker->netdev_state[0].tx_enabled && - tracker->netdev_state[0].link_up) + tracker->netdev_state[0].link_up) *port1 = MAC_0_LOGIC; if (tracker->netdev_state[1].tx_enabled && - tracker->netdev_state[1].link_up) + tracker->netdev_state[1].link_up) *port2 = MAC_1_LOGIC; xsc_core_info(xdev0, "tx_affinity_mapping: port1 = %d, port2 = %d\n", - *port1, *port2); + *port1, *port2); } static int xsc_create_lag(struct xsc_lag *ldev, u8 flags) @@ -265,10 +279,10 @@ static int xsc_create_lag(struct xsc_lag *ldev, u8 flags) int err; xsc_infer_tx_affinity_mapping(xdev0, ldev, &ldev->v2p_map[0], - &ldev->v2p_map[1]); + &ldev->v2p_map[1]); xsc_core_info(xdev0, "xsc create lag 1:%d port 2:%d", - ldev->v2p_map[0], ldev->v2p_map[1]); + ldev->v2p_map[0], ldev->v2p_map[1]); err = xsc_cmd_create_lag(ldev, flags); if (err) @@ -304,16 +318,16 @@ void xsc_modify_lag(struct xsc_lag *ldev) int err; xsc_infer_tx_affinity_mapping(xdev0, ldev, &v2p_port1, - &v2p_port2); + &v2p_port2); if (v2p_port1 != ldev->v2p_map[0] || - v2p_port2 != ldev->v2p_map[1] || - tracker->hash_type != tracker->old_hash_type) { + v2p_port2 != 
ldev->v2p_map[1] || + tracker->hash_type != tracker->old_hash_type) { ldev->v2p_map[0] = v2p_port1; ldev->v2p_map[1] = v2p_port2; xsc_core_info(xdev0, "modify lag map port 1:%d port 2:%d", - ldev->v2p_map[0], ldev->v2p_map[1]); + ldev->v2p_map[0], ldev->v2p_map[1]); err = xsc_cmd_modify_lag(ldev); if (err) { @@ -334,7 +348,7 @@ static void xsc_deactivate_lag(struct xsc_lag *ldev) if (xsc_lag_set_qos(xdev0, ldev->lag_id, 0, true)) xsc_core_err(xdev0, "failed to set QoS for LAG %u\n", ldev->lag_id); - if (xsc_cmd_destroy_lag(ldev)) + if (xsc_cmd_destroy_lag(ldev, XSC_BOND_FLAG_LAG)) xsc_core_err(xdev0, "Failed to deactivate LAG; driver restart required, Make sure all VFs are unbound prior to LAG activation or deactivation\n"); ldev->flags &= ~XSC_LAG_MODE_FLAGS; @@ -346,8 +360,7 @@ static void xsc_lag_remove_ib_devices(struct xsc_lag *ldev) for (i = 0; i < XSC_MAX_PORTS; i++) if (ldev->pf[i].xdev) - xsc_remove_dev_by_protocol(ldev->pf[i].xdev, - XSC_INTERFACE_PROTOCOL_IB); + xsc_remove_dev_by_protocol(ldev->pf[i].xdev, XSC_INTERFACE_PROTOCOL_IB); } static void xsc_lag_add_ib_devices(struct xsc_lag *ldev) @@ -356,8 +369,7 @@ static void xsc_lag_add_ib_devices(struct xsc_lag *ldev) for (i = 0; i < XSC_MAX_PORTS; i++) if (ldev->pf[i].xdev) - xsc_add_dev_by_protocol(ldev->pf[i].xdev, - XSC_INTERFACE_PROTOCOL_IB); + xsc_add_dev_by_protocol(ldev->pf[i].xdev, XSC_INTERFACE_PROTOCOL_IB); } static void xsc_do_bond(struct xsc_lag *ldev) @@ -365,19 +377,35 @@ static void xsc_do_bond(struct xsc_lag *ldev) struct xsc_core_device *xdev0 = ldev->pf[0].xdev; struct xsc_core_device *xdev1 = ldev->pf[1].xdev; struct lag_tracker tracker; - bool do_bond; + bool do_lag; bool roce_lag; + int ret = 0; if (!xdev0 || !xdev1) return; - roce_lag = xsc_is_roce_lag_allowed(ldev); - mutex_lock(&lag_mutex); tracker = ldev->tracker; mutex_unlock(&lag_mutex); - do_bond = tracker.is_bonded && + if (tracker.is_kernel_bonded_change) { + if (tracker.is_kernel_bonded && !__xsc_bond_is_active(ldev)) { + 
ret = xsc_cmd_create_lag(ldev, XSC_BOND_FLAG_KERNEL); + ldev->flags |= XSC_BOND_FLAG_KERNEL; + + xsc_core_info(xdev0, "Create kernel bond, ret = %d\n", ret); + } else if (!tracker.is_kernel_bonded && __xsc_bond_is_active(ldev)) { + ret = xsc_cmd_destroy_lag(ldev, XSC_BOND_FLAG_KERNEL); + ldev->flags &= ~XSC_BOND_FLAG_KERNEL; + + xsc_core_info(xdev0, "Destroy kernel bond, ret = %d\n", ret); + } + tracker.is_kernel_bonded_change = false; + + return; + } + + do_lag = tracker.is_hw_bonded && !tracker.lag_disable && ldev->pf[0].netdev && ldev->pf[1].netdev && @@ -385,20 +413,30 @@ static void xsc_do_bond(struct xsc_lag *ldev) ldev->pf[1].netdev == tracker.ndev[1] && xsc_lag_check_prereq(ldev); - xsc_core_info(xdev0, "do_bond = %d, is_bonded = %d, lag_disable = %d, lag_check = %d\n", - do_bond, tracker.is_bonded, tracker.lag_disable, xsc_lag_check_prereq(ldev)); + roce_lag = xsc_is_roce_lag_allowed(ldev); + + if (roce_lag && + (!radix_tree_empty(&xdev0->priv_device.bdf_tree) || + !radix_tree_empty(&xdev1->priv_device.bdf_tree))) { + xsc_core_err(xdev0, "Failed to create roce lag because the ib device is open\n"); + return; + } + + xsc_core_info(xdev0, "do_lag = %d, is_hw_bonded = %d, lag_disable = %d, lag_check = %d\n", + do_lag, tracker.is_hw_bonded, tracker.lag_disable, + xsc_lag_check_prereq(ldev)); - if (do_bond && !__xsc_lag_is_active(ldev)) { + if (do_lag && !__xsc_lag_is_active(ldev)) { if (roce_lag) { xsc_lag_remove_ib_devices(ldev); - xsc_activate_lag(ldev, XSC_LAG_FLAG_ROCE); + xsc_activate_lag(ldev, (XSC_LAG_FLAG_ROCE)); xsc_add_dev_by_protocol(xdev0, XSC_INTERFACE_PROTOCOL_IB); } else { xsc_activate_lag(ldev, XSC_LAG_FLAG_SRIOV); } - } else if (do_bond && __xsc_lag_is_active(ldev)) { + } else if (do_lag && __xsc_lag_is_active(ldev)) { xsc_modify_lag(ldev); - } else if (!do_bond && __xsc_lag_is_active(ldev)) { + } else if (!do_lag && __xsc_lag_is_active(ldev)) { if (roce_lag) xsc_remove_dev_by_protocol(xdev0, XSC_INTERFACE_PROTOCOL_IB); @@ -413,13 +451,13 @@ 
static void xsc_do_bond_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct xsc_lag *ldev = container_of(delayed_work, struct xsc_lag, - bond_work); + bond_work); int status; status = mutex_trylock(&xsc_intf_mutex); if (!status) { /* 1 sec delay. */ - queue_delayed_work(ldev->wq, &ldev->bond_work, HZ/2); + queue_delayed_work(ldev->wq, &ldev->bond_work, HZ / 2); return; } @@ -448,7 +486,7 @@ static struct xsc_lag *xsc_lag_dev_alloc(void) } static void xsc_lag_dev_add_xdev(struct xsc_lag *ldev, - struct xsc_core_device *xdev) + struct xsc_core_device *xdev) { unsigned int fn = PCI_FUNC(xdev->pdev->devfn) % XSC_MAX_PORTS; @@ -462,7 +500,7 @@ static void xsc_lag_dev_add_xdev(struct xsc_lag *ldev, } int xsc_lag_dev_get_netdev_idx(struct xsc_lag *ldev, - struct net_device *ndev) + struct net_device *ndev) { int i; @@ -492,12 +530,12 @@ enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond) } static bool xsc_lag_eval_bonding_conds(struct xsc_lag *ldev, - struct lag_tracker *tracker, - struct net_device *upper) + struct lag_tracker *tracker, + struct net_device *upper) { int bond_status = 0, num_slaves = 0, idx; struct net_device *ndev_tmp; - bool is_bonded; + bool is_hw_bonded = false, is_kernel_bonded = false; struct xsc_core_device *xdev0 = ldev->pf[0].xdev; rcu_read_lock(); @@ -511,13 +549,13 @@ static bool xsc_lag_eval_bonding_conds(struct xsc_lag *ldev, rcu_read_unlock(); xsc_core_info(xdev0, "num_slaves = %d, bond_status = %d\n", - num_slaves, bond_status); + num_slaves, bond_status); /* None of this lagdev's netdevs are slaves of this master. 
*/ - if (tracker->is_bonded && - (!tracker->netdev_state[0].link_up || !tracker->netdev_state[0].tx_enabled) && - (!tracker->netdev_state[1].link_up || !tracker->netdev_state[1].tx_enabled)) { - tracker->is_bonded = false; + if (tracker->is_hw_bonded && + (!tracker->netdev_state[0].link_up || !tracker->netdev_state[0].tx_enabled) && + (!tracker->netdev_state[1].link_up || !tracker->netdev_state[1].tx_enabled)) { + tracker->is_hw_bonded = false; return true; } @@ -529,16 +567,27 @@ static bool xsc_lag_eval_bonding_conds(struct xsc_lag *ldev, * of the same lag master, and only them. * Lag mode must be activebackup or hash. */ - if (!tracker->is_bonded) - is_bonded = (num_slaves == XSC_MAX_PORTS) && - (bond_status == 0x3) && - ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) || - (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)); + if (!tracker->is_kernel_bonded && !tracker->is_hw_bonded) + is_kernel_bonded = (num_slaves == XSC_MAX_PORTS) && + (bond_status == 0x3); + + if (!tracker->is_hw_bonded) + is_hw_bonded = (num_slaves == XSC_MAX_PORTS) && + (bond_status == 0x3) && + ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) || + (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)); + + xsc_core_info(xdev0, "is_hw_bonded = %d, is_kernel_bonded = %d\n", is_hw_bonded, + is_kernel_bonded); - xsc_core_info(xdev0, "is_bonded = %d\n", is_bonded); + if (tracker->is_hw_bonded != is_hw_bonded) { + tracker->is_hw_bonded = is_hw_bonded; + return true; + } - if (tracker->is_bonded != is_bonded) { - tracker->is_bonded = is_bonded; + if (tracker->is_kernel_bonded != is_kernel_bonded) { + tracker->is_kernel_bonded = is_kernel_bonded; + tracker->is_kernel_bonded_change = true; return true; } @@ -546,9 +595,9 @@ static bool xsc_lag_eval_bonding_conds(struct xsc_lag *ldev, } static bool xsc_handle_changeupper_event(struct xsc_lag *ldev, - struct lag_tracker *tracker, - struct net_device *ndev, - struct netdev_notifier_changeupper_info *info) + struct lag_tracker *tracker, + struct 
net_device *ndev, + struct netdev_notifier_changeupper_info *info) { enum netdev_lag_tx_type tx_type = NETDEV_LAG_TX_TYPE_UNKNOWN; struct netdev_lag_upper_info *lag_upper_info; @@ -572,9 +621,9 @@ static bool xsc_handle_changeupper_event(struct xsc_lag *ldev, } static bool xsc_handle_changelowerstate_event(struct xsc_lag *ldev, - struct lag_tracker *tracker, - struct net_device *ndev, - struct netdev_notifier_changelowerstate_info *info) + struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) { struct netdev_lag_lower_state_info *lag_lower_info; int idx; @@ -626,15 +675,15 @@ static bool xsc_handle_changehash_event(struct xsc_lag *ldev, } static int xsc_lag_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) + unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct lag_tracker tracker; struct xsc_lag *ldev; bool changed = 0; - if ((event != NETDEV_CHANGE) && (event != NETDEV_CHANGEUPPER) && - (event != NETDEV_CHANGELOWERSTATE)) + if (event != NETDEV_CHANGE && event != NETDEV_CHANGEUPPER && + event != NETDEV_CHANGELOWERSTATE) return NOTIFY_DONE; ldev = container_of(this, struct xsc_lag, nb); @@ -643,15 +692,14 @@ static int xsc_lag_netdev_event(struct notifier_block *this, switch (event) { case NETDEV_CHANGEUPPER: changed = xsc_handle_changeupper_event(ldev, &tracker, ndev, - ptr); + ptr); break; case NETDEV_CHANGELOWERSTATE: changed = xsc_handle_changelowerstate_event(ldev, &tracker, - ndev, ptr); + ndev, ptr); break; case NETDEV_CHANGE: - changed = xsc_handle_changehash_event(ldev, &tracker, ndev, - ptr); + changed = xsc_handle_changehash_event(ldev, &tracker, ndev, ptr); break; } @@ -675,7 +723,7 @@ static void xsc_lag_fib_event_flush(struct notifier_block *nb) } bool xsc_esw_multipath_prereq(struct xsc_core_device *xdev0, - struct xsc_core_device *xdev1) + struct xsc_core_device *xdev1) { #ifdef CONFIG_XSC_ESWITCH return 
(xdev0->priv.eswitch->mode == XSC_ESWITCH_OFFLOADS && @@ -683,7 +731,6 @@ bool xsc_esw_multipath_prereq(struct xsc_core_device *xdev0, #else return false; #endif - } bool xsc_lag_multipath_check_prereq(struct xsc_lag *ldev) @@ -734,8 +781,8 @@ static void xsc_lag_set_port_affinity(struct xsc_lag *ldev, int port) } static void xsc_lag_fib_route_event(struct xsc_lag *ldev, - unsigned long event, - struct fib_info *fi) + unsigned long event, + struct fib_info *fi) { struct lag_mp *mp = &ldev->lag_mp; struct fib_nh *fib_nh0, *fib_nh1; @@ -749,7 +796,7 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, nhs = fi->fib_nhs; #endif /* Handle delete event */ - if ((event == FIB_EVENT_ENTRY_DEL) && __xsc_lag_is_active(ldev) && (nhs == 2)) { + if (event == FIB_EVENT_ENTRY_DEL && __xsc_lag_is_active(ldev) && nhs == 2) { /* stop track */ if (mp->mfi == fi) mp->mfi = NULL; @@ -761,7 +808,7 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, xsc_core_info(ldev->pf[0].xdev, "nhs=%d\n", nhs); #endif - if (nhs == 1 && (event != FIB_EVENT_ENTRY_DEL)) { + if (nhs == 1 && event != FIB_EVENT_ENTRY_DEL) { if (__xsc_lag_is_active(ldev)) { #ifdef HAVE_FIB_INFO_NH struct net_device *nh_dev = fib_info_nh(fi, 0)->fib_nh_dev; @@ -791,17 +838,17 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, #ifdef HAVE_FIB_NH_DEV if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev && - fib_nh1->fib_nh_dev == ldev->pf[1].netdev) && - !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev && - fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) { + fib_nh1->fib_nh_dev == ldev->pf[1].netdev) && + !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev && + fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) { #else if (!(fib_nh0->nh_dev == ldev->pf[0].netdev && - fib_nh1->nh_dev == ldev->pf[1].netdev) && - !(fib_nh0->nh_dev == ldev->pf[1].netdev && - fib_nh1->nh_dev == ldev->pf[0].netdev)) { + fib_nh1->nh_dev == ldev->pf[1].netdev) && + !(fib_nh0->nh_dev == ldev->pf[1].netdev && + fib_nh1->nh_dev == ldev->pf[0].netdev)) { 
#endif xsc_core_err(ldev->pf[0].xdev, - "Multipath offload require two ports of the same HCA\n"); + "Multipath offload require two ports of the same HCA\n"); return; } @@ -817,7 +864,7 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, #endif if (!neigh0 || !neigh1) { xsc_core_err(ldev->pf[0].xdev, - "Multipath offload require two ports with valid neighbor\n"); + "Multipath offload require two ports with valid neighbor\n"); return; } @@ -830,7 +877,7 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, neigh_release(neigh0); neigh_release(neigh1); xsc_core_err(ldev->pf[0].xdev, - "Multipath offload require two ports with valid gw\n"); + "Multipath offload require two ports with valid gw\n"); return; } } @@ -844,7 +891,7 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, neigh_release(neigh0); neigh_release(neigh1); xsc_core_err(ldev->pf[0].xdev, - "Multipath offload require two ports with valid gw\n"); + "Multipath offload require two ports with valid gw\n"); return; } } @@ -860,9 +907,9 @@ static void xsc_lag_fib_route_event(struct xsc_lag *ldev, } static void xsc_lag_fib_nexthop_event(struct xsc_lag *ldev, - unsigned long event, - struct fib_nh *fib_nh, - struct fib_info *fi) + unsigned long event, + struct fib_nh *fib_nh, + struct fib_info *fi) { struct lag_mp *mp = &ldev->lag_mp; @@ -902,20 +949,24 @@ static void xsc_lag_fib_update(struct work_struct *work) rtnl_lock(); switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: + fallthrough; case FIB_EVENT_ENTRY_APPEND: + fallthrough; case FIB_EVENT_ENTRY_ADD: + fallthrough; case FIB_EVENT_ENTRY_DEL: xsc_lag_fib_route_event(ldev, fib_work->event, - fib_work->fen_info.fi); + fib_work->fen_info.fi); fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_NH_ADD: + fallthrough; case FIB_EVENT_NH_DEL: fib_nh = fib_work->fnh_info.fib_nh; xsc_lag_fib_nexthop_event(ldev, - fib_work->event, - fib_work->fnh_info.fib_nh, - fib_nh->nh_parent); + fib_work->event, + fib_work->fnh_info.fib_nh, + 
fib_nh->nh_parent); fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); break; } @@ -939,8 +990,8 @@ struct xsc_fib_event_work *xsc_lag_init_fib_work(struct xsc_lag *ldev, unsigned } int xsc_lag_fib_event(struct notifier_block *nb, - unsigned long event, - void *ptr) + unsigned long event, + void *ptr) { struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb); struct xsc_lag *ldev = container_of(mp, struct xsc_lag, lag_mp); @@ -963,8 +1014,11 @@ int xsc_lag_fib_event(struct notifier_block *nb, switch (event) { case FIB_EVENT_ENTRY_REPLACE: + fallthrough; case FIB_EVENT_ENTRY_APPEND: + fallthrough; case FIB_EVENT_ENTRY_ADD: + fallthrough; case FIB_EVENT_ENTRY_DEL: fen_info = container_of(info, struct fib_entry_notifier_info, info); fi = fen_info->fi; @@ -989,6 +1043,7 @@ int xsc_lag_fib_event(struct notifier_block *nb, fib_info_hold(fib_work->fen_info.fi); break; case FIB_EVENT_NH_ADD: + fallthrough; case FIB_EVENT_NH_DEL: fnh_info = container_of(info, struct fib_nh_notifier_info, info); @@ -1021,7 +1076,7 @@ int xsc_lag_mp_init(struct xsc_lag *ldev) mp->fib_nb.notifier_call = xsc_lag_fib_event; err = register_fib_notifier(&init_net, &mp->fib_nb, - xsc_lag_fib_event_flush, NULL); + xsc_lag_fib_event_flush, NULL); if (err) { destroy_workqueue(mp->wq); mp->fib_nb.notifier_call = NULL; @@ -1061,7 +1116,8 @@ int __xsc_lag_add_xdev(struct xsc_core_device *xdev) if (!ldev->nb.notifier_call) { ldev->nb.notifier_call = xsc_lag_netdev_event; - if (register_netdevice_notifier(&ldev->nb)) { + err = register_netdevice_notifier(&ldev->nb); + if (err) { ldev->nb.notifier_call = NULL; xsc_core_err(xdev, "Failed to register LAG netdev notifier\n"); } @@ -1134,8 +1190,9 @@ void xsc_lag_update_trackers(struct xsc_lag *ldev) tx_type = NETDEV_LAG_TX_TYPE_HASH; tracker->hash_type = bond_lag_hash_type(bond); tracker->old_hash_type = tracker->hash_type; - } else if (BOND_MODE(bond) == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) + } else if (BOND_MODE(bond) == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { 
tx_type = NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + } upper = bond->dev; } @@ -1224,7 +1281,6 @@ void xsc_lag_remove(struct xsc_core_device *xdev) xsc_lag_dev_remove_pf(ldev, xdev); xsc_lag_dev_put(ldev); - } EXPORT_SYMBOL(xsc_lag_remove); @@ -1274,8 +1330,6 @@ void xsc_lag_disable(struct xsc_core_device *xdev) if (!ldev) goto unlock; - xsc_ldev_get(ldev); - if (!__xsc_lag_is_active(ldev)) goto unlock; @@ -1313,4 +1367,3 @@ void xsc_lag_enable(struct xsc_core_device *xdev) unlock: mutex_unlock(&xsc_intf_mutex); } - diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c index 565abdea4b459cc2aeb2cc347f4bb4ed8cb7c8ac..6a8a80a1558154f474814f79c59c5155d153add3 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ @@ -8,15 +7,21 @@ #include #include #include -#include -#include -#include -#include -#include - -#define FEATURE_ONCHIP_FT_MASK (1<<4) -#define FEATURE_DMA_RW_TBL_MASK (1<<8) -#define FEATURE_PCT_EXP_MASK (1<<9) +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include +#include +#include +#include +#include "xsc_pci_ctrl.h" +#include "common/res_obj.h" + +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(19) #define XSC_PCI_CTRL_NAME "pci_ctrl" @@ -33,7 +38,7 @@ static int xsc_pci_ctrl_modify_qp(struct xsc_core_device *xdev, void *in, void * tl = (struct xsc_ioctl_data_tl *)out; resp = (struct xsc_ioctl_qp_range *)(tl + 1); xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", - resp->qpn, resp->num, resp->opcode); + resp->qpn, resp->num, resp->opcode); if (resp->num == 0) { xsc_core_dbg(xdev, "xsc_ioctl_qp_range: resp->num ==0\n"); return 0; @@ -53,14 +58,32 @@ static int xsc_pci_ctrl_modify_qp(struct xsc_core_device *xdev, void *in, void * xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); } } - kvfree(mailin); return ret; } +static struct pci_dev *xsc_pci_get_pcidev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + return pci_get_domain_bus_and_slot(domain, bus, devfn); +} + +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + struct pci_dev *pdev = NULL; + struct xsc_core_device *xdev = NULL; + + pdev = xsc_pci_get_pcidev_by_bus_and_slot(domain, bus, devfn); + if (!pdev) + return NULL; + + xdev = pci_get_drvdata(pdev); + + return xdev; +} + static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, - void *in, void *out) + void *in, void *out) { int ret = 0; struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; @@ -69,6 +92,7 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, 
struct xsc_vf_info vf_info; struct xsc_lag *ldev = xsc_lag_dev_get(xdev); u16 lag_id = U16_MAX; + struct xsc_core_device *rl_xdev; if (ldev && __xsc_lag_is_active(ldev)) lag_id = ldev->lag_id; @@ -84,6 +108,7 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, resp->mac_logic_in_port = xdev->mac_logic_port; resp->lag_id = lag_id; resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; resp->lag_port_start = XSC_LAG_PORT_START; resp->send_seg_num = xdev->caps.send_ds_num; resp->recv_seg_num = xdev->caps.recv_ds_num; @@ -96,18 +121,18 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, resp->pct_compress_vld = (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; - xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", resp->phy_port, - resp->func_id, resp->logic_in_port, - resp->mac_phy_port, resp->mac_logic_in_port, - resp->lag_id); - resp->funcid_encode[0] = XSC_PCIE0_VF0_FUNC_ID; - resp->funcid_encode[1] = XSC_PCIE0_VF_FUNC_ID_END; - resp->funcid_encode[2] = XSC_PCIE0_PF0_FUNC_ID; - resp->funcid_encode[3] = XSC_PCIE0_PF_FUNC_ID_END; - resp->funcid_encode[4] = XSC_PCIE1_VF0_FUNC_ID; - resp->funcid_encode[5] = XSC_PCIE1_VF_FUNC_ID_END; - resp->funcid_encode[6] = XSC_PCIE1_PF0_FUNC_ID; - resp->funcid_encode[7] = XSC_PCIE1_PF_FUNC_ID_END; + xsc_core_dbg(xdev, "BY_LOCAL:%d,%d,%d,%d,%d,%d\n", resp->phy_port, + resp->func_id, resp->logic_in_port, + resp->mac_phy_port, resp->mac_logic_in_port, + resp->lag_id); + resp->funcid[0] = xdev->caps.funcid[0]; + resp->funcid[1] = xdev->caps.funcid[1]; + resp->funcid[2] = xdev->caps.funcid[2]; + resp->funcid[3] = xdev->caps.funcid[3]; + resp->funcid[4] = xdev->caps.funcid[4]; + resp->funcid[5] = xdev->caps.funcid[5]; + resp->funcid[6] = xdev->caps.funcid[6]; + resp->funcid[7] = xdev->caps.funcid[7]; break; case XSC_IOCTL_OP_GET_VF_INFO: @@ -120,6 +145,50 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, vf_res->logic_port = vf_info.logic_port; 
break; + case XSC_IOCTL_OP_GET_INFO_BY_BDF: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + xsc_core_dbg(xdev, "ioctrl get_pcidev. domain=%u, bus=%u, devfn=%u\n", + resp->domain, resp->bus, resp->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(resp->domain, resp->bus, resp->devfn); + if (!rl_xdev) + return -1; + + resp->phy_port = rl_xdev->pcie_port; + resp->func_id = rl_xdev->glb_func_id; + resp->logic_in_port = rl_xdev->logic_port; + resp->mac_phy_port = rl_xdev->mac_port; + resp->mac_logic_in_port = rl_xdev->mac_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = rl_xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = XSC_LAG_PORT_START; + resp->send_seg_num = rl_xdev->caps.send_ds_num; + resp->recv_seg_num = rl_xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = rl_xdev->caps.raw_tpe_qp_num; + resp->chip_version = rl_xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (rl_xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (rl_xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (rl_xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 
1 : 0; + + xsc_core_dbg(xdev, "BY_BDF:%d,%d,%d,%d,%d,%d\n", resp->phy_port, + resp->func_id, resp->logic_in_port, + resp->mac_phy_port, resp->mac_logic_in_port, + resp->lag_id); + resp->funcid[0] = xdev->caps.funcid[0]; + resp->funcid[1] = xdev->caps.funcid[1]; + resp->funcid[2] = xdev->caps.funcid[2]; + resp->funcid[3] = xdev->caps.funcid[3]; + resp->funcid[4] = xdev->caps.funcid[4]; + resp->funcid[5] = xdev->caps.funcid[5]; + resp->funcid[6] = xdev->caps.funcid[6]; + resp->funcid[7] = xdev->caps.funcid[7]; + break; + default: ret = -EINVAL; break; @@ -129,40 +198,45 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, } static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev, - void *in, void *out) + void *in, void *out) { int ret = 0; struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_alloc_ucontext_req *req; struct xsc_alloc_ucontext_resp *resp; + struct xsc_core_device *rl_xdev = NULL; if (tl->opmod != XSC_IOCTL_OP_GET_CONTEXT) - ret = -EINVAL; + return -EINVAL; + + req = (struct xsc_alloc_ucontext_req *)(tl + 1); + xsc_core_dbg(xdev, "xsc_tdi_alloc_context req:\n"); + xsc_core_dbg(xdev, "req->domain=%u\n", req->domain); + xsc_core_dbg(xdev, "req->bus=%u\n", req->bus); + xsc_core_dbg(xdev, "req->devfn=%u\n", req->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(req->domain, req->bus, req->devfn); + if (!rl_xdev) + return -1; resp = (struct xsc_alloc_ucontext_resp *)(tl + 1); - // resp->qp_tab_size = 1 << xdev->caps.log_max_qp; - // resp->cache_line_size = L1_CACHE_BYTES; - // resp->max_sq_desc_sz = xdev->caps.max_sq_desc_sz; - // resp->max_rq_desc_sz = xdev->caps.max_rq_desc_sz; - // resp->max_send_wqebb = xdev->caps.max_wqes; - // resp->max_recv_wr = xdev->caps.max_wqes; - - resp->max_cq = 1 << xdev->caps.log_max_cq; - resp->max_qp = 1 << xdev->caps.log_max_qp; - resp->max_rwq_indirection_table_size = xdev->caps.max_rwq_indirection_table_size; - resp->qpm_tx_db = xdev->regs.tx_db; - resp->qpm_rx_db = 
xdev->regs.rx_db; - resp->cqm_next_cid_reg = xdev->regs.complete_reg; - resp->cqm_armdb = xdev->regs.complete_db; - resp->send_ds_num = xdev->caps.send_ds_num; - resp->recv_ds_num = xdev->caps.recv_ds_num; - resp->send_ds_shift = xdev->caps.send_wqe_shift; - resp->recv_ds_shift = xdev->caps.recv_wqe_shift; - resp->glb_func_id = xdev->glb_func_id; - - resp->max_wqes = xdev->caps.max_wqes; - - xsc_core_dbg(xdev, "xsc_tdi_alloc_context:\n"); + resp->max_cq = 1 << rl_xdev->caps.log_max_cq; + resp->max_qp = 1 << rl_xdev->caps.log_max_qp; + resp->max_rwq_indirection_table_size = rl_xdev->caps.max_rwq_indirection_table_size; + resp->qpm_tx_db = rl_xdev->regs.tx_db; + resp->qpm_rx_db = rl_xdev->regs.rx_db; + resp->cqm_next_cid_reg = rl_xdev->regs.complete_reg; + resp->cqm_armdb = rl_xdev->regs.complete_db; + resp->send_ds_num = rl_xdev->caps.send_ds_num; + resp->recv_ds_num = rl_xdev->caps.recv_ds_num; + resp->send_ds_shift = rl_xdev->caps.send_wqe_shift; + resp->recv_ds_shift = rl_xdev->caps.recv_wqe_shift; + resp->glb_func_id = rl_xdev->glb_func_id; + + resp->max_wqes = rl_xdev->caps.max_wqes; + + xsc_core_dbg(xdev, "xsc_tdi_alloc_context resp:\n"); xsc_core_dbg(xdev, "resp->max_cq=%u\n", resp->max_cq); xsc_core_dbg(xdev, "resp->max_qp=%u\n", resp->max_qp); xsc_core_dbg(xdev, "resp->qpm_tx_db=%llx\n", resp->qpm_tx_db); @@ -178,8 +252,63 @@ static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev, return ret; } +int noop_pre(struct kprobe *p, struct pt_regs *regs) { return 0; } + +static struct kprobe kp = { + .symbol_name = "kallsyms_lookup_name", +}; + +unsigned long (*kallsyms_lookup_name_func)(const char *name) = NULL; + +//调用kprobe找到kallsyms_lookup_name的地址位置 +int find_kallsyms_lookup_name(void) +{ + int ret = -1; + + kp.addr = 0; + kp.pre_handler = noop_pre; + ret = register_kprobe(&kp); + if (ret < 0) + return ret; + + kallsyms_lookup_name_func = (void *)kp.addr; + unregister_kprobe(&kp); + return ret; +} + +u16 xsc_get_irq_matrix_global_available(struct 
xsc_core_device *dev) +{ + struct db_irq_matrix *m; + unsigned long addr; + char *name = "vector_matrix"; + int ret; + + ret = find_kallsyms_lookup_name(); + if (ret < 0) { + xsc_core_err(dev, "find kallsyms_lookup_name failed\n"); + return 0xffff; + } + + addr = kallsyms_lookup_name_func(name); + xsc_core_dbg(dev, "vector_matrix addr=0x%lx\n", addr); + if (addr == 0) { + xsc_core_err(dev, "not support, arch maybe not X86?\n"); + /* 返回0xffff,做到在不知道cpu vector剩余多少可用的情况 + * 下不影响fw用该值判断能否分配中断 + */ + return 0xffff; + } + m = (struct db_irq_matrix *)(*(long *)addr); + if (!m) { + xsc_core_err(dev, "vector_matrix is NULL\n"); + return 0xffff; + } + xsc_core_info(dev, "vector_matrix global_available=%u\n", m->global_available); + return m->global_available; +} + int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size) + int out_size) { int opcode, ret = 0; struct xsc_ioctl_attr *hdr; @@ -205,13 +334,73 @@ int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, return ret; } +static long xsc_pci_ctrl_setinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + struct xsc_set_debug_info_mbox_in in; + struct xsc_set_debug_info_mbox_out out; + struct xsc_ioctl_set_debug_info info; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(xdev, "copy user_hdr from user failed, err = %d\n", err); + return -EFAULT; + } + + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(xdev, "incorrect check field, check field=%#x\n", hdr.check_filed); + return -EFAULT; + } + + if (hdr.attr.length != sizeof(info)) { + xsc_core_err(xdev, "unexpected length, length=%d\n", hdr.attr.length); + return -EFAULT; + } + + err = copy_from_user(&info, user_hdr->attr.data, hdr.attr.length); + if (err) { + xsc_core_err(xdev, "copy attr.data from user failed, err = %d\n", err); + return -EFAULT; + } + + memset(&in, 0, sizeof(in)); + 
memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_DEBUG_INFO); + switch (hdr.attr.opcode) { + case XSC_IOCTL_SET_LOG_LEVEL: + in.set_field = 0; + in.log_level = info.log_level; + break; + case XSC_IOCTL_SET_CMD_VERBOSE: + in.set_field = 1; + in.cmd_verbose = info.cmd_verbose; + break; + default: + xsc_core_err(xdev, "invalid opcode %d\n", hdr.attr.opcode); + return -EINVAL; + } + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to set debug info to fw, err = %d, status = %d\n", + err, out.hdr.status); + return -EFAULT; + } + + return 0; +} + static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr) + struct xsc_ioctl_hdr __user *user_hdr) { struct xsc_ioctl_hdr hdr; struct xsc_ioctl_hdr *in; int in_size; int err; + u16 global_available; + u16 totalvfs; err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) @@ -222,9 +411,10 @@ static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, case XSC_IOCTL_GET_PHY_INFO: case XSC_IOCTL_SET_QP_STATUS: case XSC_IOCTL_GET_CONTEXT: + case XSC_IOCTL_GET_VECTOR_MATRIX: break; default: - return -EINVAL; + return TRY_NEXT_CB; } in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; in = kvzalloc(in_size, GFP_KERNEL); @@ -232,22 +422,103 @@ static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, return -EFAULT; in->attr.opcode = hdr.attr.opcode; in->attr.length = hdr.attr.length; + + if (hdr.attr.opcode == XSC_IOCTL_GET_VECTOR_MATRIX) { + global_available = xsc_get_irq_matrix_global_available(xdev); + totalvfs = (pci_sriov_get_totalvfs(xdev->pdev) < 0) ? 
0 : + pci_sriov_get_totalvfs(xdev->pdev); + in->attr.error = err; + memcpy(in->attr.data, (void *)&global_available, sizeof(u16)); + memcpy(in->attr.data + sizeof(u16), (void *)&totalvfs, sizeof(u16)); + goto next; + } + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); if (err) { kvfree(in); return -EFAULT; } - err = xsc_pci_ctrl_exec_ioctl(xdev, &in->attr, (in_size-sizeof(u32)), in->attr.data, - hdr.attr.length); + err = xsc_pci_ctrl_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, hdr.attr.length); in->attr.error = err; +next: if (copy_to_user((void *)user_hdr, in, in_size)) err = -EFAULT; kvfree(in); return err; } -static int xsc_ioctl_flow_cmdq(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, 
pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; + + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) { struct xsc_ioctl_mbox_in *in; struct xsc_ioctl_mbox_out *out; @@ -268,6 +539,12 @@ static int xsc_ioctl_flow_cmdq(struct xsc_core_device *xdev, return -EFAULT; } + err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; out = kvzalloc(out_size, GFP_KERNEL); if (!out) { @@ -276,7 +553,7 @@ static int xsc_ioctl_flow_cmdq(struct xsc_core_device *xdev, } memcpy(out->data, in->data, hdr->attr.length); out->len = in->len; - err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); hdr->attr.error = __be32_to_cpu(out->error); if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) @@ -289,8 +566,65 @@ static int xsc_ioctl_flow_cmdq(struct xsc_core_device *xdev, return err; } +static int xsc_ioctl_emu_cmd(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct xsc_ioctl_emu_hdr *emu_hdr; + u8 *buffer; + int in_size; + int out_size; + int err; + + buffer = kvzalloc(hdr->attr.length, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = copy_from_user(buffer, user_hdr->attr.data, hdr->attr.length); + if (err) + goto err_copy_user_data; + + emu_hdr = 
(struct xsc_ioctl_emu_hdr *)buffer; + in_size = emu_hdr->in_length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_alloc_in_mem; + } + memcpy(in, emu_hdr->data, emu_hdr->in_length); + + out_size = emu_hdr->out_length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto err_alloc_out_mem; + } + + err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data + sizeof(struct xsc_ioctl_emu_hdr), + out, out_size)) + err = -EFAULT; + + kvfree(out); + kvfree(in); + kvfree(buffer); + return err; + +err_alloc_out_mem: + kvfree(in); +err_alloc_in_mem: +err_copy_user_data: + kvfree(buffer); + return err; +} + static int xsc_ioctl_modify_raw_qp(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) { struct xsc_modify_raw_qp_mbox_in *in; struct xsc_modify_raw_qp_mbox_out *out; @@ -307,15 +641,15 @@ static int xsc_ioctl_modify_raw_qp(struct xsc_core_device *xdev, goto err_out; err = copy_from_user(&in->req, user_hdr->attr.data, - sizeof(struct xsc_modify_raw_qp_request)); + sizeof(struct xsc_modify_raw_qp_request)); if (err) goto err; in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - in->pcie_no = xsc_get_pcie_no(); + in->pcie_no = g_xsc_pcie_no; err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), - out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); hdr->attr.error = __be32_to_cpu(out->hdr.status); @@ -334,17 +668,59 @@ static int xsc_ioctl_modify_raw_qp(struct xsc_core_device *xdev, return -EFAULT; } -static long xsc_pci_ctrl_cmdq(struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr) +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, void *in, + 
unsigned int inlen, void *out, int opcode) { + unsigned int idx; + + switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_MKEY: + idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); + xsc_destroy_mr_obj(file, idx); + break; + case XSC_CMD_OP_DESTROY_CQ: + idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); + xsc_destroy_cq_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_CQ: + idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); + xsc_alloc_cq_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_CREATE_QP: + idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); + xsc_alloc_qp_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_QP: + idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_destroy_qp_obj(file, idx); + break; + default: + break; + } +} + +static long xsc_pci_ctrl_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_core_device *xdev = file->xdev; struct xsc_ioctl_hdr hdr; int err; - void *in; - void *out; err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) - return -EFAULT; + return -EINVAL; /* check valid */ if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) @@ -353,33 +729,39 @@ static long xsc_pci_ctrl_cmdq(struct xsc_core_device *xdev, /* check ioctl cmd */ switch (hdr.attr.opcode) { case XSC_CMD_OP_IOCTL_FLOW: - return xsc_ioctl_flow_cmdq(xdev, user_hdr, &hdr); + return xsc_ioctl_flow_cmdq(file, user_hdr, &hdr); case XSC_CMD_OP_MODIFY_RAW_QP: return xsc_ioctl_modify_raw_qp(xdev, user_hdr, &hdr); - case XSC_CMD_OP_CREATE_QP: 
- break; - case XSC_CMD_OP_DESTROY_QP: - break; - case XSC_CMD_OP_2RST_QP: - break; - case XSC_CMD_OP_CREATE_CQ: - break; - case XSC_CMD_OP_DESTROY_CQ: - break; - case XSC_CMD_OP_CREATE_MULTI_QP: - break; - case XSC_CMD_OP_ALLOC_MULTI_VIRTQ_CQ: - break; - case XSC_CMD_OP_RELEASE_MULTI_VIRTQ_CQ: - break; - case XSC_CMD_OP_ALLOC_MULTI_VIRTQ: - break; - case XSC_CMD_OP_RELEASE_MULTI_VIRTQ: + case XSC_CMD_OP_USER_EMU_CMD: + return xsc_ioctl_emu_cmd(xdev, user_hdr, &hdr); + default: + err = TRY_NEXT_CB; break; + } - default: + return err; +} + +static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + int op; + struct xsc_core_device *dev = file->xdev; + struct xsc_create_mkey_mbox_out *resp; + struct xsc_unregister_mr_mbox_in *req; + u8 key; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) return -EINVAL; - } in = kvzalloc(hdr.attr.length, GFP_KERNEL); if (!in) @@ -396,7 +778,50 @@ static long xsc_pci_ctrl_cmdq(struct xsc_core_device *xdev, goto err_exit; } - xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length); + op = be16_to_cpu(((struct xsc_inbox_hdr *)in)->opcode); + switch (op) { + case XSC_CMD_OP_CREATE_MKEY: + spin_lock(&dev->dev_res->mkey_lock); + key = 0x80 + dev->dev_res->mkey_key++; + spin_unlock(&dev->dev_res->mkey_lock); +#ifdef REG_MR_VIA_CMDQ + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); +#else + err = xsc_create_mkey(dev, in, out); +#endif + resp = (struct xsc_create_mkey_mbox_out *)out; + resp->mkey = xsc_idx_to_mkey(be32_to_cpu(resp->mkey) & 0xffffff) | key; + resp->mkey = cpu_to_be32(resp->mkey); + break; + +#ifndef REG_MR_VIA_CMDQ + case XSC_CMD_OP_DESTROY_MKEY: + err = xsc_destroy_mkey(dev, in, out); + break; + + case XSC_CMD_OP_REG_MR: + err = xsc_reg_mr(dev, in, out); + break; +#endif + + case 
XSC_CMD_OP_DEREG_MR: + req = (struct xsc_unregister_mr_mbox_in *)in; + req->mkey = be32_to_cpu(req->mkey); + req->mkey = cpu_to_be32(xsc_mkey_to_idx(req->mkey)); +#ifdef REG_MR_VIA_CMDQ + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); +#else + err = xsc_dereg_mr(dev, in, out); +#endif + break; + default: + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); + break; + } + xsc_pci_ctrl_cmdq_handle_res_obj(file, in, hdr.attr.length, out, hdr.attr.opcode); +/* if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + * err = -EFAULT; + */ if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length)) err = -EFAULT; err_exit: @@ -405,23 +830,30 @@ static long xsc_pci_ctrl_cmdq(struct xsc_core_device *xdev, return err; } -static void xsc_pci_ctrl_reg_cb(struct xsc_core_device *xdev, unsigned int cmd, - struct xsc_ioctl_hdr __user *user_hdr, void *data) +static int xsc_pci_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) { int err; switch (cmd) { case XSC_IOCTL_CMDQ: - err = xsc_pci_ctrl_cmdq(xdev, user_hdr); + err = xsc_pci_ctrl_cmdq(file, user_hdr); break; case XSC_IOCTL_DRV_GET: + err = xsc_pci_ctrl_getinfo(file->xdev, user_hdr); + break; case XSC_IOCTL_DRV_SET: - err = xsc_pci_ctrl_getinfo(xdev, user_hdr); + err = xsc_pci_ctrl_setinfo(file->xdev, user_hdr); + break; + case XSC_IOCTL_CMDQ_RAW: + err = xsc_pci_ctrl_cmdq_raw(file, user_hdr); break; default: - err = -EFAULT; + err = TRY_NEXT_CB; break; } + + return err; } void xsc_pci_ctrl_fini(void) @@ -439,3 +871,4 @@ int xsc_pci_ctrl_init(void) return ret; } + diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h index 473f75cff7e64abba58459d4cb68004438af2262..c57caed380b7f014af53607f66f1f71004ace9c2 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h @@ -1,14 +1,51 @@ /* 
SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ #ifndef XSC_PCI_CTRL_H #define XSC_PCI_CTRL_H -void xsc_pci_ctrl_fini(void); -int xsc_pci_ctrl_init(void); +#include +#include +#include +//for x86 +#ifndef NR_VECTORS +#define NR_VECTORS 256 #endif +#define IRQ_MATRIX_BITS NR_VECTORS +#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) + +struct db_cpumap { + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int managed_allocated; + u8 initialized; + u8 online; + unsigned long alloc_map[IRQ_MATRIX_SIZE]; + unsigned long managed_map[IRQ_MATRIX_SIZE]; +}; + +struct db_irq_matrix { + unsigned int matrix_bits; + unsigned int alloc_start; + unsigned int alloc_end; + unsigned int alloc_size; + unsigned int global_available; + unsigned int global_reserved; + unsigned int systembits_inalloc; + unsigned int total_allocated; + unsigned int online_maps; + struct db_cpumap __percpu *maps; + unsigned long scratch_map[IRQ_MATRIX_SIZE]; + unsigned long system_map[IRQ_MATRIX_SIZE]; +}; + +u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev); +int xsc_pci_ctrl_init(void); +void xsc_pci_ctrl_fini(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c index 8f4924b7004ce50977f2851308ad2e76da3a3dcc..d50e854461e0ac072231ecf4d267284ba8dd6b21 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c @@ -1,9 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ - #include #include #include @@ -11,10 +9,10 @@ #include #include #include -#include -#include -#include - +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/xsc_port_ctrl.h" +#include "common/res_obj.h" #include "fw/xsc_tbm.h" #define XSC_PORT_CTRL_MAX 256 @@ -33,14 +31,67 @@ static dev_t g_port_ctrl_root_dev; static struct class *g_port_ctrl_class; static int g_port_ctrl_dev_cnt; static struct list_head g_port_ctrl_cbs = LIST_HEAD_INIT(g_port_ctrl_cbs); -struct mutex g_port_ctrl_cbs_lock; +struct mutex g_port_ctrl_cbs_lock; /* protect port ctrl node list */ static int _port_ctrl_open(struct inode *inode, struct file *filp) { - struct xsc_port_ctrl *ctrl - = container_of(inode->i_cdev, struct xsc_port_ctrl, cdev); + struct xsc_port_ctrl *ctrl = container_of(inode->i_cdev, struct xsc_port_ctrl, cdev); + struct xsc_port_ctrl_file *file; + + file = kzalloc(sizeof(*file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&file->bdf_tree, GFP_ATOMIC); + spin_lock_init(&file->bdf_lock); + file->ctrl = ctrl; + + file->root_bdf = kzalloc(sizeof(*file->root_bdf), GFP_KERNEL); + if (!file->root_bdf) { + kfree(file); + return -ENOMEM; + } + INIT_RADIX_TREE(&file->root_bdf->obj_tree, GFP_ATOMIC); + spin_lock_init(&file->root_bdf->obj_lock); + file->root_bdf->xdev = container_of(ctrl, struct xsc_core_device, port_ctrl); + + spin_lock(&ctrl->file_lock); + list_add_tail(&file->file_node, &ctrl->file_list); + spin_unlock(&ctrl->file_lock); + filp->private_data = file; - filp->private_data = ctrl; + xsc_core_info(file->root_bdf->xdev, "process %d open port ctrl file\n", current->pid); + + return 0; +} + +static void xsc_release_port_ctrl_file(struct xsc_port_ctrl_file *file) +{ + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + + xsc_close_bdf_file(file->root_bdf); + kfree(file->root_bdf); + spin_lock(&file->bdf_lock); + radix_tree_for_each_slot(slot, &file->bdf_tree, &iter, 0) { + bdf_file = (struct 
xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&file->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&file->bdf_lock); +} + +static int _port_ctrl_release(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl_file *file = filp->private_data; + + xsc_release_port_ctrl_file(file); + spin_lock(&file->ctrl->file_lock); + list_del(&file->file_node); + spin_unlock(&file->ctrl->file_lock); + kfree(file); return 0; } @@ -64,22 +115,63 @@ static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) unsigned long start = (unsigned long)vma->vm_start; unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - struct xsc_port_ctrl *ctrl; + u64 addr; + u32 db_type; + u32 domain = 0; + u32 bus; + u32 devfn; + struct xsc_port_ctrl_file *file; struct xsc_core_device *xdev; + struct xsc_core_device *rl_xdev; + u32 bdf; + + file = filp->private_data; + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + + xsc_core_dbg(xdev, "_port_ctrl_map:offset=%lx\n", offset); + + bdf = offset >> 32; + db_type = bdf & 0x0000000f; + devfn = (bdf >> 4) & 0x000000ff; + bus = (bdf >> 12) & 0x000000ff; + + xsc_core_dbg(xdev, "bus=%u,devfn=%u,db_type=%u\n", bus, devfn, db_type); + + if (bdf != 0) { + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(domain, bus, devfn); + if (!rl_xdev) + return -1; + + if (db_type == XSC_MMAP_MSG_SQDB) { + addr = rl_xdev->regs.tx_db; + } else if (db_type == XSC_MMAP_MSG_RQDB) { + addr = rl_xdev->regs.rx_db; + } else if (db_type == XSC_MMAP_MSG_CQDB) { + addr = rl_xdev->regs.complete_db; + } else if (db_type == XSC_MMAP_MSG_ARM_CQDB) { + addr = rl_xdev->regs.complete_reg; + } else { + pr_err("[%s:%d] mmap err\n", __func__, __LINE__); + return -1; + } + } else { + rl_xdev = xdev; + if (is_db_ofst(xdev, offset)) + addr = offset; + else + return -EINVAL; + } - ctrl = filp->private_data; - xdev = container_of(ctrl, struct 
xsc_core_device, port_ctrl); + xsc_core_dbg(xdev, "tx_db=%llx,rx_db=%llx,cq_db=%llx,cq_reg=%llx\n", + rl_xdev->regs.tx_db, rl_xdev->regs.rx_db, + rl_xdev->regs.complete_db, rl_xdev->regs.complete_reg); - if (is_db_ofst(xdev, offset)) - reg_base = (pci_resource_start(xdev->pdev, xdev->bar_num) + (offset & PAGE_MASK)); - else - return -EINVAL; + reg_base = (pci_resource_start(rl_xdev->pdev, rl_xdev->bar_num) + (addr & PAGE_MASK)); -#if (CHIP_VERSION_H == 0x100) && (CHIP_HOTFIX_NUM >= 0x17) - reg_base = xsc_core_is_pf(xdev) ? reg_base - 0xA0000000 : reg_base; -#else - reg_base = reg_base - 0xA0000000; -#endif + if (xdev->chip_ver_h == 0x100) + reg_base = xsc_core_is_pf(rl_xdev) ? reg_base - 0xA0000000 : reg_base; + else + reg_base = reg_base - 0xA0000000; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (remap_pfn_range(vma, start, (reg_base >> PAGE_SHIFT), size, vma->vm_page_prot)) { @@ -90,23 +182,81 @@ static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } +static inline struct xsc_bdf_file *get_bdf_file(struct xsc_port_ctrl_file *file, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_core_device *xdev; + struct xsc_bdf_file *bdf_file; + struct xsc_core_device *rl_xdev; + unsigned long key; + + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + xsc_core_dbg(xdev, "domain=%x, bus=%x, devfn=%x\n", hdr->domain, hdr->bus, hdr->devfn); + if ((hdr->domain == 0 && hdr->bus == 0 && hdr->devfn == 0) || + (hdr->domain == pci_domain_nr(xdev->pdev->bus) && + hdr->bus == xdev->pdev->bus->number && + hdr->devfn == xdev->pdev->devfn)) + return file->root_bdf; + + key = bdf_to_key(hdr->domain, hdr->bus, hdr->devfn); + spin_lock(&file->bdf_lock); + bdf_file = radix_tree_lookup(&file->bdf_tree, key); + spin_unlock(&file->bdf_lock); + if (bdf_file) { + xsc_core_dbg(bdf_file->xdev, "find the bdf file: %lx\n", bdf_file->key); + return bdf_file; + } + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(hdr->domain, hdr->bus, hdr->devfn); 
+ if (!rl_xdev) + return NULL; + + bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); + if (!bdf_file) + return NULL; + + bdf_file->key = key; + INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); + spin_lock_init(&bdf_file->obj_lock); + bdf_file->xdev = rl_xdev; + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->bdf_lock); + radix_tree_insert(&file->bdf_tree, key, bdf_file); + spin_unlock(&file->bdf_lock); + radix_tree_preload_end(); + xsc_core_dbg(rl_xdev, "bdf file not exist, create it and add to port ctrl file\n"); + + return bdf_file; +} + static long _port_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct xsc_port_ctrl_reg *p; - struct xsc_port_ctrl *ctrl; - struct xsc_core_device *xdev; + struct xsc_port_ctrl_file *file; struct xsc_ioctl_hdr __user *user_hdr; + struct xsc_bdf_file *bdf_file; + struct xsc_ioctl_hdr hdr; + int err; - ctrl = filp->private_data; - xdev = container_of(ctrl, struct xsc_core_device, port_ctrl); + file = filp->private_data; user_hdr = (struct xsc_ioctl_hdr __user *)arg; + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return err; + + bdf_file = get_bdf_file(file, &hdr); + if (!bdf_file) + return -EFAULT; list_for_each_entry(p, &g_port_ctrl_cbs, node) { - if (p->cb) - p->cb(xdev, cmd, user_hdr, p->data); + if (p->cb) { + err = p->cb(bdf_file, cmd, user_hdr, p->data); + if (err != TRY_NEXT_CB) + break; + } } - return 0; + return err; } static const struct file_operations g_port_ctrl_fops = { @@ -115,6 +265,7 @@ static const struct file_operations g_port_ctrl_fops = { .mmap = _port_ctrl_mmap, .unlocked_ioctl = _port_ctrl_ioctl, .compat_ioctl = _port_ctrl_ioctl, + .release = _port_ctrl_release, }; static struct xsc_port_ctrl_reg *_port_ctrl_cbs_get(const char *name) @@ -144,7 +295,7 @@ static int _port_ctrl_data_init(void) int major_devid; ret = alloc_chrdev_region(&g_port_ctrl_root_dev, 0, XSC_PORT_CTRL_MAX, - XSC_PORT_CTRL_NAME_PRE); + XSC_PORT_CTRL_NAME_PRE); if (ret < 0) { pr_err("%s cant't 
get major id\n", XSC_PORT_CTRL_NAME_PRE); return -1; @@ -156,7 +307,7 @@ static int _port_ctrl_data_init(void) g_port_ctrl_class = class_create(THIS_MODULE, XSC_PORT_CTRL_NAME_PRE); if (IS_ERR(g_port_ctrl_class)) { pr_err("failed to call create class witch name %s\n", - XSC_PORT_CTRL_NAME_PRE); + XSC_PORT_CTRL_NAME_PRE); unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX); return -1; } @@ -169,11 +320,20 @@ static int _port_ctrl_data_init(void) static void _port_ctrl_dev_del(struct xsc_core_device *dev) { struct xsc_port_ctrl *ctrl; + struct xsc_port_ctrl_file *file, *n; ctrl = &dev->port_ctrl; if (!ctrl) return; + spin_lock(&ctrl->file_lock); + list_for_each_entry_safe(file, n, &ctrl->file_list, file_node) { + xsc_release_port_ctrl_file(file); + list_del(&file->file_node); + kfree(file); + } + spin_unlock(&ctrl->file_lock); + device_destroy(g_port_ctrl_class, ctrl->devid); cdev_del(&ctrl->cdev); } @@ -191,6 +351,8 @@ static int _port_ctrl_dev_add(struct xsc_core_device *dev) ctrl = &dev->port_ctrl; ctrl->devid = g_port_ctrl_root_dev + g_port_ctrl_dev_cnt; ctrl->cdev.owner = THIS_MODULE; + INIT_LIST_HEAD(&ctrl->file_list); + spin_lock_init(&ctrl->file_lock); cdev_init(&ctrl->cdev, &g_port_ctrl_fops); ret = cdev_add(&ctrl->cdev, ctrl->devid, 1); if (ret != 0) { @@ -200,8 +362,9 @@ static int _port_ctrl_dev_add(struct xsc_core_device *dev) } ctrl->device = device_create(g_port_ctrl_class, NULL, ctrl->devid, NULL, - "%s!%s_%02x:%02x.%x", XSC_PORT_CTRL_NAME_PRE, XSC_PORT_CTRL_NAME, - dev->bus_id, dev->dev_id, dev->func_id); + "%s!%s_%02x:%02x.%x", XSC_PORT_CTRL_NAME_PRE, + XSC_PORT_CTRL_NAME, dev->bus_num, + dev->dev_num, dev->func_id); if (IS_ERR(ctrl->device)) { xsc_core_err(dev, "failed to create port control device\n"); cdev_del(&ctrl->cdev); @@ -264,24 +427,52 @@ int xsc_port_ctrl_init(void) return 0; } +static void xsc_release_bdf_file(struct xsc_core_device *dev) +{ + struct xsc_core_device *pf_dev; + int domain; + unsigned int bus; + unsigned int 
devfn; + struct xsc_port_ctrl_file *file, *n; + struct xsc_bdf_file *bdf_file; + unsigned long key; + + if (!dev->pdev->physfn) /*for vf passthrough vm*/ + return; + + pf_dev = pci_get_drvdata(dev->pdev->physfn); + domain = pci_domain_nr(dev->pdev->bus); + bus = dev->pdev->bus->number; + devfn = dev->pdev->devfn; + key = bdf_to_key(domain, bus, devfn); + xsc_core_dbg(dev, "%x %x %x removed\n", domain, bus, devfn); + + spin_lock(&pf_dev->port_ctrl.file_lock); + list_for_each_entry_safe(file, n, &pf_dev->port_ctrl.file_list, file_node) { + spin_lock(&file->bdf_lock); + bdf_file = radix_tree_delete(&file->bdf_tree, key); + spin_unlock(&file->bdf_lock); + if (!bdf_file) + continue; + xsc_close_bdf_file(bdf_file); + kfree(bdf_file); + } + spin_unlock(&pf_dev->port_ctrl.file_lock); +} + void xsc_port_ctrl_remove(struct xsc_core_device *dev) { -#ifndef RUN_WITH_PSV if (xsc_core_is_pf(dev)) -#else - if (1) -#endif _port_ctrl_dev_del(dev); + else + xsc_release_bdf_file(dev); } int xsc_port_ctrl_probe(struct xsc_core_device *dev) { int ret = 0; -#ifndef RUN_WITH_PSV + if (xsc_core_is_pf(dev)) { -#else - if (1) { -#endif ret = _port_ctrl_dev_add(dev); if (ret != 0) xsc_core_err(dev, "failed to add new port control device\n"); @@ -305,7 +496,7 @@ int xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data) return -1; } - reg_node = kmalloc(sizeof(struct xsc_port_ctrl_reg), GFP_KERNEL); + reg_node = kmalloc(sizeof(*reg_node), GFP_KERNEL); if (!reg_node) return -1;