diff --git a/drivers/infiniband/hw/xsc/Makefile b/drivers/infiniband/hw/xsc/Makefile index b4fa5748bbad46598a3538218e8d39bba030dece..01480d1443b0d13f684d98a0ed8a3eec547c450e 100644 --- a/drivers/infiniband/hw/xsc/Makefile +++ b/drivers/infiniband/hw/xsc/Makefile @@ -8,10 +8,15 @@ ifeq ($(USE_INTERNAL_IB_CORE), 1) ccflags-y += -include /usr/src/ofa_kernel/include/rdma/ib_umem.h endif +ifeq ($(HAVE_TO_USE_M_IB_CORE), 1) + ccflags-y += ${xsc-ccflags} + export KBUILD_EXTRA_SYMBOLS += $(PWD)/net/ethernet/yunsilicon/Module.symvers +endif + obj-$(CONFIG_INFINIBAND_XSC) += xsc_ib.o -xsc_ib-y := main.o xsc_rdma_ctrl.o cq.o qp.o mem.o mr.o ah.o \ - counters.o devx.o private_dev.o ib_umem_ex.o\ +xsc_ib-y := main.o xsc_rdma_ctrl.o xsc_rdma_prgrmmbl_cc_ctrl.o cq.o qp.o mem.o mr.o ah.o \ + counters.o devx.o ib_umem_ex.o\ rtt.o xsc_ib_sysfs.o xsc_ib-$(CONFIG_XSC_PEER_SUPPORT) += peer_mem.o diff --git a/drivers/infiniband/hw/xsc/ah.c b/drivers/infiniband/hw/xsc/ah.c index 39da2861897d7da4da31fe5693785504e101ee85..46c02f0fa7bf5a6743129fdc83d74fb0a59ea54e 100644 --- a/drivers/infiniband/hw/xsc/ah.c +++ b/drivers/infiniband/hw/xsc/ah.c @@ -7,6 +7,7 @@ #include #include #include + #include "xsc_ib.h" #include "user.h" diff --git a/drivers/infiniband/hw/xsc/counters.c b/drivers/infiniband/hw/xsc/counters.c index 060ca199afa4ce6ff818c5195b6eb2d001fa527c..4c3f2998ef3eaa1c80305a740040a1e87dd1976d 100644 --- a/drivers/infiniband/hw/xsc/counters.c +++ b/drivers/infiniband/hw/xsc/counters.c @@ -48,6 +48,13 @@ static const struct counter_desc hw_rdma_stats_pf_desc[] = { /*global*/ { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_pkts) }, { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_bytes) }, + + /*for diamond*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_sequence_sr) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, packet_seq_err_sr) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_ndp_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_ndp_rx_trimmed_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_ndp_trimmed_pkts_sr) }, }; static const struct counter_desc hw_rdma_stats_vf_desc[] = { @@ -129,6 +136,7 @@ static ssize_t counters_names_show(struct kobject *kobjs, ssize_t count = 0; const struct counter_desc *desc; struct xsc_counters_attribute *xsc_counters_name_attr; + u32 mask = 0; xsc_counters_name_attr = container_of(attr, struct xsc_counters_attribute, @@ -142,8 +150,12 @@ static ssize_t counters_names_show(struct kobject *kobjs, desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); } - for (i = 0; i < desc_size; ++i) + mask = xsc_get_rdma_stat_mask(xsc_counters_name_attr->dev); + for (i = 0 ; i < desc_size; i++) { + if (!((1 << i) & mask)) + continue; count += sprintf(&buf[count], "%s\n", desc[i].format); + } return count; } @@ -160,6 +172,7 @@ static ssize_t counters_show(struct kobject *kobjs, const struct counter_desc *desc; struct xsc_hw_stats_rdma stats_rdma; struct xsc_counters_attribute *xsc_counters_attr; + u32 mask = 0; xsc_counters_attr = container_of(attr, struct xsc_counters_attribute, @@ -179,7 +192,10 @@ static ssize_t counters_show(struct kobject *kobjs, stats = (u8 *)&stats_rdma.stats.vf_stats; } + mask = xsc_get_rdma_stat_mask(xsc_counters_attr->dev); for (i = 0 ; i < desc_size; i++) { + if (!((1 << i) & mask)) + continue; value = *(u64 *)(stats + desc[i].offset); value = be64_to_cpu(value); count += sprintf(&buf[count], "%-26s %-20llu\n", @@ -194,7 +210,8 @@ static ssize_t counters_value_read(struct file *file, 
struct bin_attribute *bin_attr, char *buf, loff_t loff, size_t size) { - int i; + int i = 0; + int j = 0; int ret; u8 *stats; int bin_size; @@ -204,6 +221,7 @@ static ssize_t counters_value_read(struct file *file, const struct counter_desc *desc; struct xsc_hw_stats_rdma stats_rdma; struct xsc_counters_bin_attribute *xsc_counters_bin_attr; + u32 mask = 0; xsc_counters_bin_attr = container_of(&bin_attr->attr, struct xsc_counters_bin_attribute, @@ -235,9 +253,14 @@ static ssize_t counters_value_read(struct file *file, if (!tmp_value) return 0; + mask = xsc_get_rdma_stat_mask(xdev); + j = 0; for (i = 0; i < desc_size; i++) { - tmp_value[i] = *(u64 *)(stats + desc[i].offset); - tmp_value[i] = be64_to_cpu(tmp_value[i]); + if (!((1 << i) & mask)) + continue; + tmp_value[j] = *(u64 *)(stats + desc[i].offset); + tmp_value[j] = be64_to_cpu(tmp_value[i]); + j++; } memcpy(buf, tmp_value, xsc_counters_bin_attr->size); diff --git a/drivers/infiniband/hw/xsc/counters.h b/drivers/infiniband/hw/xsc/counters.h index e6f605282fe97fb203daa0014c86b2f67c12865e..001a57b8372d0704c0e0cc976713afb5c4fc3f3f 100644 --- a/drivers/infiniband/hw/xsc/counters.h +++ b/drivers/infiniband/hw/xsc/counters.h @@ -49,4 +49,5 @@ struct xsc_global_cnt_attributes { ssize_t (*store)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, const char *buf, size_t count); }; -#endif /* __COUNTERS_H__ */ + +#endif diff --git a/drivers/infiniband/hw/xsc/cq.c b/drivers/infiniband/hw/xsc/cq.c index 360b1b2f7f40e58d27bed37ad0881c6b1e35f0cf..90899557d60cb40b126302792b93169d2071d2f0 100644 --- a/drivers/infiniband/hw/xsc/cq.c +++ b/drivers/infiniband/hw/xsc/cq.c @@ -35,23 +35,6 @@ enum { XSC_CQE_APP_OP_TM_MSG_COMPLETION_CANCELED = 0xC, }; -static const u32 xsc_msg_opcode[][2][2] = { - [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND, - [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND_IMMDT, - [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV, - [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV_IMMDT, - [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE, - [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT, - [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, - [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT, - [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_READ, - [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, - [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, - [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, - [XSC_MSG_OPCODE_MAD][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND, - [XSC_MSG_OPCODE_MAD][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV, -}; - static const u32 xsc_cqe_opcode[] = { [XSC_OPCODE_RDMA_REQ_SEND] = IB_WC_SEND, [XSC_OPCODE_RDMA_REQ_SEND_IMMDT] = IB_WC_SEND, @@ -71,15 +54,6 @@ int xsc_stall_cq_poll_max = 100000; int xsc_stall_cq_inc_step = 100; int xsc_stall_cq_dec_step = 10; -static inline u8 xsc_get_cqe_opcode(struct xsc_cqe *cqe) -{ - if (cqe->is_error) - return cqe->type ? 
XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR; - if (cqe->msg_opcode > XSC_MSG_OPCODE_MAD) - return XSC_OPCODE_RDMA_CQE_ERROR; - return xsc_msg_opcode[cqe->msg_opcode][cqe->type][cqe->with_immdt]; -} - static void xsc_ib_cq_comp(struct xsc_core_cq *cq) { struct ib_cq *ibcq = &to_xibcq(cq)->ibcq; @@ -150,6 +124,7 @@ static void handle_responder(struct ib_wc *wc, struct xsc_cqe *cqe, idx = wq->tail & (wq->wqe_cnt - 1); wc->wr_id = wq->wrid[idx]; + atomic_dec(&wq->flush_wqe_cnt); ++wq->tail; } @@ -287,8 +262,11 @@ static int xsc_poll_one(struct xsc_ib_cq *cq, memset(wc, 0, sizeof(*wc)); wc->qp = &(*cur_qp)->ibqp; - opcode = xsc_get_cqe_opcode(cqe); + opcode = xsc_get_cqe_opcode(dev->xdev, cqe); switch (opcode) { + case XSC_OPCODE_RDMA_REQ_SEND_IMMDT: + case XSC_OPCODE_RDMA_REQ_WRITE_IMMDT: + wc->wc_flags |= IB_WC_WITH_IMM; case XSC_OPCODE_RDMA_REQ_SEND: case XSC_OPCODE_RDMA_REQ_WRITE: case XSC_OPCODE_RDMA_REQ_READ: @@ -299,9 +277,16 @@ static int xsc_poll_one(struct xsc_ib_cq *cq, handle_good_req(wc, cqe, opcode); wc->wr_id = wq->wrid[idx]; wq->tail = wq->wqe_head[idx] + 1; + if (opcode != XSC_OPCODE_RDMA_MAD_REQ_SEND) + atomic_dec(&wq->flush_wqe_cnt); + wq->need_flush[idx] = 0; xsc_ib_dbg(dev, "wqeid:%u, wq tail:%u qpn:%u\n", idx, wq->tail, qpn); wc->status = IB_WC_SUCCESS; break; + case XSC_OPCODE_RDMA_RSP_RECV_IMMDT: + case XSC_OPCODE_RDMA_RSP_WRITE_IMMDT: + wc->wc_flags |= IB_WC_WITH_IMM; + WR_BE_32(wc->ex.imm_data, RD_LE_32(cqe->imm_data)); case XSC_OPCODE_RDMA_RSP_RECV: wq = &(*cur_qp)->rq; handle_responder(wc, cqe, *cur_qp, opcode); @@ -314,6 +299,30 @@ static int xsc_poll_one(struct xsc_ib_cq *cq, xsc_handle_rdma_mad_resp_recv(cq, cur_qp, wc, cqe, opcode); break; + case XSC_OPCODE_RDMA_REQ_ERROR: + wq = &(*cur_qp)->sq; + idx = cqe->wqe_id >> (wq->wqe_shift - XSC_BASE_WQE_SHIFT); + idx &= (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + if (wq->need_flush[idx]) + atomic_dec(&wq->flush_wqe_cnt); + wq->need_flush[idx] = 0; + xsc_ib_err(dev, "req error\n%08x %08x %08x %08x %08x %08x\n", + p[0], p[1], p[2], p[3], p[5], p[6]); + wc->status = IB_WC_GENERAL_ERR; + break; + case XSC_OPCODE_RDMA_RSP_ERROR: + wq = &(*cur_qp)->rq; + idx = wq->tail & (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; + wq->tail++; + atomic_dec(&wq->flush_wqe_cnt); + xsc_ib_err(dev, "rsp error\n%08x %08x %08x %08x %08x %08x\n", + p[0], p[1], p[2], p[3], p[5], p[6]); + wc->status = IB_WC_GENERAL_ERR; + break; + default: xsc_ib_err(dev, "completion error\n%08x %08x %08x %08x %08x %08x\n", p[0], p[1], p[2], p[3], p[5], p[6]); @@ -325,6 +334,102 @@ static int xsc_poll_one(struct xsc_ib_cq *cq, return 0; } +static inline void gen_flush_err_cqe(struct xsc_err_state_qp_node *err_node, + struct ib_qp *ibqp, struct xsc_ib_wq *wq, u32 idx, + struct ib_wc *wc) +{ + memset(wc, 0, sizeof(*wc)); + if (err_node->is_sq) { + switch (wq->wr_opcode[idx]) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + case IB_WR_SEND_WITH_INV: + wc->opcode = IB_WC_SEND; + break; + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + wc->opcode = IB_WC_RDMA_WRITE; + break; + case IB_WR_RDMA_READ: + wc->opcode = IB_WC_RDMA_READ; + } + } else { + wc->opcode = IB_WC_RECV; + } + + wc->qp = ibqp; + wc->status = IB_WC_WR_FLUSH_ERR; + wc->vendor_err = XSC_ERR_CODE_FLUSH; + wc->wr_id = wq->wrid[idx]; + wq->tail++; + atomic_dec(&wq->flush_wqe_cnt); + if (err_node->is_sq) + wq->need_flush[idx] = 0; +} + +static inline int xsc_generate_flush_err_cqe(struct ib_cq *ibcq, + int ne, int *npolled, struct ib_wc *wc) 
+{ + u32 qp_id = 0; + int flush_wqe_cnt = 0; + int sw_npolled = 0; + u32 idx = 0; + struct xsc_err_state_qp_node *err_qp_node; + struct xsc_core_qp *xqp; + struct xsc_ib_cq *cq = to_xcq(ibcq); + struct xsc_ib_wq *wq; + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + + list_for_each_entry(err_qp_node, &cq->err_state_qp_list, entry) { + if (!err_qp_node) + break; + + sw_npolled = 0; + qp_id = err_qp_node->qp_id; + xqp = __xsc_qp_lookup(dev->xdev, qp_id); + if (unlikely(!xqp)) { + xsc_ib_warn(dev, "CQE@CQ %d for unknown QPN %d\n", + cq->xcq.cqn, qp_id); + continue; + } + wq = err_qp_node->is_sq ? &(to_xibqp(xqp)->sq) : &(to_xibqp(xqp)->rq); + flush_wqe_cnt = atomic_read(&wq->flush_wqe_cnt); + xsc_ib_dbg(dev, "is_sq %d, flush_wq_cnt %d, ne %d, npolled %d, qp_id %d\n", + err_qp_node->is_sq, flush_wqe_cnt, ne, *npolled, qp_id); + + if (flush_wqe_cnt <= (ne - *npolled)) { + while (sw_npolled < flush_wqe_cnt) { + idx = wq->tail & (wq->wqe_cnt - 1); + if (err_qp_node->is_sq && !wq->need_flush[idx]) { + wq->tail++; + continue; + } else { + gen_flush_err_cqe(err_qp_node, &(to_xibqp(xqp)->ibqp), wq, + idx, wc + *npolled + sw_npolled); + ++sw_npolled; + } + } + *npolled += sw_npolled; + } else { + while (sw_npolled < (ne - *npolled)) { + idx = wq->tail & (wq->wqe_cnt - 1); + if (err_qp_node->is_sq && !wq->need_flush[idx]) { + wq->tail++; + continue; + } else { + gen_flush_err_cqe(err_qp_node, &(to_xibqp(xqp)->ibqp), wq, + idx, wc + *npolled + sw_npolled); + ++sw_npolled; + } + } + *npolled = ne; + break; + } + } + + return 0; +} + int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct xsc_ib_cq *cq = to_xcq(ibcq); @@ -344,18 +449,20 @@ int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) break; } - /* make sure cqe read out before update ci */ - rmb(); + if (err) { + if (npolled < num_entries && !(list_empty(&cq->err_state_qp_list))) + xsc_generate_flush_err_cqe(ibcq, num_entries, &npolled, wc); + } if (next_cid != xcq->cons_index) - xsc_cq_set_ci(xcq); + xsc_update_cq_ci(xcq->dev, xcq->cqn, xcq->cons_index); spin_unlock_irqrestore(&cq->lock, flags); return npolled; } -int xsc_cqe_is_empty(struct xsc_ib_cq *cq) +static int xsc_cqe_is_empty(struct xsc_ib_cq *cq) { struct xsc_cqe *cqe = get_sw_cqe(cq, cq->xcq.cons_index); @@ -367,29 +474,24 @@ int xsc_cqe_is_empty(struct xsc_ib_cq *cq) int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { - union xsc_cq_doorbell db; struct xsc_ib_cq *xcq = to_xcq(ibcq); struct xsc_core_cq *cq = &xcq->xcq; int ret = 0; unsigned long irq_flags; + u8 solicited = 0; spin_lock_irqsave(&xcq->lock, irq_flags); - db.val = 0; - db.cq_next_cid = cq->cons_index; - db.cq_id = cq->cqn; if (flags & IB_CQ_NEXT_COMP) - db.arm = 0; + solicited = 0; else if (flags & IB_CQ_SOLICITED) - db.arm = 1;/* arm next:0 arm solicited:1 */ + solicited = 1;/* arm next:0 arm solicited:1 */ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && (!xsc_cqe_is_empty(xcq))) { ret = 1; goto out; } - /* make sure val write to memory done */ - wmb(); - writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); + xsc_arm_cq(cq->dev, cq->cqn, cq->cons_index, solicited); out: spin_unlock_irqrestore(&xcq->lock, irq_flags); return ret; @@ -417,7 +519,7 @@ static void free_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf) static int create_cq_user(struct xsc_ib_dev *dev, struct ib_udata *udata, struct ib_ucontext *context, struct xsc_ib_cq *cq, - int entries, struct xsc_create_cq_mbox_in **cqb, + int entries, struct xsc_create_cq_ex_mbox_in **cqb, int 
*cqe_size, int *index, int *inlen) { struct xsc_ib_create_cq ucmd; @@ -462,7 +564,7 @@ static int create_cq_user(struct xsc_ib_dev *dev, struct ib_udata *udata, goto err_umem; } xsc_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, hw_npages, true); - (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + (*cqb)->ctx_ex.ctx.pa_num = cpu_to_be16(hw_npages); return 0; @@ -478,7 +580,7 @@ static void destroy_cq_user(struct xsc_ib_cq *cq, struct ib_udata *udata) static int create_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq, int entries, int cqe_size, - struct xsc_create_cq_mbox_in **cqb, + struct xsc_create_cq_ex_mbox_in **cqb, int *index, int *inlen) { int err; @@ -505,7 +607,7 @@ static int create_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq, goto err_buf; } xsc_fill_page_array(&cq->buf.buf, (*cqb)->pas, hw_npages); - (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + (*cqb)->ctx_ex.ctx.pa_num = cpu_to_be16(hw_npages); return 0; @@ -519,16 +621,18 @@ static void destroy_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq) free_cq_buf(dev, &cq->buf); } -xsc_ib_create_cq_def() +int xsc_ib_create_cq(struct ib_cq *ibcq, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata) { struct ib_device *ibdev = ibcq->device; int entries = attr->cqe; int vector = attr->comp_vector; - struct xsc_create_cq_mbox_in *cqb = NULL; + struct xsc_create_cq_ex_mbox_in *cqb = NULL; struct xsc_ib_dev *dev = to_mdev(ibdev); struct xsc_ib_cq *cq; int index; - int inlen; + int inlen = 0; int cqe_size; int irqn; int err; @@ -561,15 +665,16 @@ xsc_ib_create_cq_def() } cq->cqe_size = cqe_size; - cqb->ctx.log_cq_sz = ilog2(entries); - cqb->ctx.glb_func_id = cpu_to_be16(dev->xdev->glb_func_id); + cqb->ctx_ex.ctx.log_cq_sz = ilog2(entries); + cqb->ctx_ex.ctx.glb_func_id = cpu_to_be16(dev->xdev->glb_func_id); err = xsc_vector2eqn(dev->xdev, vector, &eqn, &irqn); if (err) goto err_cqb; - cqb->ctx.eqn = eqn; - cqb->ctx.eqn = cpu_to_be16(cqb->ctx.eqn); + cqb->ctx_ex.ctx.eqn = eqn; + cqb->ctx_ex.ctx.eqn = cpu_to_be16(cqb->ctx_ex.ctx.eqn); + cqb->ctx_ex.page_shift = PAGE_SHIFT; err = xsc_core_create_cq(dev->xdev, &cq->xcq, cqb, inlen); if (err) @@ -588,6 +693,7 @@ xsc_ib_create_cq_def() } } + INIT_LIST_HEAD(&cq->err_state_qp_list); xsc_vfree(cqb); return 0; @@ -604,13 +710,19 @@ xsc_ib_create_cq_def() err_create: - return RET_VALUE(err); + return err; } xsc_ib_destroy_cq_def() { struct xsc_ib_dev *dev = to_mdev(cq->device); struct xsc_ib_cq *xcq = to_xcq(cq); + struct xsc_err_state_qp_node *tmp = NULL, *err_qp_node = NULL; + + list_for_each_entry_safe(err_qp_node, tmp, &xcq->err_state_qp_list, entry) { + list_del(&err_qp_node->entry); + kfree(err_qp_node); + } xsc_core_destroy_cq(dev->xdev, &xcq->xcq); if (udata) @@ -666,11 +778,7 @@ void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 rsn) if (nfreed) { cq->xcq.cons_index += nfreed; - /* Make sure update of buffer contents is done before - * updating consumer index. - */ - wmb(); - xsc_cq_set_ci(&cq->xcq); + xsc_update_cq_ci(cq->xcq.dev, cq->xcq.cqn, cq->xcq.cons_index); } } diff --git a/drivers/infiniband/hw/xsc/devx.c b/drivers/infiniband/hw/xsc/devx.c index fca43076bae1838296062a04a56b83072718ec0d..a6dc551db1cd1ea32025c25752f15d9f688e8739 100644 --- a/drivers/infiniband/hw/xsc/devx.c +++ b/drivers/infiniband/hw/xsc/devx.c @@ -3,6 +3,7 @@ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. 
*/ + #include #include #include @@ -13,7 +14,6 @@ #define UVERBS_MODULE_NAME xsc_ib #include #include "user.h" - static struct xsc_ib_ucontext *devx_uattrs2uctx(struct uverbs_attr_bundle *attrs) { return to_xucontext(ib_uverbs_get_ucontext(attrs)); @@ -35,7 +35,7 @@ static bool devx_is_general_cmd(void *in) static int UVERBS_HANDLER(XSC_IB_METHOD_DEVX_OTHER)(struct uverbs_attr_bundle *attrs) { - struct xsc_ib_ucontext *c; + struct xsc_ib_ucontext *c = NULL; struct xsc_ib_dev *dev; void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN); int cmd_out_len = uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT); diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.c b/drivers/infiniband/hw/xsc/ib_umem_ex.c index ce09d76ba58b4314a672be97ca6796292785327c..5a1d0160b657c84a9aec26433e9989752f9ba634 100644 --- a/drivers/infiniband/hw/xsc/ib_umem_ex.c +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.c @@ -3,8 +3,10 @@ * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. * All rights reserved. */ + #include #include + #include "ib_umem_ex.h" #ifndef CONFIG_INFINIBAND_PEER_MEMORY #include "ib_peer_mem.h" @@ -17,16 +19,17 @@ struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) if (!umem) return ERR_PTR(-EINVAL); -#ifndef CONFIG_INFINIBAND_PEER_MEMORY +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + ret_umem = (struct ib_umem_ex *)umem; +#else ret_umem = kzalloc(sizeof(*ret_umem), GFP_KERNEL); if (!ret_umem) return ERR_PTR(-ENOMEM); ret_umem->umem = *umem; kfree(umem); -#else - ret_umem = (struct ib_umem_ex *)umem; #endif + return ret_umem; } @@ -41,7 +44,6 @@ struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, void ib_umem_ex_release(struct ib_umem_ex *umem_ex) { struct ib_umem *umem = (struct ib_umem *)umem_ex; - ib_umem_release(umem); } diff --git a/drivers/infiniband/hw/xsc/main.c b/drivers/infiniband/hw/xsc/main.c index 38374eda1ec5dc4306129b7c590571c9a82c2936..7d93e2cdc6b59230d2a614741104e22c60423f05 100644 --- a/drivers/infiniband/hw/xsc/main.c +++ b/drivers/infiniband/hw/xsc/main.c @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -27,19 +30,14 @@ #include "user.h" #include "xsc_ib.h" #include "xsc_rdma_ctrl.h" +#include "xsc_rdma_prgrmmbl_cc_ctrl.h" #define DRIVER_NAME "xsc_ib" -#define DRIVER_VERSION "1.0" -#define DRIVER_RELDATE "Jan 2022" -MODULE_DESCRIPTION("Yunsilicon Amber HCA IB driver"); +MODULE_DESCRIPTION("Yunsilicon HCA IB driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); -static char xsc_version[] = - DRIVER_NAME ": Yunsilicon Infiniband driver" - DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; - static int xsc_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *udata) @@ -93,9 +91,9 @@ static int xsc_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; props->page_size_cap = dev->xdev->caps.min_page_sz; - props->max_mr_size = (1 << dev->xdev->caps.log_max_mtt) * PAGE_SIZE; - props->max_qp = 1 << dev->xdev->caps.log_max_qp; - props->max_qp_wr = (32 * 1024); /* hack for GPFS */ + props->max_mr_size = dev->xdev->caps.max_mtt * PAGE_SIZE; + props->max_qp = dev->xdev->caps.max_qp; + props->max_qp_wr = xsc_get_max_qp_depth(dev->xdev); max_rq_sg = dev->xdev->caps.max_rq_desc_sz / sizeof(struct xsc_wqe_data_seg); max_sq_sg = (dev->xdev->caps.max_sq_desc_sz - sizeof(struct xsc_wqe_ctrl_seg_2)) / sizeof(struct xsc_wqe_data_seg_2); @@ -104,10 +102,10 @@ static int xsc_ib_query_device(struct ib_device 
*ibdev, XSC_RADDR_SEG_NUM; props->max_recv_sge = dev->xdev->caps.recv_ds_num; props->max_sge_rd = 1;/*max sge per read wqe*/ - props->max_cq = 1 << dev->xdev->caps.log_max_cq; - props->max_cqe = dev->xdev->caps.max_cqes - 1; + props->max_cq = dev->xdev->caps.max_cq; + props->max_cqe = dev->xdev->caps.max_cqes; props->max_mr = 1 << dev->xdev->caps.log_max_mkey; - props->max_pd = 1 << dev->xdev->caps.log_max_pd; + props->max_pd = dev->xdev->caps.max_pd; props->max_qp_rd_atom = dev->xdev->caps.max_ra_req_qp; props->max_qp_init_rd_atom = dev->xdev->caps.max_ra_res_qp; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; @@ -132,7 +130,7 @@ static int xsc_ib_query_device(struct ib_device *ibdev, props->hw_ver = ((dev->xdev->chip_ver_l & 0xffff) << 16) | (dev->xdev->hotfix_num & 0xffff); props->max_pkeys = 0x80; - props->max_wq_type_rq = 1 << dev->xdev->caps.log_max_qp; + props->max_wq_type_rq = dev->xdev->caps.max_qp; props->hca_core_clock = dev->xdev->caps.hca_core_clock * 1000;//KHz props->rss_caps.max_rwq_indirection_tables = @@ -190,8 +188,8 @@ static int xsc_ib_query_device(struct ib_device *ibdev, return 0; } -void xsc_calc_link_info(struct xsc_core_device *xdev, - struct ib_port_attr *props) +static void xsc_calc_link_info(struct xsc_core_device *xdev, + struct ib_port_attr *props) { switch (xsc_get_link_speed(xdev)) { case MODULE_SPEED_10G: @@ -234,6 +232,10 @@ void xsc_calc_link_info(struct xsc_core_device *xdev, props->active_speed = XSC_RDMA_LINK_SPEED_50GB; props->active_width = 4; break; + case MODULE_SPEED_400G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_100GB; + props->active_width = 2; + break; default: props->active_speed = XSC_RDMA_LINK_SPEED_25GB; props->active_width = 1; @@ -313,6 +315,11 @@ static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context) struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + if (attr->port_num > XSC_MAX_PORTS || + (!rdma_cap_roce_gid_table(attr->device, attr->port_num)) || + attr->index >= sgid_tbl->max) + return -EINVAL; + if (!sgid_tbl) return -EINVAL; @@ -334,7 +341,7 @@ static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context) return 0; } -int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) +static int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) { int i = 0; u32 free_idx = 0; @@ -348,6 +355,10 @@ int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) if (sgid_tbl->count == sgid_tbl->max) return -ENOMEM; + if (attr->port_num > XSC_MAX_PORTS || + !rdma_cap_roce_gid_table(attr->device, attr->port_num) || + !context) + return -EINVAL; free_idx = sgid_tbl->max; for (i = 0; i < sgid_tbl->max; i++) { if (!memcmp(&sgid_tbl->tbl[i], gid_raw, sizeof(*gid_raw))) { @@ -453,19 +464,18 @@ xsc_ib_alloc_ucontext_def() if (err) return RET_VALUE(err); - resp.qp_tab_size = 1 << dev->xdev->caps.log_max_qp; + resp.qp_tab_size = dev->xdev->caps.max_qp; resp.cache_line_size = L1_CACHE_BYTES; resp.max_sq_desc_sz = dev->xdev->caps.max_sq_desc_sz; resp.max_rq_desc_sz = dev->xdev->caps.max_rq_desc_sz; resp.max_send_wqebb = dev->xdev->caps.max_wqes; resp.max_recv_wr = dev->xdev->caps.max_wqes; - resp.qpm_tx_db = dev->xdev->regs.tx_db; - resp.qpm_rx_db = dev->xdev->regs.rx_db; - resp.cqm_next_cid_reg = dev->xdev->regs.complete_reg; - resp.cqm_armdb = dev->xdev->regs.complete_db; + xsc_get_db_addr(dev->xdev, &resp.qpm_tx_db, &resp.qpm_rx_db, + &resp.cqm_armdb, &resp.cqm_next_cid_reg, NULL); resp.send_ds_num = 
dev->xdev->caps.send_ds_num; resp.recv_ds_num = dev->xdev->caps.recv_ds_num; resp.cmds_supp_uhw |= XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE; + resp.device_id = dev->xdev->pdev->device; context = to_xucontext(uctx); @@ -485,6 +495,7 @@ xsc_ib_alloc_ucontext_def() xsc_ib_dealloc_ucontext_def() { + return; } static int xsc_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) @@ -494,27 +505,29 @@ static int xsc_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; resource_size_t reg_base; resource_size_t reg_size = vma->vm_end - vma->vm_start; + u64 tx_db = 0; + u64 rx_db = 0; + u64 cq_db = 0; + u64 cq_reg = 0; + xsc_get_db_addr(xdev, &tx_db, &rx_db, &cq_db, &cq_reg, NULL); xsc_core_dbg(xdev, "offset:0x%lx", offset); - if (offset == (xdev->regs.tx_db & PAGE_MASK)) - reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + - (xdev->regs.tx_db & PAGE_MASK); - else if (offset == (xdev->regs.rx_db & PAGE_MASK)) - reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + - (xdev->regs.rx_db & PAGE_MASK); - else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) - reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + - (xdev->regs.complete_reg & PAGE_MASK); - else if (offset == (xdev->regs.complete_db & PAGE_MASK)) - reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + - (xdev->regs.complete_db & PAGE_MASK); + if (offset == (tx_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + (tx_db & PAGE_MASK); + else if (offset == (rx_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + (rx_db & PAGE_MASK); + else if (offset == (cq_reg & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + (cq_reg & PAGE_MASK); + else if (offset == (cq_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + (cq_db & PAGE_MASK); else return -EINVAL; xsc_core_dbg(xdev, "regbase:0x%llx", reg_base); - reg_base = xsc_core_is_pf(xdev) ? reg_base - 0xA0000000 : reg_base; + reg_base = (xsc_core_is_pf(xdev) && !is_pf_bar_compressed(xdev)) ? 
+ reg_base - 0xA0000000 : reg_base; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, reg_base >> PAGE_SHIFT, @@ -534,7 +547,6 @@ xsc_ib_alloc_pd_def() err = xsc_core_alloc_pd(to_mdev(ibdev)->xdev, &pd->pdn); if (err) { - kfree(pd); return RET_VALUE(err); } @@ -616,7 +628,7 @@ static struct net_device *xsc_get_netdev(struct ib_device *ibdev, u8 port_num) return dev; } -void xsc_get_guid(const u8 *dev_addr, u8 *guid) +static void xsc_get_guid(const u8 *dev_addr, u8 *guid) { u8 mac[ETH_ALEN]; @@ -645,8 +657,8 @@ static int init_node_data(struct xsc_ib_dev *dev) return err; } -void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event, - unsigned long param) +static void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event, + unsigned long param) { struct xsc_priv *priv = &xdev->priv; struct xsc_device_context *dev_ctx; @@ -788,7 +800,6 @@ static int populate_specs_root(struct xsc_ib_dev *dev) const struct uverbs_object_tree_def **trees = (const struct uverbs_object_tree_def **)dev->driver_trees; size_t num_trees = 0; - trees[num_trees++] = xsc_ib_get_devx_tree(); WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); @@ -929,14 +940,66 @@ static int xsc_unregister_netdev_notifier(struct xsc_ib_dev *ibdev) return unregister_netdevice_notifier(&ibdev->nb); } +static void xsc_get_ibdev_name(void *xdev, u8 *name, int len) +{ + struct xsc_ib_dev *dev = (struct xsc_ib_dev *)((struct xsc_core_device *)xdev)->xsc_ib_dev; + + memcpy(name, dev->ib_dev.name, len); +} + +static void xsc_get_mdev_ibdev_name(struct net_device *netdev, char *name, int len) +{ + struct ib_device *ibdev; + struct device *dev; + const char *path = "/sys/class/infiniband/"; + struct path parent_path; + struct path child_path; + struct kobject *kobj; + struct dentry *parent; + struct dentry *child; + struct inode *inode; + struct kernfs_node *kn; + char child_name[128]; + + if (kern_path(path, LOOKUP_FOLLOW, &parent_path)) + return; + + parent = parent_path.dentry; + inode_lock(parent->d_inode); + list_for_each_entry(child, &parent->d_subdirs, d_child) { + sprintf(child_name, "/sys/class/infiniband/%s", child->d_iname); + if (kern_path(child_name, LOOKUP_FOLLOW, &child_path)) + continue; + inode = child_path.dentry->d_inode; + inode_lock(inode); + kn = inode->i_private; + if (!kn) + goto next; + kobj = kn->priv; + if (!kobj) + goto next; + dev = container_of(kobj, struct device, kobj); + ibdev = container_of(dev, struct ib_device, dev); + if (ibdev->dev.parent == netdev->dev.parent) { + memcpy(name, ibdev->name, len); + inode_unlock(inode); + path_put(&child_path); + break; + } +next: + inode_unlock(inode); + path_put(&child_path); + } + inode_unlock(parent->d_inode); + path_put(&parent_path); +} + static int init_one(struct xsc_core_device *xdev, struct xsc_ib_dev **m_ibdev) { struct xsc_ib_dev *dev; int err; - pr_info_once("%s", xsc_version); - dev = (struct xsc_ib_dev *)ib_alloc_device(xsc_ib_dev, ib_dev); if (!dev) return -ENOMEM; @@ -953,9 +1016,9 @@ static int init_one(struct xsc_core_device *xdev, dev->num_comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; if (xsc_lag_is_roce(xdev)) - strlcpy(dev->ib_dev.name, "xscale_bond_%d", IB_DEVICE_NAME_MAX); + strscpy(dev->ib_dev.name, "xscale_bond_%d", IB_DEVICE_NAME_MAX); else - strlcpy(dev->ib_dev.name, "xscale_%d", IB_DEVICE_NAME_MAX); + strscpy(dev->ib_dev.name, "xscale_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.local_dma_lkey = 0xFF; @@ -1027,12 +1090,14 
@@ static int init_one(struct xsc_core_device *xdev, xdev->xsc_ib_dev = dev; + xdev->get_ibdev_name = xsc_get_ibdev_name; + xdev->get_rdma_ctrl_info = xsc_get_rdma_ctrl_info; + xsc_register_get_mdev_ibdev_name_func(xsc_get_mdev_ibdev_name); + xsc_register_netdev_notifier(dev); xsc_counters_init(&dev->ib_dev, xdev); - xsc_priv_dev_init(&dev->ib_dev, xdev); - xsc_rtt_sysfs_init(&dev->ib_dev, xdev); xsc_ib_sysfs_init(&dev->ib_dev, xdev); @@ -1054,7 +1119,6 @@ static void remove_one(struct xsc_core_device *xdev, void *intf_ctx) xsc_rtt_sysfs_fini(xdev); xsc_ib_sysfs_fini(&dev->ib_dev, xdev); - xsc_priv_dev_fini(&dev->ib_dev, xdev); xsc_counters_fini(&dev->ib_dev, xdev); xsc_unregister_netdev_notifier(dev); ib_unregister_device(&dev->ib_dev); @@ -1137,7 +1201,7 @@ static struct xsc_interface xsc_interface = { .protocol = XSC_INTERFACE_PROTOCOL_IB, }; -int xsc_ib_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +static int xsc_ib_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) { pr_info("xsc ib driver recv %lu event\n", action); @@ -1160,29 +1224,29 @@ struct notifier_block xsc_ib_nb = { void xsc_remove_rdma_driver(void) { xsc_rdma_ctrl_fini(); + xsc_rdma_prgrmmbl_cc_ctrl_fini(); xsc_unregister_interface(&xsc_interface); - xsc_priv_unregister_chrdev_region(); } static int __init xsc_ib_init(void) { int ret; - ret = xsc_priv_alloc_chrdev_region(); + ret = xsc_register_interface(&xsc_interface); if (ret) goto out; - ret = xsc_register_interface(&xsc_interface); - if (ret) { - xsc_priv_unregister_chrdev_region(); + ret = xsc_rdma_ctrl_init(); + if (ret != 0) { + pr_err("failed to register port control node\n"); + xsc_unregister_interface(&xsc_interface); goto out; } - ret = xsc_rdma_ctrl_init(); + ret = xsc_rdma_prgrmmbl_cc_ctrl_init(); if (ret != 0) { - pr_err("failed to register port control node\n"); + pr_err("failed to register programmable cc control node\n"); xsc_unregister_interface(&xsc_interface); - xsc_priv_unregister_chrdev_region(); goto out; } diff --git a/drivers/infiniband/hw/xsc/mem.c b/drivers/infiniband/hw/xsc/mem.c index ca70c21016212c9cc7313d70bc24106b29bc8c01..6cf1441ad79ac01714ba87fc9d9634143d701f34 100644 --- a/drivers/infiniband/hw/xsc/mem.c +++ b/drivers/infiniband/hw/xsc/mem.c @@ -18,9 +18,9 @@ static inline int xsc_count_trailing_zeros(unsigned long x) return (x != 0) ? 
__ffs(x) : COUNT_TRAILING_ZEROS_0; } -int xsc_find_chunk_cont_0(struct xsc_pa_chunk *chunk, - int is_first, - int is_last) +static int xsc_find_chunk_cont_0(struct xsc_pa_chunk *chunk, + int is_first, + int is_last) { static const int max_count = sizeof(int) << 3; dma_addr_t pa, end_pa; @@ -190,10 +190,10 @@ int xsc_find_best_pgsz(struct ib_umem *umem, * @ncont: number of compund pages * @order: log2 of the number of compound pages */ -void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, - unsigned long max_page_shift, - int *count, int *shift, - int *ncont, int *order) +static void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, + unsigned long max_page_shift, + int *count, int *shift, + int *ncont, int *order) { unsigned long tmp; unsigned long m; @@ -256,9 +256,9 @@ void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, __xsc_ib_cont_pages(umem, addr, 0, count, shift, ncont, order); } -void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, - int page_shift, size_t offset, size_t num_pages, - __be64 *pas, int access_flags, bool need_to_devide) +static void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, size_t offset, size_t num_pages, + __be64 *pas, int access_flags, bool need_to_devide) { unsigned long umem_page_shift = PAGE_SHIFT; int shift = page_shift - umem_page_shift; diff --git a/drivers/infiniband/hw/xsc/mr.c b/drivers/infiniband/hw/xsc/mr.c index 34aeaaf20e85304fb990cab3c9effad8230d4699..b1305d9455939d2c6d3bfc3b5a00403e226cf56d 100644 --- a/drivers/infiniband/hw/xsc/mr.c +++ b/drivers/infiniband/hw/xsc/mr.c @@ -11,6 +11,7 @@ #include #include "common/xsc_cmd.h" #include + #include "ib_umem_ex.h" #include "xsc_ib.h" @@ -72,7 +73,7 @@ struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) return ERR_PTR(err); } -void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) +static void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) { int i; @@ -83,7 +84,7 @@ void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, u64 *pas, int page_shift, - int access_flags) + int access_flags, int using_peer_mem) { struct xsc_ib_dev *dev = to_mdev(pd->device); struct xsc_register_mr_mbox_in *in; @@ -116,10 +117,14 @@ static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, in->req.pdn = cpu_to_be32(to_mpd(pd)->pdn); in->req.va_base = cpu_to_be64(virt_addr); in->req.map_en = XSC_MPT_MAP_EN; - in->req.len = cpu_to_be32((u32)length); - in->req.page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : - (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : - (page_shift == XSC_PAGE_SHIFT_2M ? XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); + in->req.len = cpu_to_be64(length); + in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, page_shift); + xsc_ib_info(dev, "read_flush hwconfig %s\n", + dev->xdev->read_flush ? 
"enable" : "disable"); + if (dev->xdev->read_flush) + in->req.is_gpu = using_peer_mem; + else + in->req.is_gpu = 0; in->req.mkey = cpu_to_be32(mr->mmr.key); err = xsc_core_register_mr(dev->xdev, &mr->mmr, in, inlen); if (err) { @@ -157,9 +162,15 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int npages; u64 *pas; int err; + int using_peer_mem = 0; struct ib_peer_memory_client *ib_peer_mem = NULL; struct xsc_ib_peer_id *xsc_ib_peer_id = NULL; + if (length > dev->xdev->caps.max_mr_size) { + xsc_ib_err(dev, "reg user mr length(%llu) exceeded.\n", length); + return ERR_PTR(-EINVAL); + } + xsc_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", start, virt_addr, length); @@ -171,10 +182,8 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, #endif if (IS_ERR(umem)) { #ifdef CONFIG_INFINIBAND_PEER_MEMORY - xsc_ib_warn(dev, "umem get failed\n"); return (void *)umem; #else - // check client peer memory u8 peer_exists = 0; umem_ex = ib_client_umem_get(pd->uobject->context, @@ -195,18 +204,22 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, xsc_ib_peer_id); if (err) goto error; + using_peer_mem = 1; #endif - } else { umem_ex = ib_umem_ex(umem); if (IS_ERR(umem_ex)) { err = -ENOMEM; goto error; } +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + if (umem->is_peer) + using_peer_mem = 1; +#endif } umem = &umem_ex->umem; - err = xsc_find_best_pgsz(umem, 0x40211000, start, &npages, &page_shift, &pas); + err = xsc_find_best_pgsz(umem, XSC_MR_PAGE_CAP_MASK, start, &npages, &page_shift, &pas); if (err) { vfree(pas); pas = NULL; @@ -221,7 +234,8 @@ struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, xsc_ib_dbg(dev, "npages %d, page_shift %d\n", npages, page_shift); - mr = reg_create(pd, virt_addr, length, umem, npages, pas, page_shift, access_flags); + mr = reg_create(pd, virt_addr, length, umem, npages, pas, + page_shift, access_flags, using_peer_mem); if (IS_ERR(mr)) { err = PTR_ERR(mr); goto error; @@ -377,17 +391,6 @@ static int xsc_set_page(struct ib_mr *ibmr, u64 pa) return 0; } -u8 xsc_get_mr_page_mode(struct xsc_core_device *xdev, u32 page_shift) -{ - u8 page_mode = 0; - - page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : - (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : - (page_shift == XSC_PAGE_SHIFT_2M ? 
XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); - - return page_mode; -} - int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { @@ -397,6 +400,10 @@ int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, xsc_set_page); } +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, align_to) ((x) & ~((align_to) - 1)) +#endif + int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) { const struct ib_reg_wr *reg_wr = container_of(wr, struct ib_reg_wr, wr); @@ -408,6 +415,11 @@ int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) int err; __be64 *pas; + if (ibmr->length > dev->xdev->caps.max_mr_size) { + xsc_ib_err(dev, "wr reg mr length exceeded.\n"); + return -EINVAL; + } + inlen = sizeof(*in) + sizeof(__be64) * mmr->npages; in = kzalloc(inlen, GFP_ATOMIC); if (!in) @@ -416,14 +428,14 @@ int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) in->req.pdn = cpu_to_be32(mmr->mmr.pd); in->req.mkey = cpu_to_be32(ibmr->rkey); in->req.acc = convert_access(reg_wr->access); - in->req.page_mode = 0; + in->req.is_gpu = 0; in->req.map_en = XSC_MPT_MAP_EN; if (xsc_ib_iommu_dma_map(ibmr->device)) { static u32 support_page_shift[] = {12, 16, 21, 30}; u64 va_base; u64 pa_base; - int len; + u64 len; int i; u32 page_shift; @@ -436,7 +448,7 @@ int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) pa_base = ALIGN_DOWN(mmr->pas[0], (1 << page_shift)); in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, page_shift); in->req.pa_num = cpu_to_be32(1); - in->req.len = cpu_to_be32(len); + in->req.len = cpu_to_be64(len); in->req.va_base = cpu_to_be64(va_base); in->req.pas[0] = cpu_to_be64(pa_base); goto out; @@ -449,19 +461,19 @@ int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, PAGE_SHIFT_4K); in->req.va_base = cpu_to_be64(ibmr->iova); in->req.pa_num = cpu_to_be32(mmr->npages); - in->req.len = cpu_to_be32(ibmr->length); + in->req.len = cpu_to_be64(ibmr->length); pas = in->req.pas; for (i = 0; i < mmr->npages; i++) pas[i] = cpu_to_be64(mmr->pas[i]); out: - xsc_ib_dbg(dev, "iova=%llx, pas=%llx, req.page_mode=%u, req.va_base=%llx, req.pas=%llx, req.len=%d, req.pa_num=%d\n", + xsc_ib_dbg(dev, "iova=%llx, pas=%llx, req.page_mode=%u, req.va_base=%llx, req.pas=%llx, req.len=%lld, req.pa_num=%d\n", ibmr->iova, mmr->pas[0], in->req.page_mode, be64_to_cpu(in->req.va_base), be64_to_cpu(in->req.pas[0]), - be32_to_cpu(in->req.len), + be64_to_cpu(in->req.len), be32_to_cpu(in->req.pa_num)); err = xsc_core_register_mr(dev->xdev, &mmr->mmr, in, sizeof(*in)); @@ -492,7 +504,8 @@ void xsc_reg_local_dma_mr(struct xsc_core_device *dev) in.req.len = 0; in.req.mkey = cpu_to_be32(0xFF); in.req.acc = XSC_PERM_LOCAL_WRITE | XSC_PERM_LOCAL_READ; - in.req.page_mode = 0; + in.req.page_mode = xsc_get_mr_page_mode(dev, PAGE_SHIFT_4K); + in.req.is_gpu = 0; in.req.map_en = !(XSC_MPT_MAP_EN); in.req.va_base = 0; diff --git a/drivers/infiniband/hw/xsc/private_dev.c b/drivers/infiniband/hw/xsc/private_dev.c deleted file mode 100644 index 9bd08dcb75bd90a72179fd3038be096a1f1994b0..0000000000000000000000000000000000000000 --- a/drivers/infiniband/hw/xsc/private_dev.c +++ /dev/null @@ -1,1031 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include "common/xsc_core.h" -#include "common/xsc_ioctl.h" -#include "common/xsc_hsi.h" -#include "common/xsc_lag.h" -#include "common/res_obj.h" -#include "xsc_ib.h" - -#define FEATURE_ONCHIP_FT_MASK BIT(4) -#define FEATURE_DMA_RW_TBL_MASK BIT(8) -#define FEATURE_PCT_EXP_MASK BIT(9) - -static int xsc_priv_dev_open(struct inode *inode, struct file *file) -{ - struct xsc_priv_device *priv_dev = - container_of(inode->i_cdev, struct xsc_priv_device, cdev); - struct xsc_core_device *xdev = - container_of(priv_dev, struct xsc_core_device, priv_device); - struct xsc_bdf_file *bdf_file; - - bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); - if (!file) - return -ENOMEM; - - INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); - spin_lock_init(&bdf_file->obj_lock); - - bdf_file->xdev = xdev; - bdf_file->key = bdf_to_key(pci_domain_nr(xdev->pdev->bus), - xdev->pdev->bus->number, xdev->pdev->devfn); - bdf_file->restore_nic_fn = NULL; - - radix_tree_preload(GFP_KERNEL); - spin_lock(&priv_dev->bdf_lock); - radix_tree_insert(&priv_dev->bdf_tree, bdf_file->key, bdf_file); - spin_unlock(&priv_dev->bdf_lock); - radix_tree_preload_end(); - file->private_data = bdf_file; - - return 0; -} - -static int xsc_priv_dev_release(struct inode *inode, struct file *filp) -{ - struct xsc_bdf_file *bdf_file = filp->private_data; - struct xsc_core_device *xdev = bdf_file->xdev; - - xsc_close_bdf_file(bdf_file); - - if (bdf_file->restore_nic_fn) { - xsc_set_user_mode(xdev, false); - bdf_file->restore_nic_fn(xdev); - } - - spin_lock(&xdev->priv_device.bdf_lock); - radix_tree_delete(&xdev->priv_device.bdf_tree, bdf_file->key); - spin_unlock(&xdev->priv_device.bdf_lock); - - kfree(bdf_file); - - return 0; -} - -static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) -{ - struct xsc_ioctl_mem_info *minfo; - struct xsc_ioctl_data_tl *tl; - struct xsc_ioctl_mbox_in *in; - struct xsc_mem_entry *m_ent; - char tname[TASK_COMM_LEN]; - int in_size; - int err = 0; - u8 lfound = 0; - - in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; - in = kvzalloc(in_size, GFP_KERNEL); - if (!in) - return -ENOMEM; - - in->len = hdr->attr.length; - err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); - if (err) { - kvfree(in); - return -EFAULT; - } - - if (in->len > sizeof(struct xsc_ioctl_data_tl)) { - tl = (struct xsc_ioctl_data_tl *)(in->data); - if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { - kvfree(in); - return -EFAULT; - } - minfo = (struct xsc_ioctl_mem_info *)(tl + 1); - if (minfo->vir_addr && minfo->phy_addr) { - memset(tname, 0, sizeof(tname)); - get_task_comm(tname, current); - - spin_lock_irq(&priv_dev->mem_lock); - list_for_each_entry(m_ent, &priv_dev->mem_list, list) { - if ((!strcmp(m_ent->task_name, tname)) && - m_ent->mem_info.mem_num == minfo->mem_num && - m_ent->mem_info.size == minfo->size) { - if (m_ent->mem_info.phy_addr == minfo->phy_addr && - m_ent->mem_info.vir_addr == minfo->vir_addr) { - lfound = 1; - list_del(&m_ent->list); - } else { - err = -ENOMEM; - } - break; - } - } - spin_unlock_irq(&priv_dev->mem_lock); - - if (lfound) { - dma_free_coherent(&xdev->pdev->dev, - minfo->size, - (void *)minfo->vir_addr, - minfo->phy_addr); - } - } else { - kvfree(in); - return -EFAULT; - } - } - - hdr->attr.error = err; - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - err = -EFAULT; - if (copy_to_user((void 
*)user_hdr->attr.data, in->data, in->len)) - err = -EFAULT; - - kvfree(in); - return err; -} - -static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, - struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, - struct xsc_ioctl_hdr *hdr) -{ - struct xsc_ioctl_mem_info *minfo; - struct xsc_ioctl_data_tl *tl; - struct xsc_ioctl_mbox_in *in; - struct xsc_mem_entry *m_ent; - char tname[TASK_COMM_LEN]; - u64 vaddr = 0; - u64 paddr = 0; - int in_size; - int err = 0; - u8 lfound = 0; - u8 needfree = 0; - - in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; - in = kvzalloc(in_size, GFP_KERNEL); - if (!in) - return -ENOMEM; - - in->len = hdr->attr.length; - err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); - if (err) { - kvfree(in); - return -EFAULT; - } - - if (in->len > sizeof(struct xsc_ioctl_data_tl)) { - tl = (struct xsc_ioctl_data_tl *)(in->data); - if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { - kvfree(in); - return -EFAULT; - } - minfo = (struct xsc_ioctl_mem_info *)(tl + 1); - memset(tname, 0, sizeof(tname)); - get_task_comm(tname, current); - - spin_lock_irq(&priv_dev->mem_lock); - list_for_each_entry(m_ent, &priv_dev->mem_list, list) { - if ((!strcmp(m_ent->task_name, tname)) && - m_ent->mem_info.mem_num == minfo->mem_num) { - if (m_ent->mem_info.size == minfo->size) { - minfo->phy_addr = m_ent->mem_info.phy_addr; - minfo->vir_addr = m_ent->mem_info.vir_addr; - lfound = 1; - } else { - needfree = 1; - list_del(&m_ent->list); - } - break; - } - } - spin_unlock_irq(&priv_dev->mem_lock); - - if (needfree) { - dma_free_coherent(&xdev->pdev->dev, - m_ent->mem_info.size, - (void *)m_ent->mem_info.vir_addr, - m_ent->mem_info.phy_addr); - } - - if (!lfound) { - vaddr = (u64)dma_alloc_coherent(&xdev->pdev->dev, - minfo->size, - (dma_addr_t *)&paddr, - GFP_KERNEL); - if (vaddr) { - memset((void *)vaddr, 0, minfo->size); - minfo->phy_addr = paddr; - minfo->vir_addr = vaddr; - m_ent = kzalloc(sizeof(*m_ent), GFP_KERNEL); - if (!m_ent) { - kvfree(in); - return -ENOMEM; - } - strscpy(m_ent->task_name, tname, sizeof(m_ent->task_name)); - m_ent->mem_info.mem_num = minfo->mem_num; - m_ent->mem_info.size = minfo->size; - m_ent->mem_info.phy_addr = paddr; - m_ent->mem_info.vir_addr = vaddr; - spin_lock_irq(&priv_dev->mem_lock); - list_add(&m_ent->list, &priv_dev->mem_list); - spin_unlock_irq(&priv_dev->mem_lock); - } else { - kvfree(in); - return -ENOMEM; - } - } - } - - hdr->attr.error = err; - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - err = -EFAULT; - if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) - err = -EFAULT; - - kvfree(in); - return err; -} - -static long xsc_priv_dev_ioctl_mem(struct file *filp, unsigned long arg) -{ - struct xsc_bdf_file *bdf_file = filp->private_data; - struct xsc_core_device *xdev = bdf_file->xdev; - struct xsc_priv_device *priv_dev = &xdev->priv_device; - struct xsc_ioctl_hdr __user *user_hdr = - (struct xsc_ioctl_hdr __user *)arg; - struct xsc_ioctl_hdr hdr; - int err; - - err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); - if (err) - return -EFAULT; - - /* check valid */ - if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) - return -EINVAL; - - /* check ioctl cmd */ - switch (hdr.attr.opcode) { - case XSC_IOCTL_MEM_ALLOC: - return xsc_ioctl_mem_alloc(priv_dev, xdev, user_hdr, &hdr); - case XSC_IOCTL_MEM_FREE: - return xsc_ioctl_mem_free(priv_dev, xdev, user_hdr, &hdr); - default: - return -EINVAL; - } -} - -static int xsc_priv_modify_qp(struct xsc_core_device *xdev, void *in, 
void *out) -{ - int ret = 0, i = 0; - struct xsc_ioctl_qp_range *resp; - struct xsc_ioctl_data_tl *tl; - int insize; - struct xsc_modify_qp_mbox_in *mailin; - struct xsc_modify_qp_mbox_out mailout; - u32 qpn; - - tl = (struct xsc_ioctl_data_tl *)out; - resp = (struct xsc_ioctl_qp_range *)(tl + 1); - xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", - resp->qpn, resp->num, resp->opcode); - if (resp->num == 0) { - xsc_core_err(xdev, "xsc_ioctl_qp_range: resp->num == 0\n"); - return 0; - } - qpn = resp->qpn; - insize = sizeof(struct xsc_modify_qp_mbox_in); - mailin = kvzalloc(insize, GFP_KERNEL); - if (!mailin) - return -ENOMEM; - for (i = 0; i < resp->num; i++) { - mailin->hdr.opcode = cpu_to_be16(resp->opcode); - mailin->qpn = cpu_to_be32(qpn + i); - ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); - xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); - } - kvfree(mailin); - - return ret; -} - -static int xsc_priv_dev_ioctl_get_phy(struct xsc_core_device *xdev, - void *in, void *out) -{ - int ret = 0; - struct xsc_eswitch *esw = xdev->priv.eswitch; - struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; - struct xsc_ioctl_get_phy_info_res *resp; - u16 lag_id = xsc_get_lag_id(xdev); - - switch (tl->opmod) { - case XSC_IOCTL_OP_GET_LOCAL: - resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); - - resp->pcie_no = xdev->pcie_no; - resp->func_id = xdev->glb_func_id; - resp->pcie_host = xdev->caps.pcie_host; - resp->mac_phy_port = xdev->mac_port; - resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; - resp->lag_id = lag_id; - resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; - resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; - resp->lag_port_start = xdev->caps.lag_logic_port_ofst; - resp->send_seg_num = xdev->caps.send_ds_num; - resp->recv_seg_num = xdev->caps.recv_ds_num; - resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; - resp->chip_version = xdev->chip_ver_l; - resp->on_chip_tbl_vld = - (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; - resp->dma_rw_tbl_vld = - (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; - resp->pct_compress_vld = - (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 
1 : 0; - - xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", - resp->pcie_no, resp->func_id, resp->pcie_host, - resp->mac_phy_port, resp->lag_id, - resp->funcid_to_logic_port_off); - resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; - resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; - resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; - resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; - resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; - resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; - resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; - resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; - resp->hca_core_clock = xdev->caps.hca_core_clock; - resp->mac_bit = xdev->caps.mac_bit; - if (xsc_core_is_pf(xdev)) { - mutex_lock(&esw->mode_lock); - resp->esw_mode = esw->mode; - mutex_unlock(&esw->mode_lock); - } else { - resp->esw_mode = 0; - } - resp->board_id = xdev->board_info->board_id; - break; - - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void *in, void *out) -{ - struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; - struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; - - if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; - - resp->pcp = ib_dev->force_pcp; - return 0; -} - -static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) -{ - struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; - struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; - - if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; - - resp->dscp = ib_dev->force_dscp; - return 0; -} - -static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) -{ - struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; - struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; - - if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; - - if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) - return -EINVAL; - - ib_dev->force_pcp = req->pcp; - return 0; -} - -static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) -{ - struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; - struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; - - if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; - - if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) - return -EINVAL; - - ib_dev->force_dscp = req->dscp; - return 0; -} - -int xsc_priv_dev_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size) -{ - int opcode, ret = 0; - struct xsc_ioctl_attr *hdr; - - hdr = (struct xsc_ioctl_attr *)in; - opcode = hdr->opcode; - switch (opcode) { - case XSC_IOCTL_GET_PHY_INFO: - ret = xsc_priv_dev_ioctl_get_phy(xdev, in, out); - break; - case XSC_IOCTL_GET_FORCE_PCP: - xsc_core_dbg(xdev, "getting global pcp\n"); - ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out); - break; - case XSC_IOCTL_GET_FORCE_DSCP: - ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out); - break; - case XSC_IOCTL_SET_QP_STATUS: - xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n"); - ret = xsc_priv_modify_qp(xdev, in, out); - break; - case XSC_IOCTL_SET_FORCE_PCP: - xsc_core_dbg(xdev, "setting global pcp\n"); - ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out); - break; - case XSC_IOCTL_SET_FORCE_DSCP: - xsc_core_dbg(xdev, "setting global dscp\n"); - ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out); - break; - 
default: - ret = -EINVAL; - break; - } - - xsc_core_dbg(xdev, "xsc_priv_dev exec_ioctl.ret=%u\n", ret); - - return ret; -} - -static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) -{ - struct xsc_bdf_file *bdf_file = filp->private_data; - struct xsc_core_device *xdev = bdf_file->xdev; - struct xsc_ioctl_hdr __user *user_hdr = - (struct xsc_ioctl_hdr __user *)arg; - struct xsc_ioctl_hdr hdr; - struct xsc_ioctl_hdr *in; - int in_size; - int err; - - err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); - if (err) - return -EFAULT; - if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) - return -EINVAL; - switch (hdr.attr.opcode) { - case XSC_IOCTL_GET_PHY_INFO: - case XSC_IOCTL_GET_FORCE_PCP: - case XSC_IOCTL_GET_FORCE_DSCP: - case XSC_IOCTL_SET_QP_STATUS: - case XSC_IOCTL_SET_FORCE_PCP: - case XSC_IOCTL_SET_FORCE_DSCP: - case XSC_IOCTL_GET_CONTEXT: - break; - default: - return -EINVAL; - } - in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; - in = kvzalloc(in_size, GFP_KERNEL); - if (!in) - return -EFAULT; - in->attr.opcode = hdr.attr.opcode; - in->attr.length = hdr.attr.length; - err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); - if (err) { - kvfree(in); - return -EFAULT; - } - err = xsc_priv_dev_exec_ioctl(xdev, &in->attr, - (in_size - offsetof(struct xsc_ioctl_hdr, attr)), - in->attr.data, - hdr.attr.length); - in->attr.error = err; - if (copy_to_user((void *)arg, in, in_size)) - err = -EFAULT; - kvfree(in); - return err; -} - -static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, - char *data, unsigned int datalen) -{ - int err = 0; - struct xsc_flow_pct_v4_add *pct_v4; - struct xsc_flow_pct_v6_add *pct_v6; - - switch (tl->table) { - case XSC_FLOW_TBL_PCT_V4: - case XSC_FLOW_TBL_BM_PCT_V4: - pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); - err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); - break; - case XSC_FLOW_TBL_PCT_V6: - case XSC_FLOW_TBL_BM_PCT_V6: - pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); - err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); - break; - default: - break; - } - - return err; -} - -static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) -{ - struct xsc_flow_pct_v4_del *pct_v4; - struct xsc_flow_pct_v6_del *pct_v6; - - switch (tl->table) { - case XSC_FLOW_TBL_PCT_V4: - case XSC_FLOW_TBL_BM_PCT_V4: - pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); - xsc_destroy_pct_obj(file, pct_v4->priority); - break; - case XSC_FLOW_TBL_PCT_V6: - case XSC_FLOW_TBL_BM_PCT_V6: - pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); - xsc_destroy_pct_obj(file, pct_v6->priority); - break; - default: - break; - } -} - -static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, - char *data, unsigned int datalen) -{ - struct xsc_ioctl_data_tl *tl; - int err = 0; - - tl = (struct xsc_ioctl_data_tl *)data; - - switch (tl->opmod) { - case XSC_IOCTL_OP_ADD: - err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); - break; - case XSC_IOCTL_OP_DEL: - xsc_ioctl_flow_destroy_obj(file, tl); - break; - default: - break; - } - - return err; -} - -static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, - struct xsc_ioctl_hdr __user *user_hdr, - struct xsc_ioctl_hdr *hdr) -{ - struct xsc_ioctl_mbox_in *in; - struct xsc_ioctl_mbox_out *out; - int in_size; - int out_size; - int err; - - in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; - in = kvzalloc(in_size, GFP_KERNEL); - if (!in) - return -EFAULT; - - in->hdr.opcode 
= __cpu_to_be16(hdr->attr.opcode); - in->len = __cpu_to_be16(hdr->attr.length); - err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); - if (err) { - kvfree(in); - return -EFAULT; - } - - err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); - if (err) { - kvfree(in); - return -EFAULT; - } - - out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; - out = kvzalloc(out_size, GFP_KERNEL); - if (!out) { - kvfree(in); - return -ENOMEM; - } - memcpy(out->data, in->data, hdr->attr.length); - out->len = in->len; - err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); - - hdr->attr.error = __be32_to_cpu(out->error); - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - err = -EFAULT; - if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) - err = -EFAULT; - - kvfree(in); - kvfree(out); - return err; -} - -static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, - struct xsc_core_device *xdev, - struct xsc_ioctl_hdr __user *user_hdr, - struct xsc_ioctl_hdr *hdr) -{ - struct xsc_modify_raw_qp_mbox_in *in; - struct xsc_modify_raw_qp_mbox_out *out; - int err; - - if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) - return -EINVAL; - - in = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_in), GFP_KERNEL); - if (!in) - goto err_in; - out = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_out), GFP_KERNEL); - if (!out) - goto err_out; - - err = copy_from_user(&in->req, user_hdr->attr.data, - sizeof(struct xsc_modify_raw_qp_request)); - if (err) - goto err; - - in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - in->pcie_no = xdev->pcie_no; - - err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), - out, sizeof(struct xsc_modify_raw_qp_mbox_out)); - - hdr->attr.error = __be32_to_cpu(out->hdr.status); - - if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) - goto err; - - kvfree(in); - kvfree(out); - return 0; - -err: - kvfree(out); -err_out: - kvfree(in); -err_in: - return -EFAULT; -} - -static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, - unsigned int inlen, void *out) -{ - u16 qp_num = 0; - int i = 0; - struct xsc_create_qp_request *req = NULL; - void *ptr = NULL; - int len = 0; - u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); - - qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); - ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; - for (i = 0; i < qp_num; i++) { - req = (struct xsc_create_qp_request *)ptr; - len = sizeof(struct xsc_create_qp_request) + - be16_to_cpu(req->pa_num) * sizeof(u64); - xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len); - ptr += len; - } -} - -static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, - void *in, unsigned int inlen, void *out, int opcode) -{ - unsigned int idx; - - switch (opcode) { - case XSC_CMD_OP_ALLOC_PD: - idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); - xsc_alloc_pd_obj(file, idx, in, inlen); - break; - case XSC_CMD_OP_DEALLOC_PD: - idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); - xsc_destroy_pd_obj(file, idx); - break; - case XSC_CMD_OP_CREATE_MKEY: - idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); - xsc_alloc_mr_obj(file, idx, in, inlen); - break; - case XSC_CMD_OP_DESTROY_MKEY: - idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); - xsc_destroy_mr_obj(file, idx); - break; - case XSC_CMD_OP_CREATE_CQ: - idx = be32_to_cpu(((struct xsc_create_cq_mbox_out 
*)out)->cqn); - xsc_alloc_cq_obj(file, idx, in, inlen); - break; - case XSC_CMD_OP_DESTROY_CQ: - idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); - xsc_destroy_cq_obj(file, idx); - break; - case XSC_CMD_OP_CREATE_QP: - idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); - xsc_alloc_qp_obj(file, idx, in, inlen); - break; - case XSC_CMD_OP_DESTROY_QP: - idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); - xsc_destroy_qp_obj(file, idx); - break; - case XSC_CMD_OP_CREATE_MULTI_QP: - xsc_handle_multiqp_create(file, in, inlen, out); - break; - default: - break; - } -} - -static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg) -{ - struct xsc_bdf_file *bdf_file = filp->private_data; - struct xsc_priv_device *priv_dev = &bdf_file->xdev->priv_device; - struct xsc_core_device *xdev = bdf_file->xdev; - struct xsc_ioctl_hdr __user *user_hdr = - (struct xsc_ioctl_hdr __user *)arg; - struct xsc_ioctl_hdr hdr; - int err; - - err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); - if (err) - return -EFAULT; - - /* check valid */ - if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) - return -EINVAL; - - /* check ioctl cmd */ - switch (hdr.attr.opcode) { - case XSC_CMD_OP_IOCTL_FLOW: - return xsc_ioctl_flow_cmdq(bdf_file, user_hdr, &hdr); - case XSC_CMD_OP_MODIFY_RAW_QP: - return xsc_ioctl_modify_raw_qp(priv_dev, xdev, user_hdr, &hdr); - default: - return -EINVAL; - } -} - -static long xsc_priv_dev_ioctl_cmdq_raw(struct file *filp, unsigned long arg) -{ - struct xsc_bdf_file *bdf_file = filp->private_data; - struct xsc_core_device *xdev = bdf_file->xdev; - struct xsc_ioctl_hdr __user *user_hdr = - (struct xsc_ioctl_hdr __user *)arg; - struct xsc_ioctl_hdr hdr; - int err; - void *in; - void *out; - u16 out_len; - - err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); - if (err) - return -EFAULT; - - /* check valid */ - if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) - return -EINVAL; - - in = kvzalloc(hdr.attr.length, GFP_KERNEL); - if (!in) - return -ENOMEM; - out_len = min_t(u16, hdr.attr.length, MAX_MBOX_OUT_LEN); - out = kvzalloc(out_len, GFP_KERNEL); - if (!out) { - kfree(in); - return -ENOMEM; - } - - err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); - if (err) { - err = -EFAULT; - goto err_exit; - } - - xsc_cmd_exec(xdev, in, hdr.attr.length, out, out_len); - xsc_pci_ctrl_cmdq_handle_res_obj(bdf_file, in, hdr.attr.length, out, hdr.attr.opcode); - - if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) - err = -EFAULT; - if (copy_to_user((void *)user_hdr->attr.data, out, out_len)) - err = -EFAULT; -err_exit: - kfree(in); - kfree(out); - return err; -} - -static int xsc_ioctl_user_mode(struct file *filp, unsigned long arg) -{ - struct xsc_bdf_file *bdf_file = filp->private_data; - struct xsc_core_device *dev = bdf_file->xdev; - struct xsc_ioctl_hdr __user *user_hdr = - (struct xsc_ioctl_hdr __user *)arg; - struct xsc_ioctl_hdr hdr; - struct xsc_ioctl_user_mode_attr *attr; - u8 *buf; - int err = 0; - - err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); - if (err) { - xsc_core_err(dev, "fail to copy from user user_hdr\n"); - return -EFAULT; - } - - /* check valid */ - if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { - xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed); - return -EINVAL; - } - - buf = kvzalloc(hdr.attr.length, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - err = copy_from_user(buf, user_hdr->attr.data, hdr.attr.length); - if (err) { - xsc_core_err(dev, "failed to copy ioctl user data.\n"); - kvfree(buf); - return 
-EFAULT; - } - - switch (hdr.attr.opcode) { - case XSC_IOCTL_OPCODE_ENABLE_USER_MODE: - attr = (struct xsc_ioctl_user_mode_attr *)buf; - xsc_set_user_mode(dev, (attr->enable ? true : false)); - if (attr->enable) - bdf_file->restore_nic_fn = xsc_eth_restore_nic_hca; - else - bdf_file->restore_nic_fn = NULL; - - break; - default: - err = -EOPNOTSUPP; - break; - } - - kvfree(buf); - return err; -} - -static long xsc_priv_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int err; - - switch (cmd) { - case XSC_IOCTL_CMDQ: - err = xsc_priv_dev_ioctl_cmdq(filp, arg); - break; - case XSC_IOCTL_DRV_GET: - case XSC_IOCTL_DRV_SET: - // TODO refactor to split driver get and set - err = xsc_priv_dev_ioctl_getinfo(filp, arg); - break; - case XSC_IOCTL_MEM: - err = xsc_priv_dev_ioctl_mem(filp, arg); - break; - case XSC_IOCTL_CMDQ_RAW: - err = xsc_priv_dev_ioctl_cmdq_raw(filp, arg); - break; - case XSC_IOCTL_USER_MODE: - err = xsc_ioctl_user_mode(filp, arg); - break; - default: - err = -EFAULT; - break; - } - return err; -} - -static const struct file_operations dev_fops = { - .owner = THIS_MODULE, - .open = xsc_priv_dev_open, - .unlocked_ioctl = xsc_priv_dev_ioctl, - .compat_ioctl = xsc_priv_dev_ioctl, - .release = xsc_priv_dev_release, -}; - -#define XSC_MAX_CDEV_NUM 1024 -static dev_t g_priv_cdev_no; -static int g_priv_cdev_cnt; -static char *g_priv_class_name = "xscale"; -static struct class *g_priv_class; -DECLARE_BITMAP(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM); - -int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev) -{ - int ret; - int dev_id = 0; - struct xsc_priv_device *priv_dev = &dev->priv_device; - - if (g_priv_cdev_cnt >= XSC_MAX_CDEV_NUM) { - xsc_core_err(dev, "too many xscale cdevice\n"); - priv_dev->devno = U32_MAX; - return -EBUSY; - } - - sprintf(priv_dev->device_name, "%s", ib_dev->name); - - xsc_core_dbg(dev, "device_name %s\n", priv_dev->device_name); - - cdev_init(&priv_dev->cdev, &dev_fops); - priv_dev->cdev.owner = THIS_MODULE; - dev_id = find_first_zero_bit(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM); - priv_dev->devno = g_priv_cdev_no + dev_id; - - ret = cdev_add(&priv_dev->cdev, priv_dev->devno, 1); - if (ret) { - xsc_core_err(dev, "%s cdev_add error ret:%d major:%d\n", - priv_dev->device_name, ret, MAJOR(priv_dev->devno)); - return ret; - } - - device_create(g_priv_class, NULL, priv_dev->devno, - NULL, "%s!%s", g_priv_class_name, priv_dev->device_name); - g_priv_cdev_cnt++; - set_bit(dev_id, g_bitmap_cdev_id); - - INIT_LIST_HEAD(&priv_dev->mem_list); - spin_lock_init(&priv_dev->mem_lock); - - INIT_RADIX_TREE(&priv_dev->bdf_tree, GFP_ATOMIC); - spin_lock_init(&priv_dev->bdf_lock); - - xsc_core_dbg(dev, "init success\n"); - - return 0; -} - -void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) -{ - struct xsc_priv_device *priv_dev; - struct cdev *char_dev; - struct xsc_bdf_file *bdf_file; - struct radix_tree_iter iter; - void **slot; - int dev_id = 0; - - if (!dev || !ib_dev) { - pr_err("[%s:%d] device is null pointer\n", __func__, __LINE__); - return; - } - - priv_dev = &dev->priv_device; - if (priv_dev->devno == U32_MAX) - return; - - char_dev = &priv_dev->cdev; - - dev_id = MINOR(priv_dev->devno); - spin_lock(&priv_dev->bdf_lock); - radix_tree_for_each_slot(slot, &priv_dev->bdf_tree, &iter, 0) { - bdf_file = (struct xsc_bdf_file *)(*slot); - xsc_close_bdf_file(bdf_file); - radix_tree_iter_delete(&priv_dev->bdf_tree, &iter, slot); - kfree(bdf_file); - } - spin_unlock(&priv_dev->bdf_lock); - device_destroy(g_priv_class, 
priv_dev->devno); - cdev_del(&priv_dev->cdev); - - clear_bit(dev_id, g_bitmap_cdev_id); - g_priv_cdev_cnt--; - xsc_core_dbg(dev, "fini success\n"); -} - -int xsc_priv_alloc_chrdev_region(void) -{ - int ret = 0; - char *device_name = "xscale"; - - ret = alloc_chrdev_region(&g_priv_cdev_no, 0, XSC_MAX_CDEV_NUM, device_name); - if (ret) { - pr_err("%s cant't get major %d\n", device_name, MAJOR(g_priv_cdev_no)); - return ret; - } - g_priv_class = class_create(THIS_MODULE, g_priv_class_name); - g_priv_cdev_cnt = 0; - - return 0; -} - -void xsc_priv_unregister_chrdev_region(void) -{ - class_destroy(g_priv_class); - unregister_chrdev_region(g_priv_cdev_no, XSC_MAX_CDEV_NUM); -} diff --git a/drivers/infiniband/hw/xsc/qp.c b/drivers/infiniband/hw/xsc/qp.c index 7ac6eec4b473c68d1d4fe7a7f23235b8c84173b6..d517a47ca01ca25b602ebdbc338de1d2300a2830 100644 --- a/drivers/infiniband/hw/xsc/qp.c +++ b/drivers/infiniband/hw/xsc/qp.c @@ -13,10 +13,13 @@ #include #include #include - /* not supported currently */ static int wq_signature; +#ifndef ETH_P_IBOE +#define ETH_P_IBOE 0x8915 +#endif + #define MAD_QUEUE_DEPTH 128 enum { @@ -91,27 +94,25 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos) if (dev->num_tc) return netdev_get_prio_tc_map(dev, prio); +#if IS_ENABLED(CONFIG_VLAN_8021Q) if (is_vlan_dev(ndev)) return (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +#endif return 0; } -static inline void set_remote_addr_seg(struct xsc_wqe_data_seg *remote_seg, +static inline void set_remote_addr_seg(struct xsc_core_device *xdev, + struct xsc_wqe_data_seg *remote_seg, u32 msg_len, u64 remote_addr, u32 rkey) { - remote_seg->in_line = 0; - WR_LE_32(remote_seg->seg_len, msg_len); - WR_LE_32(remote_seg->mkey, rkey); - WR_LE_64(remote_seg->va, remote_addr); + xsc_set_data_seg(xdev, remote_seg, remote_addr, rkey, msg_len); } -static void set_local_data_seg(struct xsc_wqe_data_seg *data_seg, struct ib_sge *sg) +static void set_local_data_seg(struct xsc_core_device *xdev, + struct xsc_wqe_data_seg *data_seg, struct ib_sge *sg) { - data_seg->in_line = 0; - WR_LE_32(data_seg->seg_len, sg->length); - WR_LE_32(data_seg->mkey, sg->lkey); - WR_LE_64(data_seg->va, sg->addr); + xsc_set_data_seg(xdev, data_seg, sg->addr, sg->lkey, sg->length); } static int set_data_inl_seg(struct xsc_ib_qp *qp, const struct ib_send_wr *wr, void *ctrl) @@ -181,12 +182,14 @@ static void xsc_ib_qp_event(struct xsc_core_qp *qp, int type) static int set_rq_size(struct xsc_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct xsc_ib_qp *qp, struct xsc_ib_create_qp *ucmd) { + bool check_res = false; u32 wqe_cnt = cap->max_recv_wr ? 
roundup_pow_of_two(cap->max_recv_wr) : 0; /* Sanity check RQ size before proceeding */ - if (wqe_cnt > dev->xdev->caps.max_wqes) { - xsc_ib_warn(dev, "max_recv_wr:%d exceed max rq depth\n", cap->max_recv_wr); - wqe_cnt = dev->xdev->caps.max_wqes; + check_res = xsc_check_max_qp_depth(dev->xdev, &wqe_cnt, dev->xdev->caps.max_wqes); + if (check_res) { + xsc_ib_err(dev, "max_recv_wr:%d exceed max rq depth\n", cap->max_recv_wr); + return -EINVAL; } if (!has_rq) { @@ -215,6 +218,7 @@ static int calc_sq_size(struct xsc_ib_dev *dev, struct ib_qp_init_attr *attr, { int wqe_size; int wq_size; + bool check_res = false; if (!attr->cap.max_send_wr) { xsc_ib_err(dev, "invalid max_send_wr:%d\n", attr->cap.max_send_wr); @@ -225,8 +229,14 @@ static int calc_sq_size(struct xsc_ib_dev *dev, struct ib_qp_init_attr *attr, qp->max_inline_data = (dev->xdev->caps.send_ds_num - 2) * sizeof(struct xsc_wqe_data_seg); attr->cap.max_inline_data = qp->max_inline_data; + attr->cap.max_send_wr = min_t(u32, attr->cap.max_send_wr, dev->xdev->caps.max_wqes); qp->sq.wqe_cnt = roundup_pow_of_two(attr->cap.max_send_wr); - qp->sq.wqe_cnt = min_t(int, qp->sq.wqe_cnt, dev->xdev->caps.max_wqes); + + check_res = xsc_check_max_qp_depth(dev->xdev, &qp->sq.wqe_cnt, dev->xdev->caps.max_wqes); + if (check_res) { + xsc_ib_err(dev, "max_send_wr:%d exceed max sq depth\n", attr->cap.max_send_wr); + return -EINVAL; + } qp->sq.ds_cnt = qp->sq.wqe_cnt << (dev->xdev->caps.send_wqe_shift - XSC_BASE_WQE_SHIFT); wq_size = qp->sq.wqe_cnt * wqe_size; qp->sq.wqe_shift = ilog2(wqe_size); @@ -385,7 +395,7 @@ static int create_kernel_qp(struct xsc_ib_dev *dev, sq_size = calc_sq_size(dev, init_attr, qp); if (sq_size < 0) { - err = -ENOMEM; + err = -EINVAL; xsc_ib_err(dev, "err %d\n", err); return err; } @@ -414,14 +424,16 @@ static int create_kernel_qp(struct xsc_ib_dev *dev, xsc_fill_page_array(&qp->buf, (*in)->req.pas, hw_npages); (*in)->req.pa_num = cpu_to_be16(hw_npages); - qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wrid), GFP_KERNEL); - qp->sq.wr_data = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wr_data), GFP_KERNEL); - qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(*qp->rq.wrid), GFP_KERNEL); - qp->sq.w_list = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.w_list), GFP_KERNEL); - qp->sq.wqe_head = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wqe_head), GFP_KERNEL); + qp->sq.wrid = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.wrid), GFP_KERNEL); + qp->sq.wr_data = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.wr_data), GFP_KERNEL); + qp->rq.wrid = kcalloc(qp->rq.wqe_cnt, sizeof(*qp->rq.wrid), GFP_KERNEL); + qp->sq.w_list = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.w_list), GFP_KERNEL); + qp->sq.wqe_head = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.wqe_head), GFP_KERNEL); + qp->sq.wr_opcode = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.wr_opcode), GFP_KERNEL); + qp->sq.need_flush = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.need_flush), GFP_KERNEL); if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || - !qp->sq.w_list || !qp->sq.wqe_head) { + !qp->sq.w_list || !qp->sq.wqe_head || !qp->sq.wr_opcode || !qp->sq.need_flush) { err = -ENOMEM; goto err_wrid; } @@ -445,6 +457,8 @@ static int create_kernel_qp(struct xsc_ib_dev *dev, return 0; err_wrid: + kfree(qp->sq.need_flush); + kfree(qp->sq.wr_opcode); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); @@ -461,6 +475,8 @@ static void destroy_qp_kernel(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) if (qp->sq.hdr_buf) ib_dma_free_coherent(&dev->ib_dev, qp->sq.hdr_size, qp->sq.hdr_buf, 
qp->sq.hdr_dma); + kfree(qp->sq.need_flush); + kfree(qp->sq.wr_opcode); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); @@ -480,6 +496,8 @@ static u8 ib_to_xsc_qp_type(enum ib_qp_type qp_type, __u32 flags) return XSC_QUEUE_TYPE_RAW_TSO; else if (flags & XSC_QP_FLAG_RAWPACKET_TX) return XSC_QUEUE_TYPE_RAW_TX; + else if (flags & XSC_QP_FLAG_RAWPACKET_SNIFFER) + return XSC_QUEUE_TYPE_SNIFFER; else return XSC_QUEUE_TYPE_RAW; } else { @@ -590,6 +608,7 @@ static int create_qp_common(struct xsc_ib_dev *dev, struct ib_pd *pd, goto err_create; } in->req.glb_funcid = cpu_to_be16(dev->xdev->glb_func_id); + in->req.page_shift = PAGE_SHIFT; qp->xqp.qp_type_internal = in->req.qp_type; @@ -859,6 +878,29 @@ xsc_ib_destroy_qp_def() struct xsc_ib_qp *xqp = to_xqp(qp); struct xsc_core_device *xdev = dev->xdev; struct xsc_lag *lag; + struct xsc_err_state_qp_node *tmp = NULL, *err_rq_node = NULL, *err_sq_node = NULL; + + if (qp->qp_type == IB_QPT_RC) { + if (xqp->recv_cq) { + list_for_each_entry_safe(err_rq_node, tmp, + &to_xcq(xqp->recv_cq)->err_state_qp_list, entry) { + if (err_rq_node->qp_id == xqp->xqp.qpn && !err_rq_node->is_sq) { + list_del(&err_rq_node->entry); + kfree(err_rq_node); + } + } + } + + if (xqp->send_cq) { + list_for_each_entry_safe(err_sq_node, tmp, + &to_xcq(xqp->send_cq)->err_state_qp_list, entry) { + if (err_sq_node->qp_id == xqp->xqp.qpn && err_sq_node->is_sq) { + list_del(&err_sq_node->entry); + kfree(err_sq_node); + } + } + } + } destroy_qp_common(dev, xqp); @@ -1019,6 +1061,73 @@ static inline u8 __xsc_get_min_qp_cnt_mac(struct xsc_lag *lag) return mac_index; } + +static int xsc_ib_err_state_qp(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp, + enum xsc_qp_state cur_state, enum xsc_qp_state state) +{ + struct xsc_err_state_qp_node *tmp = NULL, *err_rq_node = NULL, *err_sq_node = NULL; + int ret = 0; + + xsc_ib_dbg(dev, "modify qp: qpid %d, cur_qp_state %d, qp_state %d\n", + qp->xqp.qpn, cur_state, state); + if (cur_state == XSC_QP_STATE_ERR && state != XSC_QP_STATE_ERR) { + if (qp->recv_cq) { + list_for_each_entry_safe(err_rq_node, tmp, + &to_xcq(qp->recv_cq)->err_state_qp_list, entry) { + if (err_rq_node->qp_id == qp->xqp.qpn && !err_rq_node->is_sq) { + list_del(&err_rq_node->entry); + kfree(err_rq_node); + } + } + } + + if (qp->send_cq) { + list_for_each_entry_safe(err_sq_node, tmp, + &to_xcq(qp->send_cq)->err_state_qp_list, entry) { + if (err_sq_node->qp_id == qp->xqp.qpn && err_sq_node->is_sq) { + list_del(&err_sq_node->entry); + kfree(err_sq_node); + } + } + } + return ret; + } + + if (cur_state != XSC_QP_STATE_ERR && state == XSC_QP_STATE_ERR) { + if (qp->recv_cq) { + err_rq_node = kzalloc(sizeof(*err_rq_node), GFP_KERNEL); + if (!err_rq_node) + return -ENOMEM; + err_rq_node->qp_id = qp->xqp.qpn; + err_rq_node->is_sq = false; + list_add_tail(&err_rq_node->entry, &to_xcq(qp->recv_cq)->err_state_qp_list); + } + + if (qp->send_cq) { + err_sq_node = kzalloc(sizeof(*err_sq_node), GFP_KERNEL); + if (!err_sq_node) + return -ENOMEM; + err_sq_node->qp_id = qp->xqp.qpn; + err_sq_node->is_sq = true; + list_add_tail(&err_sq_node->entry, &to_xcq(qp->send_cq)->err_state_qp_list); + } + } + return ret; +} + +static inline void xsc_set_qp_access_flag(struct xsc_modify_qp_mbox_in *in, + struct xsc_qp_context *context, int access_flags) +{ + in->hdr.ver = cpu_to_be16(XSC_QP_CONTEXT_V1); + context->qp_access_flags = 0; + + if (access_flags & IB_ACCESS_REMOTE_READ) + context->qp_access_flags |= QP_ACCESS_REMOTE_READ; + if (access_flags & IB_ACCESS_REMOTE_WRITE) + 
context->qp_access_flags |= QP_ACCESS_REMOTE_WRITE; + context->qp_access_flags = cpu_to_be32(context->qp_access_flags); +} + static int __xsc_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) @@ -1125,6 +1234,12 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, ret += snprintf(ptr + ret, 256 - ret, "rq_psn=%#x,", attr->rq_psn); } + if (attr_mask & IB_QP_ACCESS_FLAGS) { + xsc_set_qp_access_flag(in, context, attr->qp_access_flags); + ret += snprintf(ptr + ret, 256 - ret, "qp_access_flags=%#x,\n", + attr->qp_access_flags); + } + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; @@ -1173,6 +1288,9 @@ static int __xsc_ib_modify_qp(struct ib_qp *ibqp, qp->sq.last_poll = 0; } + if (!err && (attr_mask & IB_QP_STATE)) + err = xsc_ib_err_state_qp(dev, qp, to_xsc_state(cur_state), + to_xsc_state(new_state)); out: kfree(in); return err; @@ -1214,6 +1332,12 @@ int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { + xsc_ib_err(dev, "ib_modify_qp not ok, type=%d mask=0x%x state from%d to%d\n", + ibqp->qp_type, attr_mask, cur_state, new_state); + goto out; + } + err = __xsc_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); out: mutex_unlock(&qp->mutex); @@ -1240,8 +1364,7 @@ static inline void xsc_post_send_db(struct xsc_ib_qp *qp, struct xsc_core_device *xdev, int nreq) { - u16 next_pid; - union xsc_db_data db; + u32 next_pid; if (unlikely(!nreq)) return; @@ -1249,14 +1372,7 @@ static inline void xsc_post_send_db(struct xsc_ib_qp *qp, qp->sq.head += nreq; next_pid = qp->sq.head << (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT); - db.sq_next_pid = next_pid; - db.sqn = qp->doorbell_qpn; - /* - * Make sure that descriptors are written before - * updating doorbell record and ringing the doorbell - */ - wmb(); - writel(db.raw_data, REG_ADDR(xdev, xdev->regs.tx_db)); + xsc_update_tx_db(xdev, qp->doorbell_qpn, next_pid); } static inline u32 xsc_crc32(struct xsc_ib_dev *dev, u32 crc, u8 *buf, size_t len) @@ -1273,7 +1389,7 @@ static inline u32 xsc_crc32(struct xsc_ib_dev *dev, u32 crc, u8 *buf, size_t len #define BTH_PSN_MASK (0x00ffffff) /* Compute a partial ICRC for all the IB transport headers. 
*/ -u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) +static u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) { struct iphdr *ip4h = NULL; struct ipv6hdr *ip6h = NULL; @@ -1363,11 +1479,11 @@ u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) /* Routine for sending QP1 packets for RoCE V1 an V2 */ // TO BE DONE: sq hdr buf should be create dynamically for mult entry -int build_qp1_send_v2(struct xsc_ib_dev *dev, - struct xsc_ib_qp *qp, - const struct ib_send_wr *wr, - struct ib_sge *sge, - int payload_size, u32 *crc) +static int build_qp1_send_v2(struct xsc_ib_dev *dev, + struct xsc_ib_qp *qp, + const struct ib_send_wr *wr, + struct ib_sge *sge, + int payload_size, u32 *crc) { struct xsc_ib_ah *ah = container_of(ud_wr((struct ib_send_wr *)wr)->ah, struct xsc_ib_ah, ibah); @@ -1568,7 +1684,6 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, struct ib_wc wc; void *vaddr; int sig = 0; - if (wr->opcode == IB_WR_LOCAL_INV) { wc.status = IB_WC_SUCCESS; wc.wr_cqe = wr->wr_cqe; @@ -1592,8 +1707,18 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 1 : wr->send_flags & IB_SEND_SIGNALED; if (xsc_wr_reg_mr(dev, wr)) wc.status = IB_WC_GENERAL_ERR; - if (wr->wr_cqe && wr->wr_cqe->done && sig) - wr->wr_cqe->done(qp->send_cq, &wc); + + if (virt_addr_valid(wr->wr_cqe)) { + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + } else { + xsc_ib_info(dev, "Error: system not support SMC-R-V2 feature!!!\n"); + return 0; + } + + wr = wr->next; + if (!wr) + return (wc.status == IB_WC_SUCCESS) ? 0 : -1; } spin_lock_irqsave(&qp->sq.lock, irqflag); @@ -1666,17 +1791,18 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_WR_SEND_WITH_IMM: ctrl->with_immdt = 1; - ctrl->opcode_data = send_ieth(wr); + WR_LE_32(ctrl->opcode_data, RD_BE_32(send_ieth(wr))); break; case IB_WR_RDMA_WRITE_WITH_IMM: ctrl->with_immdt = 1; - ctrl->opcode_data = send_ieth(wr); + WR_LE_32(ctrl->opcode_data, RD_BE_32(send_ieth(wr))); case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: - ctrl->with_immdt = 0; + if (ctrl->msg_len == 0) + break; ctrl->ds_data_num++; data_seg = get_seg_wqe(ctrl, seg_index); - set_remote_addr_seg(data_seg, + set_remote_addr_seg(dev->xdev, data_seg, msg_len, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); @@ -1694,7 +1820,7 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_QPT_UD: case IB_QPT_GSI: - ctrl->msg_opcode = XSC_MSG_OPCODE_MAD; + ctrl->msg_opcode = xsc_get_mad_msg_opcode(dev->xdev); ctrl->ds_data_num++; data_seg = get_seg_wqe(ctrl, seg_index); mad_send_base = (u8 *)qp->sq.hdr_buf + @@ -1721,7 +1847,7 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, *(u32 *)&mad_send_base[ctrl->msg_len] = ~crc; ctrl->msg_len += sizeof(crc); sg.length = ctrl->msg_len; - set_local_data_seg(data_seg, &sg); + set_local_data_seg(dev->xdev, data_seg, &sg); xsc_ib_info(dev, "qp[%d] send MAD packet, msg_len:%d\n", qp->xqp.qpn, ctrl->msg_len); qp->sq.mad_index = (qp->sq.mad_index + 1) % MAD_QUEUE_DEPTH; @@ -1751,13 +1877,18 @@ int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, for (i = 0; i < sg_n; ++i, ++seg_index) { if (likely(sgl[i].length)) { data_seg = get_seg_wqe(ctrl, seg_index); - set_local_data_seg(data_seg, &sgl[i]); + set_local_data_seg(dev->xdev, data_seg, &sgl[i]); } } } qp->sq.wrid[idx] = wr->wr_id; qp->sq.wqe_head[idx] = qp->sq.head + nreq; qp->sq.cur_post += 1; + if 
(ctrl->ce) { + atomic_inc(&qp->sq.flush_wqe_cnt); + qp->sq.need_flush[idx] = 1; + } + qp->sq.wr_opcode[idx] = wr->opcode; } out: xsc_ib_dbg(dev, "nreq:%d\n", nreq); @@ -1778,7 +1909,6 @@ int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, unsigned long flags; int err = 0; u16 next_pid = 0; - union xsc_db_data db; int nreq; u16 idx; int i; @@ -1821,22 +1951,15 @@ int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, qp->rq.wrid[idx] = wr->wr_id; idx = (idx + 1) & (qp->rq.wqe_cnt - 1); + atomic_inc(&qp->rq.flush_wqe_cnt); } out: if (likely(nreq)) { qp->rq.head += nreq; next_pid = qp->rq.head << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); - db.rq_next_pid = next_pid; - db.rqn = qp->doorbell_qpn; - - /* - * Make sure that descriptors are written before - * doorbell record. - */ - wmb(); - writel(db.raw_data, REG_ADDR(xdev, xdev->regs.rx_db)); + xsc_update_rx_db(xdev, qp->doorbell_qpn, next_pid); } spin_unlock_irqrestore(&qp->rq.lock, flags); @@ -1876,7 +1999,6 @@ int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_ struct xsc_ib_qp *qp = to_xqp(ibqp); struct xsc_query_qp_mbox_out *outb; struct xsc_qp_context *context; - int xsc_state; int err = 0; mutex_lock(&qp->mutex); @@ -1890,12 +2012,16 @@ int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_ if (err) goto out_free; + if (qp->xqp.err_occurred) { + qp->state = IB_QPS_ERR; + qp->xqp.err_occurred = 0; + } qp_attr->qp_state = qp->state; qp_attr->path_mtu = context->mtu_mode ? IB_MTU_4096 : IB_MTU_1024; qp_attr->rq_psn = be32_to_cpu(context->next_recv_psn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; - qp_attr->sq_draining = xsc_state == XSC_QP_STATE_SQ_DRAINING; + qp_attr->sq_draining = 0; qp_attr->retry_cnt = context->retry_cnt; qp_attr->rnr_retry = context->rnr_retry; qp_attr->cur_qp_state = qp_attr->qp_state; @@ -1932,10 +2058,83 @@ int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_ return err; } -void xsc_ib_drain_rq(struct ib_qp *qp __attribute__((unused))) +struct xsc_ib_drain_cqe { + struct ib_cqe cqe; + struct completion done; +}; + +static void xsc_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) { + struct xsc_ib_drain_cqe *cqe = container_of(wc->wr_cqe, + struct xsc_ib_drain_cqe, cqe); + + complete(&cqe->done); } -void xsc_ib_drain_sq(struct ib_qp *qp __attribute__((unused))) +void xsc_ib_drain_rq(struct ib_qp *qp) { + struct ib_cq *cq = qp->recv_cq; + struct xsc_ib_cq *xcq = to_xcq(cq); + struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; + struct xsc_ib_drain_cqe rdrain; + const struct ib_recv_wr rwr = { + .next = NULL, + {.wr_cqe = &rdrain.cqe,}, + }; + const struct ib_recv_wr *bad_rwr; + int ret; + struct xsc_ib_dev *dev = to_mdev(qp->device); + + ret = ib_modify_qp(qp, &attr, IB_QP_STATE); + if (ret) { + xsc_ib_err(dev, "failed to drain recv queue: %d\n", ret); + return; + } + + rdrain.cqe.done = xsc_ib_drain_qp_done; + init_completion(&rdrain.done); + + ret = xsc_ib_post_recv(qp, &rwr, &bad_rwr); + if (ret) { + xsc_ib_err(dev, "failed to drain recv queue: %d\n", ret); + return; + } + + xcq->xcq.comp(&xcq->xcq); + wait_for_completion(&rdrain.done); +} + +void xsc_ib_drain_sq(struct ib_qp *qp) +{ + struct ib_cq *cq = qp->send_cq; + struct xsc_ib_cq *xcq = to_xcq(cq); + struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; + struct xsc_ib_drain_cqe sdrain; + const struct ib_send_wr *bad_swr; + const struct 
ib_send_wr swr = { + .next = NULL, + { .wr_cqe = &sdrain.cqe, }, + .opcode = IB_WR_RDMA_WRITE, + .send_flags = IB_SEND_SIGNALED, + }; + int ret; + struct xsc_ib_dev *dev = to_mdev(qp->device); + + ret = ib_modify_qp(qp, &attr, IB_QP_STATE); + if (ret) { + xsc_ib_err(dev, "failed to drain recv queue: %d\n", ret); + return; + } + + sdrain.cqe.done = xsc_ib_drain_qp_done; + init_completion(&sdrain.done); + + ret = xsc_ib_post_send(qp, &swr, &bad_swr); + if (ret) { + xsc_ib_err(dev, "failed to drain send queue: %d\n", ret); + return; + } + + xcq->xcq.comp(&xcq->xcq); + wait_for_completion(&sdrain.done); } diff --git a/drivers/infiniband/hw/xsc/rtt.c b/drivers/infiniband/hw/xsc/rtt.c index b40c56587b0b0814491ee9a48093edc609dff207..8b8a72d9434e6320ac53e8951dd5108eebf77343 100644 --- a/drivers/infiniband/hw/xsc/rtt.c +++ b/drivers/infiniband/hw/xsc/rtt.c @@ -37,7 +37,10 @@ static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_EN); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt en, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -72,7 +75,10 @@ static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to set rtt en, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -95,7 +101,10 @@ static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes * in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_QPN); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), (void *)&out, sizeof(struct xsc_get_rtt_qpn_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt qpn, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -138,7 +147,10 @@ static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_qpn_mbox_in), (void *)&out, sizeof(struct xsc_rtt_qpn_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to set rtt qpn, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -160,7 +172,10 @@ static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_PERIOD); err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not 
supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt period, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -194,7 +209,10 @@ static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attribut err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_period_mbox_in), (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to set rtt period, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -218,7 +236,10 @@ static ssize_t result_show(struct xsc_rtt_interface *g, struct xsc_rtt_attribute err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), (void *)&out, sizeof(struct xsc_rtt_result_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt result, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -253,7 +274,10 @@ static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), (void *)&out, sizeof(struct xsc_rtt_stats_mbox_out)); - if (err || out.hdr.status) { + if (!err && out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_err(g->xdev, "Operation not supported\n"); + return -EOPNOTSUPP; + } else if (err || out.hdr.status) { xsc_core_err(g->xdev, "Failed to get rtt stats, err(%u), status(%u)\n", err, out.hdr.status); return -EINVAL; @@ -389,7 +413,7 @@ void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); - if (err || out.hdr.status) + if (err || (out.hdr.status && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) xsc_core_err(xdev, "Failed to set rtt disable, err(%u), status(%u)\n", err, out.hdr.status); @@ -401,7 +425,7 @@ void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) err = xsc_cmd_exec(xdev, (void *)&period_in, sizeof(struct xsc_rtt_period_mbox_in), (void *)&period_out, sizeof(struct xsc_rtt_period_mbox_out)); - if (err || period_out.hdr.status) + if (err || (period_out.hdr.status && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) xsc_core_err(xdev, "Failed to set rtt period default, err(%u), status(%u)\n", err, out.hdr.status); diff --git a/drivers/infiniband/hw/xsc/user.h b/drivers/infiniband/hw/xsc/user.h index 66307d95c1ad2444bf446d9c0f047047ce7fcd2d..4a18c69b818d27ae3048a91d2962b9e0ed47efed 100644 --- a/drivers/infiniband/hw/xsc/user.h +++ b/drivers/infiniband/hw/xsc/user.h @@ -11,6 +11,10 @@ #include /* For ETH_ALEN. 
*/ #include +#ifndef UVERBS_ID_NS_SHIFT +#define UVERBS_ID_NS_SHIFT 12 +#endif + enum xsc_ib_devx_methods { XSC_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT), XSC_IB_METHOD_DEVX_QUERY_UAR, @@ -53,6 +57,7 @@ enum { XSC_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, XSC_QP_FLAG_RAWPACKET_TSO = 1 << 9, XSC_QP_FLAG_RAWPACKET_TX = 1 << 10, + XSC_QP_FLAG_RAWPACKET_SNIFFER = 1 << 11, }; struct xsc_ib_alloc_ucontext_req { @@ -73,7 +78,7 @@ struct xsc_ib_alloc_ucontext_resp { __u32 max_send_wqebb; __u32 max_recv_wr; __u16 num_ports; - __u16 reserved; + __u16 device_id; __u64 qpm_tx_db; __u64 qpm_rx_db; __u64 cqm_next_cid_reg; @@ -273,4 +278,5 @@ struct xsc_ib_query_device_resp { __u32 tunnel_offloads_caps; /* enum xsc_ib_tunnel_offloads */ __u32 reserved; }; + #endif /* XSC_IB_USER_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib.h b/drivers/infiniband/hw/xsc/xsc_ib.h index 076d5078d535978765e5a42d89c1e11c9b160fe0..eedfb57a4c5bcc8eaf605dd997e3d3ac5c028e60 100644 --- a/drivers/infiniband/hw/xsc/xsc_ib.h +++ b/drivers/infiniband/hw/xsc/xsc_ib.h @@ -22,6 +22,7 @@ #include #include #include +#include #include "xsc_ib_compat.h" @@ -65,6 +66,19 @@ struct xsc_ib_ucontext { #define field_avail(type, fld, sz) (offsetof(type, fld) + \ sizeof(((type *)0)->fld) <= (sz)) +#define XSC_PAGE_SHIFT_4K 12 +#define XSC_PAGE_SHIFT_64K 16 +#define XSC_PAGE_SHIFT_2M 21 +#define XSC_PAGE_SHIFT_1G 30 +#define XSC_PAGE_SZ_4K BIT(XSC_PAGE_SHIFT_4K) +#define XSC_PAGE_SZ_64K BIT(XSC_PAGE_SHIFT_64K) +#define XSC_PAGE_SZ_2M BIT(XSC_PAGE_SHIFT_2M) +#define XSC_PAGE_SZ_1G BIT(XSC_PAGE_SHIFT_1G) +#define XSC_MR_PAGE_CAP_MASK (XSC_PAGE_SZ_4K | \ + XSC_PAGE_SZ_64K | \ + XSC_PAGE_SZ_2M | \ + XSC_PAGE_SZ_1G) + static inline struct xsc_ib_ucontext *to_xucontext(struct ib_ucontext *ibucontext) { return container_of(ibucontext, struct xsc_ib_ucontext, ibucontext); @@ -82,20 +96,6 @@ struct xsc_ib_pd { #define XSC_IB_QPT_REG_UMR IB_QPT_RESERVED1 -enum { - XSC_PAGE_SHIFT_4K = 12, - XSC_PAGE_SHIFT_64K = 16, - XSC_PAGE_SHIFT_2M = 21, - XSC_PAGE_SHIFT_1G = 30, -}; - -enum { - XSC_PAGE_MODE_4K = 0, - XSC_PAGE_MODE_64K = 1, - XSC_PAGE_MODE_2M = 2, - XSC_PAGE_MODE_1G = 3, -}; - struct wr_list { u16 opcode; u16 next; @@ -127,6 +127,9 @@ struct xsc_ib_wq { dma_addr_t hdr_dma; int mad_queue_depth; int mad_index; + u32 *wr_opcode; + u32 *need_flush; + atomic_t flush_wqe_cnt; }; enum { @@ -213,6 +216,7 @@ struct xsc_ib_cq { struct xsc_ib_cq_resize *resize_buf; struct ib_umem *resize_umem; int cqe_size; + struct list_head err_state_qp_list; }; struct xsc_ib_xrcd { @@ -234,9 +238,9 @@ struct xsc_ib_mr { int npages; struct completion done; enum ib_wc_status status; - struct xsc_ib_peer_id *peer_id; - atomic_t invalidated; - struct completion invalidation_comp; + struct xsc_ib_peer_id *peer_id; + atomic_t invalidated; + struct completion invalidation_comp; }; struct xsc_ib_peer_id { @@ -303,7 +307,6 @@ struct xsc_ib_dev { struct uverbs_object_tree_def *driver_trees[6]; struct net_device *netdev; struct xsc_core_device *xdev; - XSC_DECLARE_DOORBELL_LOCK(uar_lock); struct list_head eqs_list; int num_ports; int num_comp_vectors; @@ -343,6 +346,12 @@ struct xsc_pa_chunk { size_t length; }; +struct xsc_err_state_qp_node { + struct list_head entry; + u32 qp_id; + bool is_sq; +}; + static inline struct xsc_ib_cq *to_xibcq(struct xsc_core_cq *xcq) { return container_of(xcq, struct xsc_ib_cq, xcq); @@ -623,4 +632,7 @@ static inline void *xsc_ib_recv_mad_sg_virt_addr(struct ib_device *ibdev, recv = container_of(mad_priv_hdr, struct ib_mad_private, header); return 
&recv->grh; } + +int xsc_get_rdma_ctrl_info(struct xsc_core_device *xdev, u16 opcode, void *out, int out_size); + #endif /* XSC_IB_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib_compat.h b/drivers/infiniband/hw/xsc/xsc_ib_compat.h index a7cfdf448b87f7cdf4f85fefb14cf8487ab74215..2263fdedd4cb1d8a506423360c17d9b2f7626d91 100644 --- a/drivers/infiniband/hw/xsc/xsc_ib_compat.h +++ b/drivers/infiniband/hw/xsc/xsc_ib_compat.h @@ -21,10 +21,11 @@ int xsc_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *ah_attr, int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags); #define xsc_ib_destroy_ah_def() int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags) -int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); int xsc_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); + +int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); // from main.c static functions @@ -39,8 +40,6 @@ int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); #define xsc_ib_destroy_cq_def() int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) #define xsc_ib_destroy_qp_def() int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) -#define xsc_ib_create_cq_def() int xsc_ib_create_cq(struct ib_cq *ibcq,\ - const struct ib_cq_init_attr *attr, struct ib_udata *udata) #define xsc_ib_dereg_mr_def() int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) #define xsc_ib_alloc_ucontext_def() int xsc_ib_alloc_ucontext(\ struct ib_ucontext *uctx, struct ib_udata *udata) @@ -52,4 +51,5 @@ int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); #define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) -#endif /* XSC_IB_COMPAT_H */ + +#endif diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c index dcf934b61e9bf48154f04a6958b5190519a0885d..4c6fb9b85df35926f682ec75c3abc132de876382 100644 --- a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c @@ -12,7 +12,9 @@ #include "common/xsc_ioctl.h" #include "common/xsc_hsi.h" #include "common/xsc_port_ctrl.h" +#include "common/tunnel_cmd.h" #include "xsc_ib.h" +#include "xsc_rdma_ctrl.h" #define XSC_RDMA_CTRL_NAME "rdma_ctrl" @@ -280,7 +282,7 @@ static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void * struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; resp->pcp = ib_dev->force_pcp; return 0; @@ -292,7 +294,7 @@ static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; resp->dscp = ib_dev->force_dscp; return 0; @@ -304,7 +306,7 @@ static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void * struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) return -EINVAL; @@ -319,7 +321,7 @@ static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void struct xsc_ioctl_force_dscp *req = 
(struct xsc_ioctl_force_dscp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) return -EINVAL; @@ -334,7 +336,7 @@ static int xsc_priv_dev_ioctl_get_cma_pcp(struct xsc_core_device *xdev, void *in struct xsc_ioctl_cma_pcp *resp = (struct xsc_ioctl_cma_pcp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; resp->pcp = ib_dev->cm_pcp; return 0; @@ -346,7 +348,7 @@ static int xsc_priv_dev_ioctl_get_cma_dscp(struct xsc_core_device *xdev, void *i struct xsc_ioctl_cma_dscp *resp = (struct xsc_ioctl_cma_dscp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; resp->dscp = ib_dev->cm_dscp; return 0; @@ -358,7 +360,7 @@ static int xsc_priv_dev_ioctl_set_cma_pcp(struct xsc_core_device *xdev, void *in struct xsc_ioctl_cma_pcp *req = (struct xsc_ioctl_cma_pcp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) return -EINVAL; @@ -373,7 +375,7 @@ static int xsc_priv_dev_ioctl_set_cma_dscp(struct xsc_core_device *xdev, void *i struct xsc_ioctl_cma_dscp *req = (struct xsc_ioctl_cma_dscp *)out; if (!xsc_core_is_pf(xdev)) - return -EOPNOTSUPP; + return XSC_CMD_STATUS_NOT_SUPPORTED; if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) return -EINVAL; @@ -391,6 +393,10 @@ static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, struct xsc_cc_mbox_out *out; u16 user_size; int err; + struct xsc_ioctl_tunnel_hdr tunnel_hdr = {0}; + + if (hdr->attr.tunnel_cmd) + hdr->attr.length -= sizeof(tunnel_hdr); user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; if (hdr->attr.length != user_size) @@ -403,19 +409,33 @@ static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, if (!out) goto err_out; - err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); - if (err) - goto err; + if (hdr->attr.tunnel_cmd) { + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + goto err; + err = copy_from_user(&in->data, user_hdr->attr.data + sizeof(tunnel_hdr), + expect_req_size); + if (err) + goto err; + } else { + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + } in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); in->hdr.ver = cpu_to_be16(hdr->attr.ver); if (encode) encode((void *)in->data, xdev->mac_port); - err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, - sizeof(*out) + expect_resp_size); + if (hdr->attr.tunnel_cmd) + err = xsc_tunnel_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size, &tunnel_hdr); + else + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); - hdr->attr.error = __be32_to_cpu(out->hdr.status); + hdr->attr.error = out->hdr.status; if (decode) decode((void *)out->data); @@ -436,8 +456,54 @@ static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, return -EFAULT; } -int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size) +static int _rdma_ctrl_exec_tunnel_ioctl(struct xsc_core_device *xdev, + void *in, int in_size, + void *out, int out_size, + struct xsc_ioctl_tunnel_hdr *tunnel_hdr) +{ + struct xsc_cmd_get_ioctl_info_mbox_in *_in; + struct xsc_cmd_get_ioctl_info_mbox_out *_out; + int inlen; + int outlen; + int err; + struct xsc_ioctl_attr *hdr = (struct xsc_ioctl_attr *)in; + + inlen = sizeof(*_in) + out_size; + _in = kvzalloc(inlen, GFP_KERNEL); + if (!_in) { + err = -ENOMEM; + goto err_in; + } + + outlen = sizeof(*_out) + out_size; + _out = kvzalloc(outlen, GFP_KERNEL); + if (!_out) { + err = -ENOMEM; + goto err_out; + } + + memset(_in, 0, sizeof(*_in)); + _in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_GET_IOCTL_INFO); + _in->ioctl_opcode = cpu_to_be16(hdr->opcode); + _in->length = cpu_to_be16(out_size); + memcpy(_in->data, out, out_size); + err = xsc_tunnel_cmd_exec(xdev, _in, inlen, _out, outlen, tunnel_hdr); + if (err) + goto out; + memcpy(out, _out->data, out_size); + + return 0; +out: + kvfree(_out); +err_out: + kvfree(_in); +err_in: + return err; +} + +static int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, + void *in, int in_size, + void *out, int out_size) { int opcode, ret = 0; struct xsc_ioctl_attr *hdr; @@ -478,6 +544,14 @@ int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, v return ret; } +int xsc_get_rdma_ctrl_info(struct xsc_core_device *xdev, u16 opcode, void *out, int out_size) +{ + struct xsc_ioctl_attr attr; + + attr.opcode = opcode; + return _rdma_ctrl_exec_ioctl(xdev, &attr, sizeof(attr), out, out_size); +} + static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, struct xsc_ioctl_hdr __user *user_hdr) { @@ -485,6 +559,7 @@ static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, struct xsc_ioctl_hdr *in; int in_size; int err; + struct xsc_ioctl_tunnel_hdr tunnel_hdr; err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) @@ -504,24 +579,181 @@ static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, default: return -EINVAL; } + if (hdr.attr.tunnel_cmd) + 
hdr.attr.length -= sizeof(tunnel_hdr); in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; in = kvzalloc(in_size, GFP_KERNEL); if (!in) return -EFAULT; in->attr.opcode = hdr.attr.opcode; in->attr.length = hdr.attr.length; - err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); - if (err) { - kvfree(in); - return -EFAULT; - } - err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), in->attr.data, - hdr.attr.length); + if (hdr.attr.tunnel_cmd) { + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) { + err = -EFAULT; + goto out; + } + err = copy_from_user(in->attr.data, user_hdr->attr.data + sizeof(tunnel_hdr), + hdr.attr.length); + if (err) { + err = -EFAULT; + goto out; + } + err = _rdma_ctrl_exec_tunnel_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), + in->attr.data, hdr.attr.length, &tunnel_hdr); + } else { + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + goto out; + } + err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), in->attr.data, + hdr.attr.length); + } in->attr.error = err; if (copy_to_user(user_hdr, in, in_size)) err = -EFAULT; +out: + kvfree(in); + return err; +} + +static long _rdma_ctrl_ioctl_get_rdma_counters(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_tunnel_hdr tunnel_hdr; + int err; + struct xsc_hw_stats_mbox_in in; + struct xsc_hw_stats_rdma_mbox_out out; + + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + return err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_RDMA); + err = xsc_tunnel_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out), &tunnel_hdr); + if (err) + return err; + + if (out.hdr.status) + return -EINVAL; + + err = copy_to_user(user_hdr->attr.data, &out.hw_stats, sizeof(out.hw_stats)); + if (err) + return err; + return 0; +} + +static long _rdma_ctrl_ioctl_get_prio_counters(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_tunnel_hdr tunnel_hdr; + int err; + struct xsc_prio_stats_mbox_in in; + struct xsc_prio_stats_mbox_out out; + + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + return err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_PRIO_STATS); + err = xsc_tunnel_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out), &tunnel_hdr); + if (err) + return err; + + if (out.hdr.status) + return -EINVAL; + + err = copy_to_user(user_hdr->attr.data, &out.prio_stats, sizeof(out.prio_stats)); + if (err) + return err; + return 0; +} + +static long _rdma_ctrl_ioctl_get_pfc_counters(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_tunnel_hdr tunnel_hdr; + int err; + struct xsc_pfc_prio_stats_mbox_in in; + struct xsc_pfc_prio_stats_mbox_out out; + + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + return err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS); + err = xsc_tunnel_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out), &tunnel_hdr); + if (err) + return err; + + if (out.hdr.status) + return -EINVAL; + + err = copy_to_user(user_hdr->attr.data, &out.prio_stats, 
sizeof(out.prio_stats)); + if (err) + return err; + return 0; +} + +static long _rdma_ctrl_ioctl_get_hw_counters(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_tunnel_hdr tunnel_hdr; + int err; + struct xsc_cmd_ioctl_get_hw_counters_mbox_in *in; + struct xsc_cmd_ioctl_get_hw_counters_mbox_out *out; + int inlen; + int outlen; + + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + return err; + + hdr->attr.length -= sizeof(tunnel_hdr); + inlen = sizeof(*in) + hdr->attr.length; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + outlen = sizeof(*out) + hdr->attr.length; + out = kvzalloc(outlen, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + memset(in, 0, inlen); + memset(out, 0, outlen); + err = copy_from_user(in->data, user_hdr->attr.data + sizeof(tunnel_hdr), hdr->attr.length); + if (err) + goto out; + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_GET_HW_COUNTERS); + in->length = cpu_to_be32(hdr->attr.length); + err = xsc_tunnel_cmd_exec(xdev, in, inlen, out, outlen, &tunnel_hdr); + if (err) + goto out; + + if (out->hdr.status) { + err = -EINVAL; + goto out; + } + + err = copy_to_user(user_hdr->attr.data, out->data, hdr->attr.length); + if (err) + goto out; +out: kvfree(in); + kvfree(out); return err; } @@ -634,6 +866,14 @@ static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat), sizeof(struct xsc_cc_cmd_stat), encode_cc_get_stat, decode_cc_get_stat); + case XSC_CMD_OP_QUERY_HW_STATS_RDMA: + return _rdma_ctrl_ioctl_get_rdma_counters(xdev, user_hdr, &hdr); + case XSC_CMD_OP_QUERY_PRIO_STATS: + return _rdma_ctrl_ioctl_get_prio_counters(xdev, user_hdr, &hdr); + case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: + return _rdma_ctrl_ioctl_get_pfc_counters(xdev, user_hdr, &hdr); + case XSC_CMD_OP_IOCTL_GET_HW_COUNTERS: + return _rdma_ctrl_ioctl_get_hw_counters(xdev, user_hdr, &hdr); default: return -EINVAL; } diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h index 79c5c0a9d255fc45eecb771a71380b4d7ad38899..5049377101f9a34c3132ad4b913cbd0b3fa8ec75 100644 --- a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h @@ -9,4 +9,5 @@ void xsc_rdma_ctrl_fini(void); int xsc_rdma_ctrl_init(void); -#endif /* XSC_RDMA_CTRL_H */ + +#endif diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_prgrmmbl_cc_ctrl.c b/drivers/infiniband/hw/xsc/xsc_rdma_prgrmmbl_cc_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..7bf1dd6df4b307cbe92c19b710a447919134a5cb --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_prgrmmbl_cc_ctrl.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_prgrmmbl_cc_ctrl.h" +#include "xsc_ib.h" +#include "xsc_rdma_prgrmmbl_cc_ctrl.h" + +#define FLEXCC_IOCTL_MAGIC (0x1c) +#define FLEXCC_IOCTL_BASE (0x1) +#define FLEXCC_IOCTL_CMD _IOWR(FLEXCC_IOCTL_MAGIC, FLEXCC_IOCTL_BASE,\ + struct flexcc_ioctl_buf) + +#define XSC_RDMA_CTRL_NAME "r_prgrm_cc_ctl" + +static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, + struct flexcc_ioctl_buf __user *user_buf) +{ + struct flexcc_mbox_in *in; + struct flexcc_mbox_out *out; + int in_len = sizeof(struct flexcc_mbox_in) + sizeof(struct flexcc_ioctl_buf); + int out_len = sizeof(struct flexcc_mbox_out) + sizeof(struct flexcc_ioctl_buf); + int err; + + in = kvzalloc(in_len, GFP_KERNEL); + if (!in) + return -ENOMEM; + out = kvzalloc(out_len, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_PRGRMMBL_CC); + in->hdr.ver = cpu_to_be16(0); + + err = copy_from_user(&in->data, user_buf, sizeof(struct flexcc_ioctl_buf)); + if (err) { + err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, (void *)in, in_len, (void *)out, out_len); + + if (copy_to_user(user_buf, out->data, sizeof(struct flexcc_ioctl_buf))) + err = -EFAULT; + + if (out->hdr.status) + err = -EFAULT; + +err_exit: + kvfree(in); + kvfree(out); + return err; +} + +static int _rdma_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + unsigned long args, void *data) +{ + struct xsc_core_device *xdev = file->xdev; + struct flexcc_ioctl_buf __user *user_buf = (struct flexcc_ioctl_buf __user *)args; + int err; + + switch (cmd) { + case FLEXCC_IOCTL_CMD: + err = _rdma_ctrl_ioctl_cmdq(xdev, user_buf); + break; + default: + err = -EFAULT; + break; + } + + return err; +} + +static void _rdma_prgrmmbl_cc_ctrl_reg_fini(void) +{ + xsc_prgrmmbl_cc_ctrl_cb_dereg(XSC_RDMA_CTRL_NAME); +} + +static int _rdma_prgrmmbl_cc_ctrl_reg_init(void) +{ + int ret; + + ret = xsc_prgrmmbl_cc_ctrl_cb_reg(XSC_RDMA_CTRL_NAME, _rdma_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_RDMA_CTRL_NAME); + + return ret; +} + +void xsc_rdma_prgrmmbl_cc_ctrl_fini(void) +{ + _rdma_prgrmmbl_cc_ctrl_reg_fini(); +} + +int xsc_rdma_prgrmmbl_cc_ctrl_init(void) +{ + return _rdma_prgrmmbl_cc_ctrl_reg_init(); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_prgrmmbl_cc_ctrl.h b/drivers/infiniband/hw/xsc/xsc_rdma_prgrmmbl_cc_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..83c02ed5f0be66e44d960bbf587f7025d0da2a7f --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_prgrmmbl_cc_ctrl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_RDMA_PRGRMMBL_CC_CTRL_H +#define XSC_RDMA_PRGRMMBL_CC_CTRL_H + +void xsc_rdma_prgrmmbl_cc_ctrl_fini(void); +int xsc_rdma_prgrmmbl_cc_ctrl_init(void); + +#endif diff --git a/drivers/infiniband/hw/xsc/xsc_smc.h b/drivers/infiniband/hw/xsc/xsc_smc.h new file mode 100644 index 0000000000000000000000000000000000000000..e507954eb1af48c7484602bbd4394e041a65a0d7 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_smc.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_SMC_H__ +#define __XSC_SMC_H__ + +#include + +enum smc_wr_reg_state { + POSTED, /* ib_wr_reg_mr request posted */ + CONFIRMED, /* ib_wr_reg_mr response: successful */ + FAILED /* ib_wr_reg_mr response: failure */ +}; + +enum smc_link_state { /* possible states of a link */ + SMC_LNK_UNUSED, /* link is unused */ + SMC_LNK_INACTIVE, /* link is inactive */ + SMC_LNK_ACTIVATING, /* link is being activated */ + SMC_LNK_ACTIVE, /* link is active */ +}; + +#define SMC_GID_SIZE sizeof(union ib_gid) +#define SMC_LGR_ID_SIZE 4 +#define SMC_WR_BUF_CNT 64 /* # of ctrl buffers per link, SMC_WR_BUF_CNT + * should not be less than 2 * SMC_RMBS_PER_LGR_MAX, + * since every connection at least has two rq/sq + * credits in average, otherwise may result in + * waiting for credits in sending process. + */ +#define SMC_WR_BUF_SIZE 48 /* size of work request buffer */ +#define SMC_WR_BUF_V2_SIZE 8192 /* size of v2 work request buffer */ + +struct smc_ib_cq { /* ib_cq wrapper for smc */ + struct smc_ib_device *smcibdev; /* parent ib device */ + struct ib_cq *ib_cq; /* real ib_cq for link */ + struct tasklet_struct tasklet; /* tasklet for wr */ + int load; /* load of current cq */ +}; + +struct smc_wr_buf { + u8 raw[SMC_WR_BUF_SIZE]; +}; + +struct smc_wr_v2_buf { + u8 raw[SMC_WR_BUF_V2_SIZE]; +}; + +struct smc_link { + struct iw_ext_conn_param iw_conn_param; + struct smc_ib_device *smcibdev; /* ib-device */ + u8 ibport; /* port - values 1 | 2 */ + struct ib_pd *roce_pd; /* IB protection domain, + * unique for every RoCE QP + */ + struct smc_ib_cq *smcibcq; /* cq for recv & send */ + struct ib_qp *roce_qp; /* IB queue pair */ + struct ib_qp_attr qp_attr; /* IB queue pair attributes */ + + struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ + struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ + struct ib_sge *wr_tx_sges; /* WR send gather meta data */ + struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ + struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ + struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ + struct completion *wr_tx_compl; /* WR send CQE completion */ + /* above four vectors have wr_tx_cnt elements and use the same index */ + struct ib_send_wr *wr_tx_v2_ib; /* WR send v2 meta data */ + struct ib_sge *wr_tx_v2_sge; /* WR send v2 gather meta data*/ + struct smc_wr_tx_pend *wr_tx_v2_pend; /* WR send v2 waiting for CQE */ + dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ + dma_addr_t wr_tx_v2_dma_addr; /* DMA address of v2 tx buf*/ + atomic_long_t wr_tx_id; /* seq # of last sent WR */ + unsigned long *wr_tx_mask; /* bit mask of used indexes */ + u32 wr_tx_cnt; /* number of WR send buffers */ + wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */ + struct { + struct percpu_ref wr_tx_refs; + } ____cacheline_aligned_in_smp; + struct completion tx_ref_comp; + atomic_t tx_inflight_credit; + + struct smc_wr_buf *wr_rx_bufs[SMC_WR_BUF_CNT]; + /* WR recv payload buffers */ + struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */ + struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */ + /* above three vectors have wr_rx_cnt elements and use the same index */ + dma_addr_t wr_rx_dma_addr[SMC_WR_BUF_CNT]; + /* DMA address of wr_rx_bufs */ + u64 wr_rx_id; /* seq # of last recv WR */ + u32 wr_rx_cnt; /* number of WR recv buffers */ + unsigned long wr_rx_tstamp; /* jiffies when last buf rx */ + + struct ib_reg_wr wr_reg; /* WR register memory region */ + wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ + struct { 
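+ /* percpu ref taken while wr_reg work requests are outstanding; drained via reg_ref_comp below */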
+ struct percpu_ref wr_reg_refs; + } ____cacheline_aligned_in_smp; + struct completion reg_ref_comp; + enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */ + + atomic_t peer_rq_credits; /* credits for peer rq flowctrl */ + atomic_t local_rq_credits; /* credits for local rq flowctrl */ + u8 credits_enable; /* credits enable flag, set when negotiation */ + u8 local_cr_watermark_high; /* local rq credits watermark */ + u8 peer_cr_watermark_low; /* peer rq credits watermark */ + u8 credits_update_limit; /* credits update limit for cdc msg */ + struct work_struct credits_announce_work; /* work for credits announcement */ + unsigned long flags; /* link flags, SMC_LINKFLAG_ANNOUNCE_PENDING .etc */ + + u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/ + u8 eiwarp_gid[SMC_GID_SIZE]; + /* gid of eRDMA iWARP device */ + u8 sgid_index; /* gid index for vlan id */ + u32 peer_qpn; /* QP number of peer */ + enum ib_mtu path_mtu; /* used mtu */ + enum ib_mtu peer_mtu; /* mtu size of peer */ + u32 psn_initial; /* QP tx initial packet seqno */ + u32 peer_psn; /* QP rx initial packet seqno */ + u8 peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */ + u8 peer_gid[SMC_GID_SIZE]; /* gid of peer*/ + u8 link_id; /* unique # within link group */ + u8 link_uid[SMC_LGR_ID_SIZE]; /* unique lnk id */ + u8 peer_link_uid[SMC_LGR_ID_SIZE]; /* peer uid */ + u8 link_idx; /* index in lgr link array */ + u8 link_is_asym; /* is link asymmetric? */ + u8 clearing : 1; /* link is being cleared */ + refcount_t refcnt; /* link reference count */ + struct smc_link_group *lgr; /* parent link group */ + struct work_struct link_down_wrk; /* wrk to bring link down */ + char ibname[IB_DEVICE_NAME_MAX]; /* ib device name */ + int ndev_ifidx; /* network device ifindex */ + + enum smc_link_state state; /* state of link */ + struct delayed_work llc_testlink_wrk; /* testlink worker */ + struct completion llc_testlink_resp; /* wait for rx of testlink */ + int llc_testlink_time; /* testlink interval */ + atomic_t conn_cnt; /* connections on this link */ +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h index 76f0c506444649a12602889936f3c1360ed65c61..19def0548e649e83679c56da36e6ea66cc8709c8 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h @@ -14,8 +14,6 @@ struct xsc_core_cq { u32 cqn; int cqe_sz; - u64 arm_db; - u64 ci_db; struct xsc_core_device *dev; atomic_t refcount; struct completion free; @@ -48,37 +46,10 @@ enum { XSC_CQ_DB_REQ_NOT = 0, }; -static inline void xsc_cq_arm(struct xsc_core_cq *cq, u8 solicited) -{ - union xsc_cq_doorbell db; - - db.val = 0; - db.cq_next_cid = cq->cons_index; - db.cq_id = cq->cqn; - db.arm = solicited; - - /* Make sure that the doorbell record in host memory is - * written before ringing the doorbell via PCI MMIO. 
- */ - wmb(); - writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); -} - -static inline void xsc_cq_set_ci(struct xsc_core_cq *cq) -{ - struct xsc_core_device *xdev = cq->dev; - union xsc_cq_doorbell db; - - db.cq_next_cid = cq->cons_index; - db.cq_id = cq->cqn; - /* ensure write val visable before doorbell */ - wmb(); - - writel(db.val, REG_ADDR(xdev, cq->ci_db)); -} - int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, - struct xsc_create_cq_mbox_in *in, int inlen); + struct xsc_create_cq_ex_mbox_in *in, int inlen); +int xsc_create_cq_compat_handler(struct xsc_core_device *dev, struct xsc_create_cq_ex_mbox_in *in, + struct xsc_create_cq_mbox_out *out); int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq); int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, struct xsc_query_cq_mbox_out *out); diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h index 03705978a85a68c84a6fa19a7d5ed466bae31efb..e0bdeb43b511502b33d7e39681e3f4c29b3d439b 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h @@ -14,7 +14,6 @@ #include #include #include "common/device.h" -#include "common/doorbell.h" #include "common/xsc_core.h" #include "common/xsc_cmd.h" #include "common/xsc_hsi.h" @@ -118,7 +117,6 @@ struct xsc_cq_table { struct xsc_eq { struct xsc_core_device *dev; struct xsc_cq_table cq_table; - u32 doorbell;//offset from bar0/2 space start u32 cons_index; struct xsc_buf buf; int size; @@ -253,7 +251,7 @@ static inline void xsc_vfree(const void *addr) kfree(addr); } -int xsc_dev_init(struct xsc_core_device *xdev); +int xsc_dev_init(struct xsc_core_device *dev); void xsc_dev_cleanup(struct xsc_core_device *xdev); int xsc_cmd_init(struct xsc_core_device *xdev); void xsc_cmd_cleanup(struct xsc_core_device *xdev); @@ -317,21 +315,12 @@ int xsc_qptrace_debugfs_init(struct xsc_core_device *dev); void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev); int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node); +int xsc_db_alloc(struct xsc_core_device *xdev, struct xsc_db *db); int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, struct xsc_frag_buf *buf, int node); void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db); void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf); -static inline u32 xsc_mkey_to_idx(u32 mkey) -{ - return mkey >> ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 17 : 18); -} - -static inline u32 xsc_idx_to_mkey(u32 mkey_idx) -{ - return mkey_idx << ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 
17 : 18); -} - enum { XSC_PROF_MASK_QP_SIZE = (u64)1 << 0, XSC_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/port.h b/drivers/net/ethernet/yunsilicon/xsc/common/port.h index 1d13b390c768b022acd44f7ae035236eff461d54..a44af6c88c0678334898a963ed2bb1ca982edc0f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/port.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/port.h @@ -37,4 +37,4 @@ int xsc_query_module_eeprom(struct xsc_core_device *dev, int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, struct xsc_module_eeprom_query_params *params, u8 *data); -#endif /* __XSC_PORT_H__ */ +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h index fd3d6ee4a8dfe27ba6ae2999532acd9e8884529f..e5a72151452dfe390ca52c69c2c4965df7e30801 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h @@ -7,6 +7,7 @@ #define XSC_QP_H #include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" #include "common/device.h" #include "common/driver.h" @@ -117,6 +118,7 @@ struct xsc_core_qp { u16 qp_type_internal; u16 grp_id; u8 mac_id; + u8 err_occurred; }; struct xsc_qp_rsc { @@ -165,7 +167,11 @@ int create_resource_common(struct xsc_core_device *xdev, struct xsc_core_qp *qp); void destroy_resource_common(struct xsc_core_device *xdev, struct xsc_core_qp *qp); - +int xsc_alloc_qpn(struct xsc_core_device *xdev, u16 *qpn_base, u16 qp_cnt, u8 qp_type); +int xsc_dealloc_qpn(struct xsc_core_device *xdev, u16 qpn_base, u16 qp_cnt, u8 qp_type); +int xsc_unset_qp_info(struct xsc_core_device *xdev, u16 qpn); +int xsc_set_qp_info(struct xsc_core_device *xdev, struct xsc_create_qp_request *qp_info, + size_t pas_buf_size); int xsc_core_create_qp(struct xsc_core_device *xdev, struct xsc_core_qp *qp, struct xsc_create_qp_mbox_in *in, diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h index a54f712d1ae23aa60fc8329b21f99d8841125f96..57eb829f811b1dd8b4e41f8daf2545caf3565780 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h @@ -68,4 +68,5 @@ enum { int qpts_init(void); void qpts_fini(void); int qpts_write_one_msg(struct xsc_qpt_update_msg *msg); -#endif /* __QPTS_H__ */ + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h index 8f656e93273c7b12fc243e202a13642341f7d28f..e9bfad7fcb6eea42af6c24bdd16651e75ac45cab 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h @@ -48,6 +48,10 @@ struct xsc_wct_obj { unsigned int wct_idx; }; +struct xsc_user_mode_obj { + struct xsc_res_obj obj; +}; + struct xsc_em_obj { struct xsc_res_obj obj; unsigned int em_idx[54]; @@ -87,6 +91,7 @@ enum RES_OBJ_TYPE { RES_OBJ_PCT, RES_OBJ_WCT, RES_OBJ_EM, + RES_OBJ_USER_MODE, RES_OBJ_MAX }; @@ -118,4 +123,9 @@ void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority); void xsc_close_bdf_file(struct xsc_bdf_file *file); void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn); -#endif /* RES_OBJ_H */ +int xsc_alloc_user_mode_obj(struct xsc_bdf_file *file, void (*release_func)(void *), + unsigned int mode, char *data, unsigned int len); +void xsc_free_user_mode_obj(struct xsc_bdf_file *file, unsigned int mode); +void xsc_release_user_mode(struct xsc_bdf_file *file, unsigned int 
mode); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/tunnel_cmd.h b/drivers/net/ethernet/yunsilicon/xsc/common/tunnel_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..1f184c117958833c75535d182ec9ebc3df69090e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/tunnel_cmd.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef TUNNEL_CMD_H +#define TUNNEL_CMD_H + +#include "common/xsc_core.h" + +void xsc_tunnel_cmd_init(struct xsc_core_device *xdev); +void xsc_tunnel_cmd_recv_resp(struct xsc_core_device *xdev); +int xsc_tunnel_cmd_exec(struct xsc_core_device *xdev, void *in, int inlen, void *out, int outlen, + struct xsc_ioctl_tunnel_hdr *hdr); +int xsc_tunnel_cmd_recv_req(struct xsc_core_device *xdev); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/version.h b/drivers/net/ethernet/yunsilicon/xsc/common/version.h index 19ff481c0cfedf052ea3e93f12efc5297b46b285..3f5098e73f04dec38ec60def693d7e7d2282f820 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/version.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/version.h @@ -4,7 +4,7 @@ */ #define BRANCH_VERSION 1 -#define MAJOR_VERSION 2 +#define MAJOR_VERSION 4 #define MINOR_VERSION 0 -#define BUILD_VERSION 367 -#define HOTFIX_NUM 460 +#define BUILD_VERSION 321 +#define HOTFIX_NUM 159 diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h index dad39f12e26590d87560b770da1335db472df7e2..f7da9392855189dd8e9ac9591233da2c8e70c5ed 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h @@ -21,12 +21,6 @@ #define XSC_VPORT_MANAGER(dev) (xsc_core_is_vport_manager(dev)) -enum { - XSC_CAP_INLINE_MODE_L2, - XSC_CAP_INLINE_MODE_VPORT_CONTEXT, - XSC_CAP_INLINE_MODE_NOT_REQUIRED, -}; - /* Vport number for each function must keep unchanged */ enum { XSC_VPORT_PF = 0x0, @@ -41,6 +35,11 @@ enum { XSC_VPORT_ADMIN_STATE_AUTO = 0x2, }; +enum { + XSC_LEAVE, + XSC_JOIN, +}; + u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport); int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, u16 vport, u8 other_vport, u8 state); @@ -90,6 +89,10 @@ int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, enum xsc_list_type list_type, u8 addr_list[][ETH_ALEN], int list_size); +int xsc_nic_vport_add_uc_mac(struct xsc_core_device *xdev, + u8 *mac_addr, u16 *pct_prio); +int xsc_nic_vport_del_uc_mac(struct xsc_core_device *xdev, u16 pct_prio); +int xsc_nic_vport_modify_mc_mac(struct xsc_core_device *xdev, u8 *mac, u8 action); int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, u16 vport, int *promisc, diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h index 99f6a769791ccf7c05d6395f34f7985fcbc68395..6f905fe0d6d3623d7b78dae517d5725b640c0da9 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h @@ -8,7 +8,7 @@ #define CMDQ_VERSION 0x32 -#define MAX_MBOX_OUT_LEN 2048 +#define ETH_ALEN 6 #define QOS_PRIO_MAX 7 #define QOS_DSCP_MAX 63 @@ -20,6 +20,7 @@ #define XSC_BOARD_SN_LEN 32 #define MAX_PKT_LEN 9800 #define XSC_RTT_CFG_QPN_MAX 32 +#define XSC_QP_MEASURE_QP_NUM_MAX 128 #define XSC_PCIE_LAT_CFG_INTERVAL_MAX 8 #define XSC_PCIE_LAT_CFG_HISTOGRAM_MAX 9 @@ -29,23 +30,53 
@@ #define XSC_PCIE_LAT_PERIOD_MAX 20 #define DPU_PORT_WGHT_CFG_MAX 1 -enum { - XSC_CMD_STAT_OK = 0x0, - XSC_CMD_STAT_INT_ERR = 0x1, - XSC_CMD_STAT_BAD_OP_ERR = 0x2, - XSC_CMD_STAT_BAD_PARAM_ERR = 0x3, - XSC_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, - XSC_CMD_STAT_BAD_RES_ERR = 0x5, - XSC_CMD_STAT_RES_BUSY = 0x6, - XSC_CMD_STAT_LIM_ERR = 0x8, - XSC_CMD_STAT_BAD_RES_STATE_ERR = 0x9, - XSC_CMD_STAT_IX_ERR = 0xa, - XSC_CMD_STAT_NO_RES_ERR = 0xf, - XSC_CMD_STAT_BAD_INP_LEN_ERR = 0x50, - XSC_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, - XSC_CMD_STAT_BAD_QP_STATE_ERR = 0x10, - XSC_CMD_STAT_BAD_PKT_ERR = 0x30, - XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +#define XSC_MAX_NUM_PCIE_INTF 2 +#define XSC_MAX_PF_NUM_PER_PCIE 8 + +/* xsc_cmd_status_code is used to indicate the result of a xsc cmd executing. + * How to use it please refer to the design doc: + * https://eb72aga9oq.feishu.cn/docx/UF0GdlGBRoEtvvx1FrAcrnmLnug + */ +enum xsc_cmd_status_code { + /* common status code, range: 0x0 ~ 0x1f */ + XSC_CMD_STATUS_OK = 0x0, + XSC_CMD_STATUS_FAIL = 0x1, + XSC_CMD_STATUS_NOT_SUPPORTED = 0x2, + XSC_CMD_STATUS_BAD_PARAM = 0x3, + XSC_CMD_STATUS_INVAL_RES = 0x5, + XSC_CMD_STATUS_BUSY = 0x6, + XSC_CMD_STATUS_PENDING = 0x7, + XSC_CMD_STATUS_INVAL_DATA = 0x8, + XSC_CMD_STATUS_NOT_FOUND = 0xa, + XSC_CMD_STATUS_NO_RES = 0xf, + + /* extended status code, range: 0x20 ~ 0x4f */ + XSC_CMD_STATUS_INVAL_FUNC = 0x41, + XSC_CMD_STATUS_NO_MPT_RES = 0x42, + XSC_CMD_STATUS_NO_MTT_RES = 0x43, + XSC_CMD_STATUS_NO_EQN_RES = 0x44, + XSC_CMD_STATUS_NO_EQ_PA_RES = 0x45, + XSC_CMD_STATUS_NO_CQN_RES = 0x46, + XSC_CMD_STATUS_NO_CQ_PA_RES = 0x47, + XSC_CMD_STATUS_NO_QPN_RES = 0x48, + XSC_CMD_STATUS_NO_QP_PA_RES = 0x49, + XSC_CMD_STATUS_NO_PDN_RES = 0x4a, + XSC_CMD_STATUS_QP_FLUSH_BUSY = 0x4b, + XSC_CMD_STATUS_QP_FLUSH_PENDING = 0x4c, + + /* Cmdq prototol status code, range: 0x50 ~ 0x5f */ + XSC_CMD_STATUS_BAD_INBUF = 0x50, + XSC_CMD_STATUS_BAD_OUTBUF = 0x51, + XSC_CMD_STATUS_INVAL_OPCODE = 0x52, + + XSC_CMD_STATUS_CODE_MAX = 0xff, +}; + +#define XSC_CMD_STATUS_CODE_COUNT (XSC_CMD_STATUS_CODE_MAX + 1) + +struct xsc_cmd_status_code_map { + int errno; + const char *str; }; enum { @@ -86,6 +117,13 @@ enum { XSC_CMD_OP_ENABLE_RELAXED_ORDER = 0x112, XSC_CMD_OP_QUERY_GUID = 0x113, XSC_CMD_OP_ACTIVATE_HW_CONFIG = 0x114, + XSC_CMD_OP_QUERY_READ_FLUSH = 0x115, + XSC_CMD_OP_SEND_TUNNEL_CMD_REQ = 0x116, + XSC_CMD_OP_RECV_TUNNEL_CMD_REQ = 0x117, + XSC_CMD_OP_SEND_TUNNEL_CMD_RESP = 0x118, + XSC_CMD_OP_RECV_TUNNEL_CMD_RESP = 0x119, + XSC_CMD_OP_GET_IOCTL_INFO = 0x11a, + XSC_CMD_OP_ANNOUNCE_DRIVER_INSTANCE = 0x11b, XSC_CMD_OP_CREATE_MKEY = 0x200, XSC_CMD_OP_QUERY_MKEY = 0x201, @@ -95,6 +133,8 @@ enum { XSC_CMD_OP_DEREG_MR = 0x205, XSC_CMD_OP_SET_MPT = 0x206, XSC_CMD_OP_SET_MTT = 0x207, + XSC_CMD_OP_SYNC_MR_TO_FW = 0x208, + XSC_CMD_OP_SYNC_MR_FROM_FW = 0x209, XSC_CMD_OP_CREATE_EQ = 0x301, XSC_CMD_OP_DESTROY_EQ = 0x302, @@ -106,6 +146,9 @@ enum { XSC_CMD_OP_MODIFY_CQ = 0x403, XSC_CMD_OP_ALLOC_MULTI_VIRTQ_CQ = 0x404, XSC_CMD_OP_RELEASE_MULTI_VIRTQ_CQ = 0x405, + XSC_CMD_OP_SET_CQ_CONTEXT = 0x406, + XSC_CMD_OP_SET_CQ_BUF_PA = 0x407, + XSC_CMD_OP_CREATE_CQ_EX = 0x408, XSC_CMD_OP_CREATE_QP = 0x500, XSC_CMD_OP_DESTROY_QP = 0x501, @@ -132,6 +175,10 @@ enum { XSC_CMD_OP_ALLOC_MULTI_VIRTQ = 0x516, XSC_CMD_OP_RELEASE_MULTI_VIRTQ = 0x517, XSC_CMD_OP_QUERY_QP_FLUSH_STATUS = 0x518, + XSC_CMD_OP_ALLOC_QPN = 0x519, + XSC_CMD_OP_DEALLOC_QPN = 0x520, + XSC_CMD_OP_SET_QP_INFO = 0x521, + XSC_CMD_QP_UNSET_QP_INFO = 0x522, XSC_CMD_OP_CREATE_PSV = 0x600, XSC_CMD_OP_DESTROY_PSV = 0x601, @@ 
-163,6 +210,8 @@ enum { XSC_CMD_OP_ENABLE_NIC_HCA = 0x810, XSC_CMD_OP_DISABLE_NIC_HCA = 0x811, XSC_CMD_OP_MODIFY_NIC_HCA = 0x812, + XSC_CMD_OP_QUERY_PKT_DST_INFO = 0x813, + XSC_CMD_OP_MODIFY_PKT_DST_INFO = 0x814, XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x820, XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x821, @@ -181,6 +230,8 @@ enum { XSC_CMD_OP_MODIFY_LINK_INFO = 0x834, XSC_CMD_OP_QUERY_FEC_PARAM = 0x835, XSC_CMD_OP_MODIFY_FEC_PARAM = 0x836, + XSC_CMD_OP_MODIFY_NIC_VPORT_UC_MAC = 0x837, + XSC_CMD_OP_MODIFY_NIC_VPORT_MC_MAC = 0x838, XSC_CMD_OP_LAG_CREATE = 0x840, XSC_CMD_OP_LAG_ADD_MEMBER = 0x841, @@ -194,6 +245,8 @@ enum { XSC_CMD_OP_IOCTL_FLOW = 0x900, XSC_CMD_OP_IOCTL_OTHER = 0x901, + XSC_CMD_OP_IOCTL_NETLINK = 0x902, + XSC_CMD_OP_IOCTL_GET_HW_COUNTERS = 0x903, XSC_CMD_OP_IOCTL_SET_DSCP_PMT = 0x1000, XSC_CMD_OP_IOCTL_GET_DSCP_PMT = 0x1001, @@ -221,6 +274,8 @@ enum { XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD = 0x1017, XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH = 0x1018, XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS = 0x1019, + XSC_CMD_OP_IOCTL_SET_PFC_NEW = 0x101a, + XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS_NEW = 0x101b, XSC_CMD_OP_IOCTL_SET_ENABLE_RP = 0x1030, XSC_CMD_OP_IOCTL_SET_ENABLE_NP = 0x1031, @@ -251,6 +306,7 @@ enum { XSC_CMD_OP_SET_MTU = 0x1100, XSC_CMD_OP_QUERY_ETH_MAC = 0X1101, + XSC_CMD_OP_QUERY_MTU = 0X1102, XSC_CMD_OP_QUERY_HW_STATS = 0X1200, XSC_CMD_OP_QUERY_PAUSE_CNT = 0X1201, @@ -258,6 +314,8 @@ enum { XSC_CMD_OP_QUERY_HW_STATS_RDMA = 0X1203, XSC_CMD_OP_QUERY_HW_STATS_ETH = 0X1204, XSC_CMD_OP_QUERY_HW_GLOBAL_STATS = 0X1210, + XSC_CMD_OP_QUERY_HW_PF_UC_STATS = 0X1211, + XSC_CMD_OP_QUERY_HW_PRS_CHK_ERR_STATS = 0x1212, XSC_CMD_OP_SET_RTT_EN = 0X1220, XSC_CMD_OP_GET_RTT_EN = 0X1221, @@ -272,13 +330,30 @@ enum { XSC_CMD_OP_AP_FEAT = 0x1400, XSC_CMD_OP_PCIE_LAT_FEAT = 0x1401, + XSC_CMD_OP_OOO_STATISTIC_FEAT = 0x1402, XSC_CMD_OP_GET_LLDP_STATUS = 0x1500, XSC_CMD_OP_SET_LLDP_STATUS = 0x1501, XSC_CMD_OP_SET_VPORT_RATE_LIMIT = 0x1600, + XSC_CMD_OP_IOCTL_SET_ROCE_ACCL = 0x1700, + XSC_CMD_OP_IOCTL_GET_ROCE_ACCL = 0x1701, + XSC_CMD_OP_IOCTL_SET_ROCE_ACCL_NEXT = 0x1702, + XSC_CMD_OP_IOCTL_GET_ROCE_ACCL_NEXT = 0x1703, + XSC_CMD_OP_IOCTL_PRGRMMBL_CC = 0x1704, + XSC_CMD_OP_IOCTL_SET_FLEXCC_NEXT = 0x1705, + XSC_CMD_OP_IOCTL_GET_FLEXCC_NEXT = 0x1706, + XSC_CMD_OP_IOCTL_GET_STAT_FLEXCC_NEXT = 0x1707, + XSC_CMD_OP_IOCTL_GET_SPORT_ROCE_ACCL_NEXT = 0x1708, + XSC_CMD_OP_IOCTL_SET_ROCE_ACCL_DISC_SPORT = 0x1709, + XSC_CMD_OP_IOCTL_GET_ROCE_ACCL_DISC_SPORT = 0x170a, + + XSC_CMD_OP_GET_LINK_SUB_STATE = 0x1800, XSC_CMD_OP_SET_PORT_ADMIN_STATUS = 0x1801, + + XSC_CMD_OP_IOCTL_GET_BYTE_CNT = 0x1900, + XSC_CMD_OP_USER_EMU_CMD = 0x8000, XSC_CMD_OP_MAX @@ -288,6 +363,10 @@ enum { XSC_CMD_EVENT_RESP_CHANGE_LINK = 0x0001, XSC_CMD_EVENT_RESP_TEMP_WARN = 0x0002, XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION = 0x0004, + XSC_CMD_EVENT_RECV_TUNNEL_CMD_REQ = 0x0008, + XSC_CMD_EVENT_RECV_TUNNEL_CMD_RSP = 0x0010, + XSC_CMD_EVENT_CHANGE_TO_EXCLUSIVE = 0x0020, + XSC_CMD_EVENT_CHANGE_TO_SHARE = 0x0040, }; enum xsc_eth_qp_num_sel { @@ -331,6 +410,7 @@ enum { MODULE_SPEED_200G_R4, MODULE_SPEED_200G_R8, MODULE_SPEED_400G_R8, + MODULE_SPEED_400G_R4, }; enum xsc_dma_direct { @@ -348,6 +428,9 @@ enum xsc_hw_feature_flag { XSC_HW_THIRD_FEATURE = 0x4, XSC_HW_PFC_STALL_STATS_SUPPORT = 0x8, XSC_HW_RDMA_CM_SUPPORT = 0x20, + XSC_HW_OFFLOAD_UNSUPPORT = 0x40, + XSC_HW_PF_UC_STATISTIC_SUPPORT = 0x80, + XSC_HW_PRGRMMBL_CC_SUPPORT = 0x100, XSC_HW_LAST_FEATURE = 0x80000000, }; @@ -369,6 +452,30 @@ struct xsc_outbox_hdr { __be16 ver; }; +enum { + DRIVER_INSTANCE_LAUNCH, + 
DRIVER_INSTANCE_PHASE_OUT, + DRIVER_INSTANCE_UPDATE_REP_FUNC, +}; + +struct xsc_cmd_announce_driver_instance_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 rep_func_id; + u8 status; + u8 rsvd[5]; +}; + +enum { + EXCLUSIVE_MODE, + SHARE_MODE, +}; + +struct xsc_cmd_announce_driver_instance_mbox_out { + struct xsc_outbox_hdr hdr; + u8 resource_access_mode; + u8 rsvd[7]; +}; + struct xsc_alloc_ia_lock_mbox_in { struct xsc_inbox_hdr hdr; u8 lock_num; @@ -421,6 +528,18 @@ struct xsc_create_cq_mbox_in { __be64 pas[]; }; +struct xsc_cq_context_ex { + struct xsc_cq_context ctx; + u8 page_shift; + u8 rsvd[7]; +}; + +struct xsc_create_cq_ex_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context_ex ctx_ex; + __be64 pas[]; +}; + struct xsc_create_cq_mbox_out { struct xsc_outbox_hdr hdr; __be32 cqn; @@ -438,6 +557,29 @@ struct xsc_destroy_cq_mbox_out { u8 rsvd[8]; }; +struct xsc_set_cq_context_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context_ex ctx_ex; +}; + +struct xsc_set_cq_context_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 cqn; + __be32 cq_pa_list_base; +}; + +struct xsc_set_cq_buf_pa_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 pa_list_start; + __be32 pa_num; + __be64 pas[]; +}; + +struct xsc_set_cq_buf_pa_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + /*QP mbox*/ struct xsc_create_qp_request { __be16 input_qpn; @@ -451,7 +593,8 @@ struct xsc_create_qp_request { __be16 cqn_recv; __be16 glb_funcid; /*rsvd,rename logic_port used to transfer logical_port to fw*/ - u8 rsvd[2]; + u8 page_shift; + u8 rsvd; __be64 pas[]; }; @@ -486,6 +629,12 @@ struct xsc_query_qp_flush_status_mbox_out { struct xsc_outbox_hdr hdr; }; +enum qp_access_flag { + QP_ACCESS_REMOTE_READ = (1 << 0), + QP_ACCESS_REMOTE_WRITE = (1 << 1), +}; + +#define XSC_QP_CONTEXT_V1 1 struct xsc_qp_context { __be32 remote_qpn; __be32 cqn_send; @@ -517,6 +666,9 @@ struct xsc_qp_context { __be16 lag_id; __be16 func_id; __be16 rsvd; + u8 no_need_wait; + u8 rsvd0[3]; + __be32 qp_access_flags; }; struct xsc_query_qp_mbox_in { @@ -534,7 +686,6 @@ struct xsc_modify_qp_mbox_in { struct xsc_inbox_hdr hdr; __be32 qpn; struct xsc_qp_context ctx; - u8 no_need_wait; }; struct xsc_modify_qp_mbox_out { @@ -586,6 +737,49 @@ struct xsc_release_multi_virtq_mbox_out { __be32 rsvd3; }; +struct xsc_alloc_qpn_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_cnt; + u8 qp_type; + u8 rsvd[5]; +}; + +struct xsc_alloc_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 qpn_base; +}; + +struct xsc_dealloc_qpn_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qpn_base; + __be16 qp_cnt; + u8 qp_type; + u8 rsvd[3]; +}; + +struct xsc_dealloc_qpn_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_set_qp_info_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_qp_request qp_info; +}; + +struct xsc_set_qp_info_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_unset_qp_info_in { + struct xsc_inbox_hdr hdr; + __be16 qpn; + u8 rsvd[6]; +}; + +struct xsc_unset_qp_info_out { + struct xsc_outbox_hdr hdr; +}; + /* MSIX TABLE mbox */ struct xsc_msix_table_info_mbox_in { struct xsc_inbox_hdr hdr; @@ -607,7 +801,7 @@ struct xsc_eq_context { u8 log_eq_sz; __be16 glb_func_id; u8 is_async_eq; - u8 rsvd[1]; + u8 page_shift; }; struct xsc_create_eq_mbox_in { @@ -666,9 +860,9 @@ struct xsc_dealloc_pd_mbox_out { struct xsc_register_mr_request { __be32 pdn; __be32 pa_num; - __be32 len; + __be64 len; __be32 mkey; - u8 rsvd; + u8 is_gpu; u8 acc; u8 page_mode; u8 map_en; @@ -757,6 +951,37 @@ struct xsc_destroy_mkey_mbox_out { u8 rsvd; }; +struct 
xsc_mr_info { + __be32 mpt_idx; + __be32 mtt_base; + __be32 mtt_num; +}; + +struct xsc_cmd_sync_mr_to_fw_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[6]; + __be16 mr_num; + struct xsc_mr_info data[]; +}; + +struct xsc_cmd_sync_mr_to_fw_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_sync_mr_from_fw_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 start; + u8 rsvd[4]; +}; + +struct xsc_cmd_sync_mr_from_fw_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[6]; + __be16 mr_num; + struct xsc_mr_info data[]; +}; + struct xsc_access_reg_mbox_in { struct xsc_inbox_hdr hdr; u8 rsvd0[2]; @@ -953,8 +1178,39 @@ struct xsc_hca_cap { __be32 qp_rate_limit_max; struct xsc_fw_version fw_ver; u8 lag_logic_port_ofst; + /* V1 */ + __be64 max_mr_size; + __be16 max_cmd_in_len; + __be16 max_cmd_out_len; + /* V2 */ + __be32 max_qp; + __be32 max_cq; + __be32 max_pd; + __be32 max_mtt; + /* V3 */ + __be32 mpt_tbl_addr; + __be32 mpt_tbl_depth; + __be32 mpt_tbl_width; + __be32 mtt_inst_base_addr; + __be32 mtt_inst_stride; + __be32 mtt_inst_num_log; + __be32 mtt_inst_depth; + /* V4 */ + __be16 vf_funcid_base[XSC_MAX_NUM_PCIE_INTF][XSC_MAX_PF_NUM_PER_PCIE]; + __be16 vf_funcid_top[XSC_MAX_NUM_PCIE_INTF][XSC_MAX_PF_NUM_PER_PCIE]; + __be16 pf_funcid_base[XSC_MAX_NUM_PCIE_INTF]; + __be16 pf_funcid_top[XSC_MAX_NUM_PCIE_INTF]; + u8 pcie_no; + u8 pf_id; + __be16 vf_id; + u8 pcie_host_num; + u8 pf_num_per_pcie; }; +#define CMD_QUERY_HCA_CAP_V1 1 +#define CMD_QUERY_HCA_CAP_V2 2 +#define CMD_QUERY_HCA_CAP_V3 3 +#define CMD_QUERY_HCA_CAP_V4 4 struct xsc_cmd_query_hca_cap_mbox_in { struct xsc_inbox_hdr hdr; __be16 cpu_num; @@ -1130,6 +1386,30 @@ struct xsc_modify_nic_vport_context_in { struct xsc_nic_vport_context nic_vport_ctx; }; +struct xsc_modify_nic_vport_uc_mac_out { + struct xsc_outbox_hdr hdr; + __be16 out_pct_prio; +}; + +struct xsc_modify_nic_vport_uc_mac_in { + struct xsc_inbox_hdr hdr; + __be16 in_pct_prio; + bool add_mac; + u8 mac_addr[6]; +}; + +struct xsc_modify_nic_vport_mc_mac_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_modify_nic_vport_mc_mac_in { + struct xsc_inbox_hdr hdr; + u8 action; + u8 mac[ETH_ALEN]; + u8 rsvd[1]; +}; + struct xsc_query_hca_vport_context_out { struct xsc_outbox_hdr hdr; struct xsc_hca_vport_context hca_vport_ctx; @@ -1226,6 +1506,15 @@ struct xsc_traffic_counter { u64 bytes; }; +struct xsc_link_sub_state_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_link_sub_state_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 state_code; +}; + struct xsc_query_vport_counter_out { struct xsc_outbox_hdr hdr; struct xsc_traffic_counter received_errors; @@ -1290,66 +1579,77 @@ struct xsc_modify_raw_qp_mbox_out { #define ETH_ALEN 6 +#define LAG_CMD_V1 1 + +struct slave_func_data { + u8 pf_id; + u8 pcie_no; + u8 valid; +}; + struct xsc_create_lag_request { __be16 lag_id; u8 lag_type; u8 lag_sel_mode; - u8 mac_idx; + u8 pf_idx; u8 netdev_addr[ETH_ALEN]; u8 bond_mode; - u8 slave_status; + u8 slave_status; }; struct xsc_add_lag_member_request { __be16 lag_id; u8 lag_type; u8 lag_sel_mode; - u8 mac_idx; + u8 pf_idx; u8 netdev_addr[ETH_ALEN]; u8 bond_mode; - u8 slave_status; - u8 mad_mac_idx; + u8 slave_status; + u8 roce_pf_idx; + struct slave_func_data roce_pf_func_data; }; struct xsc_remove_lag_member_request { __be16 lag_id; u8 lag_type; - u8 mac_idx; - u8 mad_mac_idx; + u8 pf_idx; + u8 roce_pf_idx; u8 bond_mode; - u8 is_roce_lag_xdev; + u8 is_roce_lag_xdev; u8 not_roce_lag_xdev_mask; + struct slave_func_data roce_pf_func_data; + struct 
slave_func_data func_data[6]; }; struct xsc_update_lag_member_status_request { __be16 lag_id; u8 lag_type; - u8 mac_idx; + u8 pf_idx; u8 bond_mode; - u8 slave_status; + u8 slave_status; u8 rsvd; }; struct xsc_update_lag_hash_type_request { __be16 lag_id; - u8 lag_sel_mode; + u8 lag_sel_mode; u8 rsvd[5]; }; struct xsc_destroy_lag_request { __be16 lag_id; u8 lag_type; - u8 mac_idx; - u8 bond_mode; - u8 slave_status; + u8 pf_idx; + u8 bond_mode; + u8 slave_status; u8 rsvd[3]; }; struct xsc_set_lag_qos_request { - __be16 lag_id; - u8 member_idx; - u8 lag_op; - u8 resv[4]; + __be16 lag_id; + u8 member_idx; + u8 lag_op; + u8 resv[4]; }; struct xsc_create_lag_mbox_in { @@ -1501,6 +1801,12 @@ struct xsc_hw_stats_rdma_pf { /*global*/ u64 rdma_loopback_pkts; u64 rdma_loopback_bytes; + /*for diamond*/ + u64 out_of_sequence_sr; + u64 packet_seq_err_sr; + u64 rdma_ndp_rx_pkts; + u64 rdma_ndp_rx_trimmed_pkts; + u64 rdma_ndp_trimmed_pkts_sr; }; struct xsc_hw_stats_rdma_vf { @@ -1548,6 +1854,11 @@ struct xsc_hw_stats_eth_pf { u64 rdma_loopback_bytes; }; +struct xsc_hw_uc_stats_eth { + u64 tx_unicast_phy; + u64 rx_unicast_phy; +}; + struct xsc_hw_stats_eth_vf { /*by function*/ u64 rdma_tx_pkts; @@ -1565,6 +1876,12 @@ struct xsc_hw_stats_eth { } stats; }; +struct xsc_hw_uc_stats { + u8 is_pf; + u8 rsv[3]; + struct xsc_hw_uc_stats_eth eth_uc_stats; +}; + struct xsc_hw_stats_mbox_in { struct xsc_inbox_hdr hdr; u8 mac_port; @@ -1593,6 +1910,16 @@ struct xsc_hw_global_stats_rdma { u64 cqe_msg_code_error; }; +struct xsc_hw_uc_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; +}; + +struct xsc_hw_uc_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_uc_stats hw_uc_stats; +}; + struct xsc_hw_global_stats_mbox_in { struct xsc_inbox_hdr hdr; u8 rsv[4]; @@ -1618,6 +1945,42 @@ struct xsc_pfc_stall_stats_mbox_out { struct xsc_pfc_stall_stats pfc_stall_stats; }; +struct xsc_prs_chk_err_stats { + __be64 inner_sip_dip_eq; /* sip == dip */ + __be64 inner_sip_invalid; /* sip is loopbak/multicast/0/linklocal */ + __be64 inner_smac_invalid; /* smac is 0/multicast/broadcast */ + __be64 inner_ip_ver; /* ip ver !=4 && !=6 */ + __be64 inner_smac_dmac_eq; /* smac == dmac */ + __be64 inner_dmac_zero; /* dmac is zero */ + __be64 outer_sip_dip_eq; /* sip == dip */ + __be64 outer_sip_invalid; /* sip is loopbak/multicast/0/linklocal */ + __be64 outer_smac_invalid; /* smac is 0/multicast/broadcast */ + __be64 outer_ip_ver; /* ip ver !=4 && !=6 */ + __be64 outer_smac_dmac_eq; /* smac == dmac */ + __be64 outer_dmac_zero; /* dmac is zero */ + __be64 inner_udp_len; /* udp len error */ + __be64 inner_tp_checksum; /* tcp/udp checksum error */ + __be64 inner_ipv4_checksum; /* ipv4 checksum error */ + __be64 inner_ip_ttl; /* ip ttl is 0 */ + __be64 inner_ip_len; /* ip len error */ + __be64 inner_ipv4_ihl; /* ipv4 ihl error */ + __be64 outer_udp_len; /* udp len error */ + __be64 outer_tp_checksum; /* tcp/udp checksum error */ + __be64 outer_ipv4_checksum; /* ipv4 checksum error */ + __be64 outer_ip_ttl; /* ip ttl is 0 */ + __be64 outer_ip_len; /* ip len error */ + __be64 outer_ipv4_ihl; /* ipv4 ihl error */ +}; + +struct xsc_query_hw_prs_chk_err_stats_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_query_hw_prs_chk_err_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_prs_chk_err_stats stats; +}; + struct xsc_dscp_pmt_set { u8 dscp; u8 priority; @@ -1826,6 +2189,22 @@ struct xsc_event_query_fecparam_mbox_out { (PFC_CFG_CHECK_TIMEOUT_US / PFC_CFG_CHECK_SLEEP_TIME_US) #define PFC_CFG_CHECK_VALID_CNT 3 +#define 
PFC_CFG_CHECK_TIMEOUT_CNT 80 +#define PFC_CFG_CHECK_SLEEP_TIME_MS 100 + +enum { + SET_PFC_STATUS_INIT = 0, + SET_PFC_STATUS_IN_PROCESS, + SET_PFC_STATUS_MAX, +}; + +enum { + SET_PFC_COMP_SUCCESS = 0, + SET_PFC_COMP_FAIL, + SET_PFC_COMP_TIMEOUT, + SET_PFC_COMP_MAX, +}; + enum { PFC_OP_ENABLE = 0, PFC_OP_DISABLE, @@ -1883,6 +2262,25 @@ struct xsc_pfc_get_cfg_status_mbox_out { struct xsc_outbox_hdr hdr; }; +struct xsc_pfc_set_new { + u8 req_prio; + u8 pfc_on; + u8 pfc_op; + u8 cur_prio_en;//every bit represents one priority, eg: 0x1 represents prio_0 pfc on + u8 lossless_num;//num of supported lossless priority +}; + +struct xsc_get_pfc_cfg_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; +}; + +struct xsc_get_pfc_cfg_status_mbox_out { + struct xsc_outbox_hdr hdr; + u8 status; + u8 comp; +}; + struct xsc_rate_limit_set { u32 rate_cir; u32 limit_id; @@ -2134,6 +2532,23 @@ struct xsc_cc_cmd_stat { u32 reset_bytecount; }; +struct xsc_perf_rate_measure { + u32 qp_num; + u32 qp_id_list[XSC_QP_MEASURE_QP_NUM_MAX]; + u32 qp_byte_cnt[XSC_QP_MEASURE_QP_NUM_MAX]; + u32 hw_ts; +}; + +struct xsc_perf_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_perf_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + struct xsc_set_mtu_mbox_in { struct xsc_inbox_hdr hdr; __be16 mtu; @@ -2174,6 +2589,7 @@ struct hwc_set_t { u8 per_dst_grp_cnt; u8 dcbx_status[XSC_MAX_MAC_NUM]; u8 dcbx_port_cnt; + u8 read_flush; }; struct hwc_get_t { @@ -2215,6 +2631,8 @@ struct hwc_get_t { u8 cur_dcbx_status[XSC_MAX_MAC_NUM]; u8 next_dcbx_status[XSC_MAX_MAC_NUM]; u8 dcbx_port_cnt; + u8 cur_read_flush; + u8 next_read_flush; }; struct xsc_set_mtu_mbox_out { @@ -2231,6 +2649,15 @@ struct xsc_query_eth_mac_mbox_out { u8 mac[6]; }; +struct xsc_query_mtu_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_query_mtu_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 mtu; +}; + struct xsc_query_pause_cnt_mbox_in { struct xsc_inbox_hdr hdr; u16 mac_port; @@ -2247,7 +2674,8 @@ enum { XSC_TBM_CAP_HASH_PPH = 0, XSC_TBM_CAP_RSS, XSC_TBM_CAP_PP_BYPASS, - XSC_TBM_CAP_PCT_DROP_CONFIG, + XSC_TBM_CAP_MAC_DROP_CONFIG, + XSC_TBM_CAP_PF_ISOLATE_CONFIG, }; struct xsc_nic_attr { @@ -2318,6 +2746,32 @@ struct xsc_cmd_modify_nic_hca_mbox_out { u8 rsvd0[4]; }; +struct xsc_cmd_query_pkt_dst_info_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_bitmap; + u16 pkt_bitmap; + u32 resv0; +}; + +struct xsc_cmd_query_pkt_dst_info_mbox_out { + struct xsc_outbox_hdr hdr; + u16 dst_info[8]; + u32 resv0; +}; + +struct xsc_cmd_modify_pkt_dst_info_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_bitmap; + u16 pkt_bitmap; + u16 dst_info; + u16 resv0; +}; + +struct xsc_cmd_modify_pkt_dst_info_mbox_out { + struct xsc_outbox_hdr hdr; + u32 resv0; +}; + struct xsc_function_reset_mbox_in { struct xsc_inbox_hdr hdr; __be16 glb_func_id; @@ -2329,6 +2783,38 @@ struct xsc_function_reset_mbox_out { u8 rsvd[8]; }; +enum { + XSC_OOO_STATISTIC_FEAT_SET_RESET = 0, + XSC_OOO_STATISTIC_FEAT_SET_RANGE, + XSC_OOO_STATISTIC_FEAT_GET_RANGE, + XSC_OOO_STATISTIC_FEAT_GET_SHOW, +}; + +#define XSC_OOO_STATISTIC_RANGE_MAX 16 +#define XSC_OOO_STATISTIC_SHOW_MAX 17 + +#define XSC_OOO_STATISTIC_RESET 1 +#define XSC_OOO_STATISTIC_RANGE_VAL_MIN 0 +#define XSC_OOO_STATISTIC_RANGE_VAL_MAX 4095 + +struct xsc_ooo_statistic { + u8 ooo_statistic_reset; + u32 ooo_statistic_range[XSC_OOO_STATISTIC_RANGE_MAX]; + u32 ooo_statistic_show[XSC_OOO_STATISTIC_SHOW_MAX]; +}; + +struct xsc_ooo_statistic_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_ooo_statistic_feature_opcode; 
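+ /* opcode above selects one of the XSC_OOO_STATISTIC_FEAT_* operations; ooo_statistic below carries its arguments and results */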
+ struct xsc_ooo_statistic ooo_statistic; +}; + +struct xsc_ooo_statistic_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_ooo_statistic_feature_opcode; + struct xsc_ooo_statistic ooo_statistic; +}; + enum { XSC_PCIE_LAT_FEAT_SET_EN = 0, XSC_PCIE_LAT_FEAT_GET_EN, @@ -2480,6 +2966,60 @@ struct xsc_set_debug_info_mbox_out { u8 rsvd[8]; }; +struct xsc_roce_accl_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_roce_accl_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +#define XSC_DISCRETE_SPORT_NUM_MAX 128 + +struct xsc_roce_accl_set { + u64 sr_timeout; + u32 flag; + u8 retrans_mode; + u8 sr_mode; + u16 sr_count; + u16 sr_drop_limit; + u16 ndp_dst_port; + u8 bth_rsv7; + u8 packet_spray_mode; + u16 cont_sport_start; + u16 max_num_exponent; + u16 disturb_period; + u16 disturb_th; + u8 mac_port; + u8 lag_mode; +}; + +struct xsc_roce_accl_get { + u64 sr_timeout; + u8 retrans_mode; + u8 sr_mode; + u16 sr_count; + u16 sr_drop_limit; + u16 ndp_dst_port; + u8 bth_rsv7; + u8 packet_spray_mode; + u16 cont_sport_start; + u16 max_num_exponent; + u16 disturb_period; + u16 disturb_th; + u8 lag_mode; + u8 rsv[5]; +}; + +struct xsc_roce_accl_disc_sport { + u16 discrete_sports[XSC_DISCRETE_SPORT_NUM_MAX]; + u32 discrete_sports_num; + u8 mac_port; + u8 rsv[3]; +}; + struct xsc_cmd_enable_relaxed_order_in { struct xsc_inbox_hdr hdr; u8 rsvd[8]; @@ -2510,4 +3050,328 @@ struct xsc_cmd_activate_hw_config_mbox_out { u8 rsvd[8]; }; +struct xsc_cmd_read_flush_hw_config_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_read_flush_hw_config_mbox_out { + struct xsc_outbox_hdr hdr; + u8 read_flush; + u8 rsvd[7]; +}; + +enum { + ROCE_ACCL_NEXT_FLAG_SHOW_SHIFT = 0, + ROCE_ACCL_NEXT_FLAG_SACK_THRESHOLD_SHIFT = 2, + ROCE_ACCL_NEXT_FLAG_SACK_TIMEOUT_SHIFT = 3, + ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_MODE_SHIFT = 4, + ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_REQ_THRESHOLD_SHIFT = 5, + ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_RSP_WINDOW_SHIFT = 6, + ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_RSP_TIMEOUT_SHIFT = 7, + ROCE_ACCL_NEXT_FLAG_PATH_NUM_SHIFT = 8, + ROCE_ACCL_NEXT_FLAG_PACKET_SPRAY_MODE_SHIFT = 9, + ROCE_ACCL_NEXT_FLAG_QP_ID_SHIFT = 10, + ROCE_ACCL_NEXT_FLAG_PATH_UDP_SPORT_SHIFT = 11, + ROCE_ACCL_NEXT_FLAG_SHOW_PATH_UDP_SPORT_SHIFT = 12, + ROCE_ACCL_NEXT_FLAG_MAX_NUM = 13, +}; + +#define ROCE_ACCL_NEXT_FLAG_SHOW_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_SHOW_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_SACK_THRESHOLD_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_SACK_THRESHOLD_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_SACK_TIMEOUT_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_SACK_TIMEOUT_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_MODE_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_MODE_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_REQ_THRESHOLD_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_REQ_THRESHOLD_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_RSP_WINDOW_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_RSP_WINDOW_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_RSP_TIMEOUT_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_ACK_AGGREGATION_RSP_TIMEOUT_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_PATH_NUM_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_PATH_NUM_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_PACKET_SPRAY_MODE_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_PACKET_SPRAY_MODE_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_QP_ID_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_QP_ID_SHIFT) +#define ROCE_ACCL_NEXT_FLAG_PATH_UDP_SPORT_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_PATH_UDP_SPORT_SHIFT) +#define 
ROCE_ACCL_NEXT_FLAG_SHOW_PATH_UDP_SPORT_MASK \ + (1ULL << ROCE_ACCL_NEXT_FLAG_SHOW_PATH_UDP_SPORT_SHIFT) + +struct xsc_roce_accl_next_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[0]; +}; + +struct xsc_roce_accl_next_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[0]; +}; + +#define ROCE_ACCL_NEXT_PATH_UDP_SPORT_NUM_MAX 16 + +struct xsc_roce_accl_next_set { + u64 flag; + u32 sack_threshold; + u32 sack_timeout; + u32 ack_aggregation_mode; + u32 ack_aggregation_req_threshold; + u32 ack_aggregation_rsp_window; + u32 ack_aggregation_rsp_timeout; + u32 path_num; + u32 packet_spray_mode; + u32 qp_id; + u32 path_udp_sport[ROCE_ACCL_NEXT_PATH_UDP_SPORT_NUM_MAX]; + u32 path_udp_sport_num; +}; + +struct xsc_roce_accl_next_get { + u32 sack_threshold; + u32 sack_timeout; + u32 ack_aggregation_mode; + u32 ack_aggregation_req_threshold; + u32 ack_aggregation_rsp_window; + u32 ack_aggregation_rsp_timeout; + u32 path_num; + u32 packet_spray_mode; +}; + +struct xsc_flexcc_next_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_flexcc_next_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +#define YUN_CC_CMD_DATA_LEN_MAX 120 + +enum { + YUN_CC_CMD_SET_SP_TH, + YUN_CC_CMD_SET_RTT_INTERVAL_INBAND, + YUN_CC_CMD_SET_RTT_INTERVAL_OUTBAND, + YUN_CC_CMD_SET_BYTE_RST_INTERVAL, + YUN_CC_CMD_SET_BWU_INTERVAL, + YUN_CC_CMD_SET_CSP_DSCP, + YUN_CC_CMD_SET_RTT_DSCP_OUTBAND, + YUN_CC_CMD_SET_CSP_ECN_AGGREGATION, + YUN_CC_CMD_SET_CC_ALG, + YUN_CC_CMD_SET_ENABLE, + YUN_CC_CMD_GET_ALL, + YUN_CC_CMD_GET_ALL_STAT, + YUN_CC_CMD_SET_CE_PROC_INTERVAL, +}; + +struct yun_cc_next_get_all { + u32 sp_threshold; + u32 rtt_interval_inband; + u32 rtt_interval_outband; + u32 byte_rst_interval; + u32 bwu_interval; + u32 csp_dscp; + u32 rtt_dscp_outband; + u32 csp_ecn_aggregation; + u32 enable; + u32 ce_proc_interval; + u32 cc_alg; + u32 cc_alg_mask; + u8 cc_alg_slot1_vrsn[32]; + u8 cc_alg_slot2_vrsn[32]; +}; + +struct yun_cc_next_get_all_stat { + u32 evt_sp_deliverd; + u32 evt_ce_deliverd; + u32 evt_rtt_req_deliverd; + u32 evt_rtt_rsp_deliverd; + u32 evt_rto_deliverd; + u32 evt_sack_deliverd; + u32 evt_byte_deliverd; + u32 evt_time_deliverd; + u32 evt_bwu_deliverd; + u32 evt_sp_aggregated; + u32 evt_ce_aggregated; + u32 evt_rtt_req_aggregated; + u32 evt_rtt_rsp_aggregated; + u32 evt_rto_aggregated; + u32 evt_sack_aggregated; + u32 evt_byte_aggregated; + u32 evt_time_aggregated; + u32 evt_bwu_aggregated; + u32 evt_sp_dropped; + u32 evt_ce_dropped; + u32 evt_rtt_req_dropped; + u32 evt_rtt_rsp_dropped; + u32 evt_rto_dropped; + u32 evt_sack_dropped; + u32 evt_byte_dropped; + u32 evt_time_dropped; + u32 evt_bwu_dropped; +}; + +struct yun_cc_next_sp_th { + u32 threshold; +}; + +struct yun_cc_next_rtt_interval_inband { + u32 interval; +}; + +struct yun_cc_next_rtt_interval_outband { + u32 interval; +}; + +struct yun_cc_next_byte_rst_interval { + u32 interval; +}; + +struct yun_cc_next_bwu_interval { + u32 interval; +}; + +struct yun_cc_next_csp_dscp { + u32 dscp; +}; + +struct yun_cc_next_rtt_dscp_outband { + u32 dscp; +}; + +struct yun_cc_csp_ecn_aggregation { + u32 agg; +}; + +struct yun_cc_next_cc_alg { + u32 user_alg_en; + u32 slot_mask; + u32 slot; +}; + +struct yun_cc_enable { + u32 en; +}; + +struct yun_cc_next_cmd_hdr { + u32 cmd; + u32 len; + u8 data[]; +}; + +struct yun_cc_next_ce_proc_interval { + u32 interval; +}; + +#define FLEXCC_IOCTL_USER_DATA_MAX 240 + +struct flexcc_ioctl_buf { + u8 data[FLEXCC_IOCTL_USER_DATA_MAX]; +}; + +struct flexcc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + 
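+/* Illustrative only: one way user space might drive the programmable CC
+ * control node registered as "r_prgrm_cc_ctl". The device node path and the
+ * exact layout expected inside flexcc_ioctl_buf are assumptions, not defined
+ * by this patch; FLEXCC_IOCTL_CMD is the ioctl defined in
+ * xsc_rdma_prgrmmbl_cc_ctrl.c.
+ *
+ *	struct flexcc_ioctl_buf buf = {};
+ *	struct yun_cc_next_cmd_hdr *cmd = (void *)buf.data;
+ *	int fd;
+ *
+ *	cmd->cmd = YUN_CC_CMD_GET_ALL;
+ *	cmd->len = 0;
+ *	fd = open(<path of the r_prgrm_cc_ctl port-ctrl node>, O_RDWR);
+ *	ioctl(fd, FLEXCC_IOCTL_CMD, &buf);
+ *	(the response is copied back into buf.data, e.g. a struct yun_cc_next_get_all)
+ */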
+struct flexcc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cmd_get_ioctl_info_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 ioctl_opcode; + __be16 length; + u8 rsvd[4]; + u8 data[]; +}; + +struct xsc_cmd_get_ioctl_info_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[]; +}; + +struct xsc_target_info { + __be32 domain; + __be32 bus; + __be32 devfn; + __be32 data_length; +}; + +struct xsc_send_tunnel_cmd_req_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_target_info target; + u8 data[]; +}; + +struct xsc_send_tunnel_cmd_req_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_recv_tunnel_cmd_req_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_recv_tunnel_cmd_req_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_target_info target; + u8 data[]; +}; + +struct xsc_send_tunnel_cmd_resp_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; + u8 data[]; +}; + +struct xsc_send_tunnel_cmd_resp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_recv_tunnel_cmd_resp_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_recv_tunnel_cmd_resp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[]; +}; + +struct xsc_cmd_netlink_msg_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 nlmsg_len; + u8 rsvd[6]; + u8 data[]; +}; + +struct xsc_cmd_netlink_msg_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[]; +}; + +struct xsc_cmd_ioctl_get_hw_counters_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 length; + u8 rsvd[4]; + u8 data[]; +}; + +struct xsc_cmd_ioctl_get_hw_counters_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[]; +}; + #endif /* XSC_CMD_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h index b912ab8b904e2704f18873fb2dc6a0333bf77355..4959893a6fdbe34b88e15576daceace20401c2dd 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h @@ -26,23 +26,38 @@ #include #include #include +#include #include "common/xsc_cmd.h" #include "common/xsc_ioctl.h" -#include "common/xsc_auto_hw.h" #include "common/driver.h" #include "common/xsc_reg.h" #include "common/xsc_eswitch.h" +#include "common/version.h" + +#if (HOTFIX_NUM == 0) +#define DRIVER_VERSION __stringify(BRANCH_VERSION) "." __stringify(MAJOR_VERSION) "." \ + __stringify(MINOR_VERSION) "." __stringify(BUILD_VERSION) +#else +#define DRIVER_VERSION __stringify(BRANCH_VERSION) "." __stringify(MAJOR_VERSION) "." \ + __stringify(MINOR_VERSION) "." __stringify(BUILD_VERSION) ".H" \ + __stringify(HOTFIX_NUM) +#endif extern uint xsc_debug_mask; extern unsigned int xsc_log_level; +#ifndef mmiowb #define mmiowb() +#endif + #define XSC_PCI_VENDOR_ID 0x1f67 #define XSC_MC_PF_DEV_ID 0x1011 #define XSC_MC_VF_DEV_ID 0x1012 +#define XSC_MC_PF_DEV_ID_DIAMOND 0x1021 +#define XSC_MC_PF_DEV_ID_DIAMOND_NEXT 0x1023 #define XSC_MF_HOST_PF_DEV_ID 0x1051 #define XSC_MF_HOST_VF_DEV_ID 0x1052 @@ -55,15 +70,6 @@ extern unsigned int xsc_log_level; #define XSC_MV_HOST_VF_DEV_ID 0x1152 #define XSC_MV_SOC_PF_DEV_ID 0x1153 -#define REG_ADDR(dev, offset) \ - (xsc_core_is_pf(dev) ? 
((dev->bar) + ((offset) - 0xA0000000)) : ((dev->bar) + (offset))) - -#define REG_WIDTH_TO_STRIDE(width) ((width) / 8) -#define QPM_PAM_TBL_NUM 4 -#define QPM_PAM_TBL_NUM_MASK 3 -#define QPM_PAM_TBL_INDEX_SHIFT 2 -#define QPM_PAM_PAGE_SHIFT 12 - #define XSC_SUB_DEV_ID_MC_50 0xC050 #define XSC_SUB_DEV_ID_MC_100 0xC100 #define XSC_SUB_DEV_ID_MC_200 0xC200 @@ -76,6 +82,7 @@ extern unsigned int xsc_log_level; #define XSC_SUB_DEV_ID_MS_200S 0xA201 #define XSC_SUB_DEV_ID_MS_400M 0xA202 #define XSC_SUB_DEV_ID_MS_200_OCP 0xA203 +#define XSC_SUB_DEV_ID_MS_100S_OCP 0xA204 #define XSC_SUB_DEV_ID_MV_100 0xD100 #define XSC_SUB_DEV_ID_MV_200 0xD200 @@ -96,6 +103,10 @@ enum { XSC_CHIP_UNKNOWN, }; +#ifndef dev_fmt +#define dev_fmt(fmt) fmt +#endif + #define xsc_dev_log(condition, level, dev, fmt, ...) \ do { \ if (condition) \ @@ -160,13 +171,6 @@ do { \ #define XSC_PCIE_NO_SOC 0x1 #define XSC_PCIE_NO_UNSET 0xFF -enum xsc_driver_mode { - HOST_MODE, - SOC_MODE, -}; - -u8 xsc_get_driver_work_mode(void); - enum xsc_dev_event { XSC_DEV_EVENT_SYS_ERROR, XSC_DEV_EVENT_PORT_UP, @@ -219,16 +223,6 @@ struct qp_group_refer { u16 refer_cnt[GROUP_REFER_CNT_SIZE]; }; -struct xsc_priv_device { - char device_name[IB_DEVICE_NAME_MAX]; - dev_t devno; - struct cdev cdev; - struct list_head mem_list; - spinlock_t mem_lock; /* protect mem_list */ - struct radix_tree_root bdf_tree; - spinlock_t bdf_lock; /* protect bdf_tree */ -}; - enum xsc_pci_status { XSC_PCI_STATUS_DISABLED, XSC_PCI_STATUS_ENABLED, @@ -255,7 +249,6 @@ enum { XSC_INTERFACE_ATTACHED, }; -#define CONFIG_XSC_SRIOV 1 enum xsc_coredev_type { XSC_COREDEV_PF, @@ -359,7 +352,7 @@ struct xsc_vport_info { u32 group; }; -#define XSC_L2_ADDR_HASH_SIZE 8 +#define XSC_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE) enum xsc_eswitch_vport_event { XSC_VPORT_UC_ADDR_CHANGE = BIT(0), @@ -471,12 +464,12 @@ struct xsc_port_caps { struct xsc_caps { u8 log_max_eq; - u8 log_max_cq; - u8 log_max_qp; u8 log_max_mkey; - u8 log_max_pd; u8 log_max_srq; u8 log_max_msix; + u32 max_cq; + u32 max_qp; + u32 max_pd; u32 max_cqes; u32 max_wqes; u32 max_sq_desc_sz; @@ -540,7 +533,7 @@ struct xsc_caps { u32 raweth_rss_qp_id_base:16; u16 msix_base; u16 msix_num; - u8 log_max_mtt; + u32 max_mtt; u8 log_max_tso; u32 hca_core_clock; u32 max_rwq_indirection_tables;/*rss_caps*/ @@ -561,7 +554,17 @@ struct xsc_caps { u8 pcie_host; u8 mac_bit; u16 funcid_to_logic_port; + u16 max_cmd_in_len; + u16 max_cmd_out_len; + u64 max_mr_size; u8 lag_logic_port_ofst; + u32 mpt_tbl_addr; + u32 mpt_tbl_depth; + u32 mpt_tbl_width; + u32 mtt_inst_base_addr; + u32 mtt_inst_stride; + u32 mtt_inst_num_log; + u32 mtt_inst_depth; }; struct cache_ent { @@ -684,28 +687,14 @@ struct xsc_cmd_stats { spinlock_t lock; }; -struct xsc_cmd_reg { - u32 req_pid_addr; - u32 req_cid_addr; - u32 rsp_pid_addr; - u32 rsp_cid_addr; - u32 req_buf_h_addr; - u32 req_buf_l_addr; - u32 rsp_buf_h_addr; - u32 rsp_buf_l_addr; - u32 msix_vec_addr; - u32 element_sz_addr; - u32 q_depth_addr; - u32 interrupt_stat_addr; -}; - enum xsc_cmd_status { XSC_CMD_STATUS_NORMAL, XSC_CMD_STATUS_TIMEDOUT, }; +#define XSC_CMD_MAX_RETRY_CNT 3 + struct xsc_cmd { - struct xsc_cmd_reg reg; void *cmd_buf; void *cq_buf; dma_addr_t dma; @@ -739,10 +728,7 @@ struct xsc_cmd { unsigned int irqn; u8 ownerbit_learned; u8 cmd_status; -}; - -struct xsc_lock { - spinlock_t lock; /* xsc spin lock */ + u8 retry_cnt; }; struct xsc_reg_addr { @@ -761,11 +747,14 @@ struct xsc_reg_addr { }; struct xsc_board_info { + u32 ref_cnt; u32 board_id; char board_sn[XSC_BOARD_SN_LEN]; __be64 guid; - u8 
guid_valid; - u8 hw_config_activated; + u32 resource_access_mode; + rwlock_t mr_sync_lock; /* protect mr sync */ + struct list_head func_list; + u32 rep_func_id; }; /* our core device */ @@ -784,6 +773,8 @@ struct xsc_core_device { u8 mac_port; /* mac port */ u8 pcie_no; /* pcie number */ u8 pf_id; + u8 pcie_host_num; + u8 pf_num_per_pcie; u16 vf_id; u16 glb_func_id; /* function id */ @@ -798,17 +789,16 @@ struct xsc_core_device { struct xsc_caps caps; atomic_t num_qps; struct xsc_cmd cmd; - struct xsc_lock reg_access_lock; + spinlock_t reg_access_lock; /* reg access lock */ void *counters_priv; - struct xsc_priv_device priv_device; struct xsc_board_info *board_info; void (*event)(struct xsc_core_device *dev, enum xsc_dev_event event, unsigned long param); - void (*event_handler)(void *adapter); + void (*link_event_handler)(void *adapter); + struct work_struct event_work; - struct xsc_reg_addr regs; u32 chip_ver_h; u32 chip_ver_m; u32 chip_ver_l; @@ -824,15 +814,27 @@ struct xsc_core_device { u8 reg_mr_via_cmdq; u8 user_mode; + u8 read_flush; struct xsc_port_ctrl port_ctrl; + struct xsc_port_ctrl prgrmmbl_cc_ctrl; void *rtt_priv; void *ap_priv; void *pcie_lat; + void *hal; u8 bond_id; struct list_head slave_node; + struct completion recv_tunnel_resp_event; + void (*get_ifname)(void *xdev, u8 *ifname, int len); + void (*get_ibdev_name)(void *xdev, u8 *ibdev_name, int len); + void (*get_ip_addr)(void *xdev, u32 *ip_addr); + int (*get_rdma_ctrl_info)(struct xsc_core_device *xdev, + u16 opcode, void *out, int out_size); + void (*handle_netlink_cmd)(struct xsc_core_device *xdev, void *in, void *out); + void *sock; + struct list_head func_node; }; struct xsc_feature_flag { @@ -912,6 +914,7 @@ void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol); void xsc_dev_list_lock(void); void xsc_dev_list_unlock(void); int xsc_dev_list_trylock(void); +void xsc_get_devinfo(u8 *data, u32 len); int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, int out_size, int func_id); @@ -939,12 +942,6 @@ int xsc_counters_init(struct ib_device *ib_dev, void xsc_counters_fini(struct ib_device *ib_dev, struct xsc_core_device *dev); -int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev); -void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev); - -int xsc_priv_alloc_chrdev_region(void); -void xsc_priv_unregister_chrdev_region(void); - int xsc_eth_sysfs_create(struct net_device *netdev, struct xsc_core_device *dev); void xsc_eth_sysfs_remove(struct net_device *netdev, struct xsc_core_device *dev); int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); @@ -958,7 +955,6 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix); int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num); int xsc_cmd_modify_hca(struct xsc_core_device *dev); -int xsc_query_guid(struct xsc_core_device *dev); void xsc_free_board_info(void); int xsc_irq_eq_create(struct xsc_core_device *dev); @@ -985,7 +981,6 @@ struct cpumask *xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int void mask_cpu_by_node(int node, struct cpumask *dstp); int xsc_get_link_speed(struct xsc_core_device *dev); int xsc_chip_type(struct xsc_core_device *dev); -int xsc_eth_restore_nic_hca(struct xsc_core_device *dev); #define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) @@ -1045,176 +1040,6 @@ static inline bool xsc_rl_is_supported(struct 
xsc_core_device *dev) return false; } -/* define in andes */ -#define HIF_CPM_IDA_DATA_MEM_STRIDE 0x40 - -#define CPM_IAE_CMD_READ 0 -#define CPM_IAE_CMD_WRITE 1 - -#define CPM_IAE_ADDR_REG_STRIDE HIF_CPM_IDA_ADDR_REG_STRIDE - -#define CPM_IAE_DATA_MEM_STRIDE HIF_CPM_IDA_DATA_MEM_STRIDE - -#define CPM_IAE_DATA_MEM_MAX_LEN 16 - -struct iae_cmd { - union { - struct { - u32 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH; - u32 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH; - u32 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH; - }; - unsigned int raw_data; - }; -}; - -static inline void acquire_ia_lock(struct xsc_core_device *xdev, int *iae_idx) -{ - int lock_val; - int lock_vld; - - lock_val = readl(REG_ADDR(xdev, xdev->regs.cpm_get_lock)); - lock_vld = lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT; - if (lock_vld) - *iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK; - else - *iae_idx = -1; -} - -#define ACQUIRE_IA_LOCK(bp, iae_idx) \ - do { \ - int idx; \ - acquire_ia_lock(bp, &idx); \ - iae_idx = idx; \ - } while (0) - -static inline void release_ia_lock(struct xsc_core_device *xdev, int lock_idx) -{ - writel(lock_idx, REG_ADDR(xdev, xdev->regs.cpm_put_lock)); -} - -#define RELEASE_IA_LOCK(bp, iae_idx) release_ia_lock(bp, iae_idx) - -static inline void ia_write_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx) -{ - int i; - int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE; - - for (i = 0; i < n; i++) { - writel(*(ptr++), REG_ADDR(xdev, offset)); - offset += sizeof(*ptr); - } -} - -static inline void ia_read_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx) -{ - int i; - int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE; - u32 *pptr = ptr; - - for (i = 0; i < n; i++) { - *(pptr) = readl(REG_ADDR(xdev, offset)); - offset += sizeof(*ptr); - pptr = pptr + 1; - } -} - -static inline void ia_write_reg_addr(struct xsc_core_device *xdev, u32 reg, int iae_idx) -{ - int offset = xdev->regs.cpm_addr + (iae_idx) * CPM_IAE_ADDR_REG_STRIDE; - - writel(reg, REG_ADDR(xdev, offset)); -} - -static inline void initiate_ia_cmd(struct xsc_core_device *xdev, int iae_idx, int length, int r0w1) -{ - struct iae_cmd cmd; - int addr = xdev->regs.cpm_cmd; - - cmd.iae_r0w1 = r0w1; - cmd.iae_len = length - 1; - cmd.iae_idx = iae_idx; - writel(cmd.raw_data, REG_ADDR(xdev, addr)); -} - -static inline void initiate_ia_write_cmd(struct xsc_core_device *xdev, int iae_idx, int length) -{ - initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_WRITE); -} - -static inline void initiate_ia_read_cmd(struct xsc_core_device *xdev, int iae_idx, int length) -{ - initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_READ); -} - -static inline void wait_for_complete(struct xsc_core_device *xdev, int iae_idx) -{ - while ((readl(REG_ADDR(xdev, xdev->regs.cpm_busy)) & (1 << iae_idx))) - ; -} - -static inline void ia_write_reg_mr(struct xsc_core_device *xdev, u32 reg, - u32 *ptr, int n, int idx) -{ - ia_write_data(xdev, ptr, n, idx); - ia_write_reg_addr(xdev, reg, idx); - initiate_ia_write_cmd(xdev, idx, n); -} - -#define IA_WRITE_REG_MR(bp, reg, ptr, n, idx) ia_write_reg_mr(bp, reg, ptr, n, idx) - -static inline void ia_write(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) -{ - int iae_idx; - - acquire_ia_lock(xdev, &iae_idx); - ia_write_data(xdev, ptr, n, iae_idx); - ia_write_reg_addr(xdev, reg, iae_idx); - initiate_ia_write_cmd(xdev, iae_idx, n); - release_ia_lock(xdev, iae_idx); -} - -#define IA_WRITE(bp, reg, ptr, n) ia_write(bp, reg, ptr, n) - -static 
inline void ia_read(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) -{ - int iae_idx; - - acquire_ia_lock(xdev, &iae_idx); - ia_write_reg_addr(xdev, reg, iae_idx); - initiate_ia_read_cmd(xdev, iae_idx, n); - wait_for_complete(xdev, iae_idx); - ia_read_data(xdev, ptr, n, iae_idx); - release_ia_lock(xdev, iae_idx); -} - -#define IA_READ(bp, reg, ptr, n) ia_read(bp, reg, ptr, n) - -static inline u32 reg_read32(struct xsc_core_device *dev, u32 offset) -{ - u32 val = 0; - - if (xsc_core_is_pf(dev)) - val = readl(REG_ADDR(dev, offset)); - else - IA_READ(dev, offset, &val, 1); - - return val; -} - -static inline void reg_write32(struct xsc_core_device *dev, u32 offset, u32 val) -{ - u32 *ptr = &val; - - if (xsc_core_is_pf(dev)) - writel(val, REG_ADDR(dev, offset)); - else - IA_WRITE(dev, offset, ptr, 1); -} - -#define REG_RD32(dev, offset) reg_read32(dev, offset) -#define REG_WR32(dev, offset, val) reg_write32(dev, offset, val) - static inline unsigned long bdf_to_key(unsigned int domain, unsigned int bus, unsigned int devfn) { return ((unsigned long)domain << 32) | ((bus & 0xff) << 16) | (devfn & 0xff); @@ -1271,6 +1096,21 @@ is_support_pfc_prio_statistic(struct xsc_core_device *dev) return false; } +static inline bool is_dpu_soc_pf(u32 device_id) +{ + return device_id == XSC_MV_SOC_PF_DEV_ID; +} + +static inline bool is_dpu_host_pf(u32 device_id) +{ + return device_id == XSC_MV_HOST_PF_DEV_ID; +} + +static inline bool is_host_pf(struct xsc_core_device *xdev) +{ + return xsc_core_is_pf(xdev) && !is_dpu_soc_pf(xdev->pdev->device); +} + static inline bool is_support_pfc_stall_stats(struct xsc_core_device *dev) { @@ -1288,23 +1128,110 @@ static inline bool is_support_hw_pf_stats(struct xsc_core_device *dev) return xsc_core_is_pf(dev); } +static inline bool +is_support_pf_uc_statistic(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PF_UC_STATISTIC_SUPPORT) + return true; + + return false; +} + static inline void xsc_set_user_mode(struct xsc_core_device *dev, u8 mode) { dev->user_mode = mode; } +static inline bool xsc_support_hw_feature(struct xsc_core_device *dev, u32 feature) +{ + return dev->caps.hw_feature_flag & feature; +} + static inline u8 xsc_get_user_mode(struct xsc_core_device *dev) { return dev->user_mode; } +#define XSC_ORIGIN_PF_BAR_SIZE (256 * 1024 * 1024) +static inline bool is_pf_bar_compressed(struct xsc_core_device *dev) +{ + return pci_resource_len(dev->pdev, 0) != XSC_ORIGIN_PF_BAR_SIZE; +} + void xsc_pci_exit(void); void xsc_remove_eth_driver(void); - void xsc_remove_rdma_driver(void); +void xsc_init_hal(struct xsc_core_device *xdev, u32 device_id); +void xsc_set_pf_db_addr(struct xsc_core_device *xdev, + u64 tx_db, u64 rx_db, u64 cq_db, u64 cq_reg, u64 eq_db); +void xsc_get_db_addr(struct xsc_core_device *xdev, + u64 *tx_db, u64 *rx_db, u64 *cq_db, u64 *cq_reg, u64 *eq_db); +void xsc_read_reg(struct xsc_core_device *xdev, u32 addr, void *data, int len); +void xsc_write_reg(struct xsc_core_device *xdev, u32 addr, void *data); +void xsc_ia_read(struct xsc_core_device *xdev, u32 addr, void *data, int nr); +void xsc_ia_write(struct xsc_core_device *xdev, u32 addr, void *data, int nr); +void xsc_update_tx_db(struct xsc_core_device *xdev, u32 sqn, u32 next_pid); +void xsc_update_rx_db(struct xsc_core_device *xdev, u32 rqn, u32 next_pid); + +void xsc_arm_cq(struct xsc_core_device *xdev, u32 cqn, u32 next_cid, u8 solicited); +void xsc_update_cq_ci(struct xsc_core_device *xdev, u32 cqn, u32 next_cid); +void 
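/*
 * Illustrative migration sketch: the inline helpers removed above
 * (reg_read32()/REG_RD32()/IA_READ()) picked between a direct readl() on the
 * PF and the VF indirect-access window; with this patch that choice moves
 * behind the per-device hal object.  A former call site would read roughly as
 * below (the wrapper name is hypothetical).
 */
static u32 xsc_demo_read_reg32(struct xsc_core_device *xdev, u32 offset)
{
	u32 val = 0;

	/* previously: val = REG_RD32(xdev, offset); */
	xsc_read_reg(xdev, offset, &val, sizeof(val));
	return val;
}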
xsc_update_eq_ci(struct xsc_core_device *xdev, u32 eqn, u32 next_cid, u8 arm); + +void xsc_update_cmdq_req_pid(struct xsc_core_device *xdev, u32 req_pid); +void xsc_update_cmdq_req_cid(struct xsc_core_device *xdev, u32 req_cid); +void xsc_update_cmdq_rsp_pid(struct xsc_core_device *xdev, u32 rsp_pid); +void xsc_update_cmdq_rsp_cid(struct xsc_core_device *xdev, u32 rsp_cid); +u32 xsc_get_cmdq_req_pid(struct xsc_core_device *xdev); +u32 xsc_get_cmdq_req_cid(struct xsc_core_device *xdev); +u32 xsc_get_cmdq_rsp_pid(struct xsc_core_device *xdev); +u32 xsc_get_cmdq_rsp_cid(struct xsc_core_device *xdev); +u32 xsc_get_cmdq_log_stride(struct xsc_core_device *xdev); +void xsc_set_cmdq_depth(struct xsc_core_device *xdev, u32 depth); +void xsc_set_cmdq_req_buf_addr(struct xsc_core_device *xdev, u32 haddr, u32 laddr); +void xsc_set_cmdq_rsp_buf_addr(struct xsc_core_device *xdev, u32 haddr, u32 laddr); +void xsc_set_cmdq_msix_vector(struct xsc_core_device *xdev, u32 vector); +void xsc_check_cmdq_status(struct xsc_core_device *xdev); +int xsc_handle_cmdq_interrupt(struct xsc_core_device *xdev); +u8 xsc_get_mr_page_mode(struct xsc_core_device *xdev, u8 page_shift); +u32 xsc_mkey_to_idx(struct xsc_core_device *xdev, u32 mkey); +u32 xsc_idx_to_mkey(struct xsc_core_device *xdev, u32 mkey_idx); +void xsc_set_mpt(struct xsc_core_device *xdev, int iae_idx, u32 mtt_base, void *mr_request); +void xsc_clear_mpt(struct xsc_core_device *xdev, int iae_idx, u32 mtt_base, void *mr_request); +void xsc_set_mtt(struct xsc_core_device *xdev, int iae_idx, u32 mtt_base, void *mr_request); +void xsc_set_read_done_msix_vector(struct xsc_core_device *xdev, u32 vector); +int xsc_dma_write_tbl_once(struct xsc_core_device *xdev, u32 data_len, u64 dma_wr_addr, + u32 host_id, u32 func_id, u64 success[2], u32 size); +void xsc_dma_read_tbl(struct xsc_core_device *xdev, u32 host_id, u32 func_id, u64 data_addr, + u32 tbl_id, u32 burst_num, u32 tbl_start_addr); +bool xsc_skb_need_linearize(struct xsc_core_device *xdev, int ds_num); +bool xsc_is_err_cqe(struct xsc_core_device *xdev, void *cqe); +u8 xsc_get_cqe_error_code(struct xsc_core_device *xdev, void *cqe); +u8 xsc_get_cqe_opcode(struct xsc_core_device *xdev, void *cqe); +u16 xsc_get_eth_channel_num(struct xsc_core_device *xdev); +u32 xsc_get_max_mtt_num(struct xsc_core_device *xdev); +u32 xsc_get_max_mpt_num(struct xsc_core_device *xdev); +u32 xsc_get_rdma_stat_mask(struct xsc_core_device *xdev); +u32 xsc_get_eth_stat_mask(struct xsc_core_device *xdev); +void xsc_set_data_seg(struct xsc_core_device *xdev, void *data_seg, u64 addr, u32 key, u32 length); +u8 xsc_get_mad_msg_opcode(struct xsc_core_device *xdev); +u32 xsc_get_max_qp_depth(struct xsc_core_device *xdev); +bool xsc_check_max_qp_depth(struct xsc_core_device *xdev, u32 *wqe_cnt, u32 max_qp_depth); +void xsc_set_mtt_info(struct xsc_core_device *xdev); + void xsc_set_exit_flag(void); bool xsc_get_exit_flag(void); bool exist_incomplete_qp_flush(void); +int xsc_cmd_query_read_flush(struct xsc_core_device *dev); + +int xsc_register_devinfo(struct xsc_core_device *xdev, char *ifname, char *ibdev_name); +void xsc_register_get_mdev_info_func(int (*get_mdev_info)(void *data)); + +typedef void (*get_ibdev_name_func_t)(struct net_device *netdev, char *ibdev_name, int len); +void xsc_register_get_mdev_ibdev_name_func(get_ibdev_name_func_t fn); + #endif /* XSC_CORE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h index 
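/*
 * Minimal sketch, assuming the CQE accessors declared above are the intended
 * replacement for reading struct xsc_cqe bitfields directly (the opcode/error
 * bits become opaque placeholder bytes in xsc_hsi.h below): poll code lets the
 * HAL decode the completion instead of peeking at the layout itself.
 */
static void xsc_demo_classify_cqe(struct xsc_core_device *xdev, void *cqe)
{
	if (xsc_is_err_cqe(xdev, cqe)) {
		pr_debug("error cqe, code %u\n",
			 xsc_get_cqe_error_code(xdev, cqe));
		return;
	}
	pr_debug("cqe opcode %u\n", xsc_get_cqe_opcode(xdev, cqe));
}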
89bc1b1e0221f6cb5f4cfe7aa14da926608c3a68..caa2db387a4c18470f3540cae60d2f0a7ab20299 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h @@ -25,6 +25,19 @@ enum xsc_vlan_rule_type { XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, }; +enum { + XSC_ACTION_NONE = 0, + XSC_ACTION_ADD = 1, + XSC_ACTION_DEL = 2, +}; + +struct xsc_l2_hash_node { + struct hlist_node hlist; + u8 action; + u8 mac_addr[ETH_ALEN]; + u16 pct_prio; +}; + struct xsc_vlan_table { DECLARE_BITMAP(active_cvlans, VLAN_N_VID); DECLARE_BITMAP(active_svlans, VLAN_N_VID); @@ -51,4 +64,4 @@ int xsc_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, int xsc_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); void xsc_set_rx_mode_work(struct work_struct *work); -#endif /* XSC_FS_H */ +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h index 4d00ce5a39b1f74a11c8b71c5084c9f821d6afec..d2ba78caad4a0774cf082e088ab0cc863089cba5 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h @@ -15,8 +15,12 @@ #define PAGE_SIZE_4K (_AC(1, UL) << PAGE_SHIFT_4K) #define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) +#ifndef EQ_NUM_MAX #define EQ_NUM_MAX 1024 +#endif +#ifndef EQ_SIZE_MAX #define EQ_SIZE_MAX 1024 +#endif #define XSC_RSS_INDIR_TBL_S 256 #define XSC_MAX_TSO_PAYLOAD 0x10000/*64kb*/ @@ -86,8 +90,9 @@ enum { XSC_OPCODE_RDMA_REQ_ERROR = 8, XSC_OPCODE_RDMA_RSP_ERROR = 9, XSC_OPCODE_RDMA_CQE_ERROR = 10, - XSC_OPCODE_RDMA_MAD_REQ_SEND, - XSC_OPCODE_RDMA_MAD_RSP_RECV, + XSC_OPCODE_RDMA_MAD_REQ_SEND = 11, + XSC_OPCODE_RDMA_MAD_RSP_RECV = 12, + XSC_OPCODE_RDMA_CQE_RAW_SNF = 13, }; enum { @@ -135,6 +140,7 @@ enum { XSC_QUEUE_TYPE_RAW_TPE = 5, XSC_QUEUE_TYPE_RAW_TSO = 6, XSC_QUEUE_TYPE_RAW_TX = 7, + XSC_QUEUE_TYPE_SNIFFER = 8, XSC_QUEUE_TYPE_INVALID = 0xFF, }; @@ -185,13 +191,7 @@ struct regpair { }; struct xsc_cqe { - union { - u8 msg_opcode; - struct { - u8 error_code:7; - u8 is_error:1; - }; - }; + u8 placeholder1; __le32 qp_id:15; u8 rsv1:1; u8 se:1; @@ -204,21 +204,13 @@ struct xsc_cqe { __le32 vni; __le64 ts:48; __le16 wqe_id; - __le16 rsv[3]; + u8 placeholder2; + u8 rsv3; + __le16 rsv[2]; __le16 rsv2:15; u8 owner:1; }; -/* CQ doorbell */ -union xsc_cq_doorbell { - struct{ - u32 cq_next_cid:16; - u32 cq_id:15; - u32 arm:1; - }; - u32 val; -}; - /* EQE TBD */ struct xsc_eqe { u8 type; @@ -231,16 +223,6 @@ struct xsc_eqe { u8 owner:1; }; -/* EQ doorbell */ -union xsc_eq_doorbell { - struct{ - u32 eq_next_cid : 11; - u32 eq_id : 11; - u32 arm : 1; - }; - u32 val; -}; - /*for beryl tcam table .begin*/ #define XSC_TBM_PCT_DW_SIZE_MAX 20 #define XSC_TCAM_REG_ADDR_STRIDE 4 @@ -316,26 +298,6 @@ enum xsc_tbm_pct_inport { #define XSC_SEND_WQE_SIZE BIT(XSC_SEND_WQE_SHIFT) #define XSC_RECV_WQE_SIZE BIT(XSC_RECV_WQE_SHIFT) -union xsc_db_data { - struct { - __le32 sq_next_pid:16; - __le32 sqn:15; - __le32:1; - }; - struct { - __le32 rq_next_pid:13; - __le32 rqn:15; - __le32:4; - }; - struct { - __le32 cq_next_cid:16; - __le32 cqn:15; - __le32 solicited:1; - - }; - __le32 raw_data; -}; - #define XSC_BROADCASTID_MAX 2 #define XSC_TBM_BOMT_DESTINFO_SHIFT (XSC_BROADCASTID_MAX / 2) diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h index a5033d0e042a4510089d82fdcd2531fd7aa32cee..f68b07643bc4daeee0b8d62a2cb5217170e3534c 100644 --- 
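/*
 * Usage sketch for the new struct xsc_l2_hash_node / XSC_ACTION_* values in
 * xsc_fs.h above: an "add" request for a unicast MAC is queued on a hash table
 * and consumed later (e.g. by the rx-mode work).  The table, key choice and
 * allocation context are assumptions of this example.
 */
#include <linux/etherdevice.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

static DEFINE_HASHTABLE(xsc_demo_l2_table, 6);	/* hypothetical 64-bucket table */

static int xsc_demo_l2_add(const u8 *mac, u16 prio)
{
	struct xsc_l2_hash_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	memcpy(node->mac_addr, mac, ETH_ALEN);
	node->action = XSC_ACTION_ADD;
	node->pct_prio = prio;
	hash_add(xsc_demo_l2_table, &node->hlist, ether_addr_to_u64(mac));
	return 0;
}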
a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h @@ -40,6 +40,7 @@ enum { XSC_IOCTL_GET_CMA_PCP = 0x103, XSC_IOCTL_GET_CMA_DSCP = 0x104, XSC_IOCTL_GET_CONTEXT = 0x105, + XSC_IOCTL_GET_DEVINFO = 0x106, XSC_IOCTL_GAT_MAX }; @@ -66,7 +67,15 @@ enum { }; enum { - XSC_IOCTL_OPCODE_ENABLE_USER_MODE = 0x600, + XSC_IOCTL_OPCODE_VF_USER_MODE = 0x600, + XSC_IOCTL_OPCODE_PF_USER_MODE = 0x601, + XSC_IOCTL_OPCODE_BOND_USER_MODE = 0x602, +}; + +enum { + XSC_USER_MODE_FWD_BCAST_PKT_BIT = 0, + XSC_USER_MODE_FWD_LLDP_PKT_BIT, + XSC_USER_MODE_FWD_PKT_NUM, }; enum xsc_flow_tbl_id { @@ -181,6 +190,22 @@ struct xsc_ioctl_mem_info { u64 phy_addr; }; +#define MAX_IFNAME_LEN 31 +struct xsc_devinfo { + u32 domain; + u32 bus; + u32 devfn; + u8 ifname[MAX_IFNAME_LEN + 1]; + u8 ibdev_name[MAX_IFNAME_LEN + 1]; + u32 ip_addr; + u32 vendor_id; +}; + +struct xsc_ioctl_get_devinfo { + u32 dev_num; + struct xsc_devinfo data[]; +}; + /* get phy info */ struct xsc_ioctl_get_phy_info_attr { u16 bdf; @@ -280,6 +305,10 @@ struct xsc_ioctl_set_debug_info { struct xsc_ioctl_user_mode_attr { u8 enable; + u8 mac_bitmap; + u16 pkt_bitmap; + u16 dst_info; + u32 resv0; }; /* type-value */ @@ -296,10 +325,17 @@ struct xsc_ioctl_attr { u16 length; /* data length */ u32 error; /* ioctl error info */ u16 ver; - u16 rsvd; + u8 tunnel_cmd; + u8 rsvd; u8 data[]; /* specific table info */ }; +struct xsc_ioctl_tunnel_hdr { + u32 domain; + u32 bus; + u32 devfn; +}; + struct xsc_ioctl_emu_hdr { u16 in_length; /* cmd req length */ u16 out_length; /* cmd rsp length */ @@ -314,4 +350,4 @@ struct xsc_ioctl_hdr { struct xsc_ioctl_attr attr; }; -#endif /* XSC_IOCTL_H */ +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h index 24aa39a15e9d16dd21df005275364ffe411edb91..8d23760a478c459a79ac5eb60defa607855a5232 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h @@ -69,6 +69,7 @@ struct xsc_lag { u32 board_id; int mode_changes_in_progress; u8 not_roce_lag_xdev_mask; + struct slave_func_data func_data; }; struct xsc_lag_event { @@ -84,6 +85,8 @@ struct xsc_lag_event { enum lag_slave_status slave_status; u8 is_roce_lag_xdev; u8 not_roce_lag_xdev_mask; + struct slave_func_data roce_pf_func_data; + struct slave_func_data func_data[6]; }; struct lag_event_list { @@ -116,6 +119,8 @@ struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev); struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev); u16 xsc_get_lag_id(struct xsc_core_device *xdev); struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev); +bool xsc_lag_is_kernel(struct xsc_core_device *xdev); +u16 xsc_lag_set_user_mode(struct xsc_core_device *xdev, u8 mode); static inline void xsc_board_lag_lock(struct xsc_core_device *xdev) { diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h index dae4fa46cc270aa6cc6ecc23395f0eea2f64ec40..665103ac4dfa12e4232a1478e22a623b68d44ff0 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h @@ -27,4 +27,4 @@ void xsc_port_ctrl_cb_dereg(const char *name); void xsc_port_ctrl_fini(void); int xsc_port_ctrl_init(void); struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn); -#endif /* XSC_PORT_CTRL_H */ +#endif diff --git 
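/*
 * Sizing sketch for the new XSC_IOCTL_GET_DEVINFO reply above: the reply is a
 * header plus a flexible array of struct xsc_devinfo, so the buffer is sized
 * for dev_num entries.  Allocation flags and the helper name are assumptions
 * of this example.
 */
static struct xsc_ioctl_get_devinfo *xsc_demo_alloc_devinfo(u32 dev_num)
{
	struct xsc_ioctl_get_devinfo *reply;
	size_t len = sizeof(*reply) + dev_num * sizeof(reply->data[0]);

	reply = kvzalloc(len, GFP_KERNEL);
	if (reply)
		reply->dev_num = dev_num;
	return reply;
}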
a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h index ed1271bced09cf432750f85303c6650af49295a7..fec39d7137f57cb17fd2eafb248c060fa90caabf 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h @@ -172,4 +172,4 @@ struct epp_pph { #define PPH_CSUM_VAL(base) \ ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_VAL_OFF)) >> \ PPH_CSUM_VAL_SHIFT) & PPH_CSUM_VAL_MASK) -#endif /* XSC_PPH_H */ +#endif /* XSC_TBM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_prgrmmbl_cc_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_prgrmmbl_cc_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..de4d31b5ea385bdc73228e85d01eda91167ac572 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_prgrmmbl_cc_ctrl.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_PRGRMMBL_CC_CTRL_H +#define XSC_PRGRMMBL_CC_CTRL_H + +typedef int (*port_prgrmmbl_cc_ctrl_cb)(struct xsc_bdf_file *file, unsigned int cmd, + unsigned long args, void *data); +struct class; + +bool xsc_prgrmmble_cc_ctrl_is_supported(struct xsc_core_device *dev); +int xsc_prgrmmbl_cc_ctrl_cb_init(void); +void xsc_prgrmmbl_cc_ctrl_cb_fini(void); +int xsc_prgrmmbl_cc_ctrl_dev_del(struct xsc_core_device *dev, + struct class *port_ctrl_class, int *dev_id); +int xsc_prgrmmbl_cc_ctrl_dev_add(struct xsc_core_device *dev, + struct class *port_ctrl_class, dev_t dev_id); + +int xsc_prgrmmbl_cc_ctrl_cb_reg(const char *name, port_prgrmmbl_cc_ctrl_cb cb, void *data); +void xsc_prgrmmbl_cc_ctrl_cb_dereg(const char *name); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h index b54b2b9adff6e72fb8fc43e57798f5845e25ad38..eef3ab7fd36510d3194837a642e03085ef070556 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h @@ -5,7 +5,6 @@ #ifndef XSC_REG_H #define XSC_REG_H - #define CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0x0 #define CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0x4 #define CMDQM_HOST_REQ_PID_MEM_ADDR 0x8 @@ -32,4 +31,27 @@ #define DB_CQ_CID_DIRECT_MEM_ADDR 0x8cc #define TX_DB_FUNC_MEM_ADDR 0x8d0 #define RX_DB_FUNC_MEM_ADDR 0x8d4 -#endif /* XSC_REG_H */ + +#define TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0x8d8 +#define DMA_UL_BUSY_REG_ADDR 0x8dc +#define DMA_DL_DONE_REG_ADDR 0x8e0 +#define DMA_DL_SUCCESS_REG_ADDR 0x8e4 +#define ERR_CODE_CLR_REG_ADDR 0x8e8 +#define DMA_RD_TABLE_ID_REG_ADDR 0x8ec +#define DMA_RD_ADDR_REG_ADDR 0x8f0 +#define INDRW_RD_START_REG_ADDR 0x8f4 +#define TBL_DL_BUSY_REG_ADDR 0x8f8 +#define TBL_DL_REQ_REG_ADDR 0x8fc +#define TBL_DL_ADDR_L_REG_ADDR 0x900 +#define TBL_DL_ADDR_H_REG_ADDR 0x904 +#define TBL_DL_START_REG_ADDR 0x908 +#define TBL_UL_REQ_REG_ADDR 0x90c +#define TBL_UL_ADDR_L_REG_ADDR 0x910 +#define TBL_UL_ADDR_H_REG_ADDR 0x914 +#define TBL_UL_START_REG_ADDR 0x918 +#define TBL_MSG_RDY_REG_ADDR 0x91c + +#define CPM_IDA_ADDR_REG_ADDR_NEW 0xa00 +#define CPM_IDA_DATA_MEM_ADDR_NEW 0x2000 + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c index 0315fd30698991f77c88177f1400b9902683aa51..5c2560fb50fd89fe1d4075d4d96ebd8e22008267 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/main.c +++ 
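/*
 * Registration sketch for the new programmable congestion-control port-ctrl
 * interface declared in xsc_prgrmmbl_cc_ctrl.h above.  The handler body and
 * the "demo_cc" name are placeholders; a real consumer supplies its own.
 */
static int xsc_demo_cc_cb(struct xsc_bdf_file *file, unsigned int cmd,
			  unsigned long args, void *data)
{
	/* decode cmd and copy args to/from user space here */
	return 0;
}

static int xsc_demo_cc_register(void)
{
	return xsc_prgrmmbl_cc_ctrl_cb_reg("demo_cc", xsc_demo_cc_cb, NULL);
}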
b/drivers/net/ethernet/yunsilicon/xsc/net/main.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,8 @@ #include "xsc_eth_dim.h" MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Yunsilicon network adapters ethernet driver"); +MODULE_VERSION(DRIVER_VERSION); #define MAX_VF_NUM_MINIDUMP 1024 @@ -50,6 +53,10 @@ static int xsc_eth_open(struct net_device *netdev); static int xsc_eth_close(struct net_device *netdev); static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc); +#ifdef NEED_CREATE_RX_THREAD +extern uint32_t xsc_eth_rx_thread_create(struct xsc_adapter *adapter); +#endif + static inline void xsc_set_feature(netdev_features_t *features, netdev_features_t feature, bool enable) @@ -124,6 +131,14 @@ static void xsc_eth_init_frags_partition(struct xsc_rq *rq) &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; int f; + if (rq->wqe.info.num_frags == 1 && + frag_info->frag_stride >= XSC_RX_FRAG_SZ) { + *frag = next_frag; + frag->last_in_page = 1; + next_frag.di++; + continue; + } + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { if (next_frag.offset + frag_info[f].frag_stride > XSC_RX_FRAG_SZ) { @@ -163,7 +178,7 @@ static void xsc_eth_free_di_list(struct xsc_rq *rq) kvfree(rq->wqe.di); } -int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) +static int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) { struct xsc_page_cache *cache = &rq->page_cache; @@ -176,7 +191,7 @@ int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) return 0; } -void xsc_rx_free_page_cache(struct xsc_rq *rq) +static void xsc_rx_free_page_cache(struct xsc_rq *rq) { struct xsc_page_cache *cache = &rq->page_cache; u32 i; @@ -194,7 +209,7 @@ int xsc_eth_reset(struct xsc_core_device *dev) return 0; } -void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, enum xsc_event event) +static void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, enum xsc_event event) { struct xsc_cq *xsc_cq = container_of(xcq, struct xsc_cq, xcq); struct xsc_core_device *xdev = xsc_cq->xdev; @@ -208,7 +223,7 @@ void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, enum xsc_event event) xsc_core_err(xdev, "Eth catch CQ ERROR:%x, cqn: %d\n", event, xcq->cqn); } -void xsc_eth_completion_event(struct xsc_core_cq *xcq) +static void xsc_eth_completion_event(struct xsc_core_cq *xcq) { struct xsc_cq *cq = container_of(xcq, struct xsc_cq, xcq); struct xsc_core_device *xdev = cq->xdev; @@ -226,7 +241,7 @@ void xsc_eth_completion_event(struct xsc_core_cq *xcq) cq->channel->stats->poll_tx = 0; if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) - xsc_core_warn(xdev, "ch%d_cq%d, napi_flag=0x%lx\n", + xsc_core_info(xdev, "ch%d_cq%d, napi_flag=0x%lx\n", cq->channel->chl_idx, xcq->cqn, cq->napi->state); napi_schedule(cq->napi); @@ -255,21 +270,27 @@ static inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_cor return 0; } -int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, - struct xsc_create_cq_mbox_in *in, int insize) +static int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, + struct xsc_create_cq_ex_mbox_in *in, int insize) { int err, ret = -1; struct xsc_cq_table *table = &xdev->dev_res->cq_table; struct xsc_create_cq_mbox_out out; - in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ_EX); ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status && out.hdr.status != 
XSC_CMD_STATUS_NOT_SUPPORTED)) { xsc_core_err(xdev, "failed to create cq, err=%d out.status=%u\n", ret, out.hdr.status); return -ENOEXEC; } + if (out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + ret = xsc_create_cq_compat_handler(xdev, in, &out); + if (ret) + return ret; + } + xcq->cqn = be32_to_cpu(out.cqn) & 0xffffff; xcq->cons_index = 0; xcq->arm_sn = 0; @@ -290,7 +311,7 @@ int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, return ret; } -int xsc_eth_destroy_cq(struct xsc_core_device *xdev, struct xsc_cq *cq) +static int xsc_eth_destroy_cq(struct xsc_core_device *xdev, struct xsc_cq *cq) { struct xsc_cq_table *table = &xdev->dev_res->cq_table; struct xsc_core_cq *tmp; @@ -328,15 +349,15 @@ int xsc_eth_destroy_cq(struct xsc_core_device *xdev, struct xsc_cq *cq) return err; } -void xsc_eth_free_cq(struct xsc_cq *cq) +static void xsc_eth_free_cq(struct xsc_cq *cq) { xsc_eth_wq_destroy(&cq->wq_ctrl); } -int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, - struct xsc_create_multiqp_mbox_in *in, - int insize, - int *prqn_base) +static int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, + struct xsc_create_multiqp_mbox_in *in, + int insize, + int *prqn_base) { int ret; struct xsc_create_multiqp_mbox_out out; @@ -354,7 +375,7 @@ int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, return 0; } -void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) +static void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) { struct xsc_rq *rq; struct xsc_sq *sq; @@ -384,34 +405,8 @@ void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) } } -int xsc_eth_create_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq, - struct xsc_create_qp_mbox_in *in, int insize) -{ - int ret = -1; - struct xsc_create_qp_mbox_out out; - - in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); - ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); - if (ret || out.hdr.status) { - xsc_core_err(xdev, "failed to create rq, err=%d out.status=%u\n", - ret, out.hdr.status); - return -ENOEXEC; - } - - prq->rqn = be32_to_cpu(out.qpn) & 0xffffff; - prq->cqp.event = xsc_eth_qp_event; - prq->cqp.eth_queue_type = XSC_RES_RQ; - - ret = create_resource_common(xdev, &prq->cqp); - if (ret) { - xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", __func__, prq->rqn, ret); - return ret; - } - - return 0; -} -int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) +static int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) { struct xsc_destroy_qp_mbox_in in; struct xsc_destroy_qp_mbox_out out; @@ -468,8 +463,8 @@ static void xsc_free_qp_rq(struct xsc_rq *rq) } } -int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, - struct xsc_create_qp_mbox_in *in, int insize) +static int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, + struct xsc_create_qp_mbox_in *in, int insize) { struct xsc_create_qp_mbox_out out; int ret; @@ -487,7 +482,7 @@ int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, return 0; } -int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_mbox_in *in) +static int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_mbox_in *in) { struct xsc_modify_raw_qp_mbox_out out; int ret; @@ -505,7 +500,7 @@ int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_ return 0; } -int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) +static int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, 
struct xsc_sq *psq) { struct xsc_destroy_qp_mbox_in in; struct xsc_destroy_qp_mbox_out out; @@ -598,20 +593,56 @@ static int xsc_eth_alloc_cq(struct xsc_channel *c, struct xsc_cq *pcq, return ret; } +#ifdef NEED_CREATE_RX_THREAD +static int xsc_eth_set_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret = XSCALE_RET_SUCCESS; + struct xsc_create_cq_ex_mbox_in *in; + int inlen; + int hw_npages; + + hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*mbox size + pas size*/ + inlen = sizeof(struct xsc_create_cq_ex_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /*construct param of in struct*/ + in->ctx_ex.ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx_ex.ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx_ex.ctx.glb_func_id = cpu_to_be16(c->adapter->xdev->glb_func_id); + in->ctx.page_shift = PAGE_SHIFT; + + xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, + &in->pas[0], hw_npages); + + ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); + + kfree(in); + xsc_core_info(c->adapter->xdev, "create cqn%d, func_id=%d, ret=%d\n", + pcq->xcq.cqn, c->adapter->xdev->glb_func_id, ret); + return ret; +} +#else static int xsc_eth_set_cq(struct xsc_channel *c, struct xsc_cq *pcq, struct xsc_cq_param *pcq_param) { int ret = XSCALE_RET_SUCCESS; struct xsc_core_device *xdev = c->adapter->xdev; - struct xsc_create_cq_mbox_in *in; + struct xsc_create_cq_ex_mbox_in *in; int inlen; int eqn, irqn; int hw_npages; hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); /*mbox size + pas size*/ - inlen = sizeof(struct xsc_create_cq_mbox_in) + + inlen = sizeof(struct xsc_create_cq_ex_mbox_in) + sizeof(__be64) * hw_npages; in = kvzalloc(inlen, GFP_KERNEL); @@ -623,11 +654,12 @@ static int xsc_eth_set_cq(struct xsc_channel *c, if (ret) goto err; - in->ctx.eqn = eqn; - in->ctx.eqn = cpu_to_be16(in->ctx.eqn); - in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; - in->ctx.pa_num = cpu_to_be16(hw_npages); - in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id); + in->ctx_ex.ctx.eqn = eqn; + in->ctx_ex.ctx.eqn = cpu_to_be16(in->ctx_ex.ctx.eqn); + in->ctx_ex.ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx_ex.ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx_ex.ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id); + in->ctx_ex.page_shift = PAGE_SHIFT; xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, &in->pas[0], hw_npages); @@ -643,6 +675,7 @@ static int xsc_eth_set_cq(struct xsc_channel *c, c->chl_idx, pcq->xcq.cqn, eqn, xdev->glb_func_id, ret); return ret; } +#endif static int xsc_eth_open_cq(struct xsc_channel *c, struct xsc_cq *pcq, @@ -697,7 +730,7 @@ static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, return xsc_modify_qp(xdev, &in, &out, qpn, status); } -int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) +static int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) { struct xsc_set_mtu_mbox_in in; struct xsc_set_mtu_mbox_out out; @@ -722,7 +755,7 @@ int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) return ret; } -int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) +static int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) { struct xsc_query_eth_mac_mbox_out *out; struct xsc_query_eth_mac_mbox_in in; @@ -752,13 +785,13 @@ int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) return err; } -int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel *c) +static 
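/*
 * PAS sizing, worked example for xsc_eth_set_cq() above (figures are
 * illustrative): a CQ ring buffer of 64 KiB gives
 * hw_npages = DIV_ROUND_UP(65536, 4096) = 16 physical-address entries, so the
 * CREATE_CQ_EX mailbox is allocated as
 * sizeof(struct xsc_create_cq_ex_mbox_in) + 16 * sizeof(__be64).
 */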
int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel *c) { int ret = 0; int i; for (i = 0; i < c->qp.rq_num; i++) { - c->qp.rq[i].post_wqes(&c->qp.rq[i]); + c->qp.rq[i].post_wqes(&c->qp.rq[i], true); ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.rq[i].rqn, XSC_CMD_OP_RTR2RTS_QP); if (ret) @@ -774,8 +807,8 @@ int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel * return 0; } -int xsc_eth_modify_qps(struct xsc_adapter *adapter, - struct xsc_eth_channels *chls) +static int xsc_eth_modify_qps(struct xsc_adapter *adapter, + struct xsc_eth_channels *chls) { int ret; int i; @@ -791,20 +824,70 @@ int xsc_eth_modify_qps(struct xsc_adapter *adapter, return 0; } -u32 xsc_rx_get_linear_frag_sz(u32 mtu) +static u32 xsc_rx_get_linear_frag_sz(u32 mtu) { u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); return XSC_SKB_FRAG_SZ(byte_count); } -bool xsc_rx_is_linear_skb(u32 mtu) +static bool xsc_rx_is_linear_skb(u32 mtu) { u32 linear_frag_sz = xsc_rx_get_linear_frag_sz(mtu); return linear_frag_sz <= PAGE_SIZE; } +static bool is_mtu_valid(struct net_device *netdev, int mtu) +{ + if (mtu > netdev->max_mtu || mtu < netdev->min_mtu) { + netdev_err(netdev, "%s: Bad MTU (%d), valid range is: [%d..%d]\n", + __func__, mtu, netdev->min_mtu, netdev->max_mtu); + return false; + } + + return true; +} + +static int xsc_eth_get_mtu(struct xsc_adapter *adapter, u16 *pmtu) +{ + struct xsc_query_mtu_mbox_out *out; + struct xsc_query_mtu_mbox_in in; + int err; + struct xsc_core_device *dev = adapter->xdev; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_MTU); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err || out->hdr.status) { + if (out->hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) + xsc_core_info(dev, "not support get mtu\n"); + else + xsc_core_warn(dev, "get mtu failed! 
err=%d, out.status=%u\n", + err, out->hdr.status); + err = -ENOEXEC; + goto exit; + } + + if (!is_mtu_valid(adapter->netdev, be16_to_cpu(out->mtu))) { + err = -EINVAL; + goto exit; + } + + *pmtu = be16_to_cpu(out->mtu); + xsc_core_dbg(dev, "get mtu %u\n", *pmtu); + +exit: + kfree(out); + + return err; +} + static int xsc_eth_alloc_rq(struct xsc_channel *c, struct xsc_rq *prq, struct xsc_rq_param *prq_param) @@ -848,15 +931,19 @@ static int xsc_eth_alloc_rq(struct xsc_channel *c, goto err_init_di; prq->buff.map_dir = DMA_FROM_DEVICE; + prq->buff.page_order = prq_param->frags_info.page_order; +#ifdef XSC_PAGE_CACHE cache_init_sz = wq_sz << prq->wqe.info.log_num_frags; + cache_init_sz <<= 2; ret = xsc_rx_alloc_page_cache(prq, cpu_to_node(c->cpu), ilog2(cache_init_sz)); if (ret) goto err_create_pool; +#endif /* Create a page_pool and register it with rxq */ pool_size = wq_sz << prq->wqe.info.log_num_frags; - pagepool_params.order = XSC_RX_FRAG_SZ_ORDER; - pagepool_params.flags = 0; /* No-internal DMA mapping in page_pool */ + pagepool_params.order = prq->buff.page_order; + pagepool_params.flags = 0; pagepool_params.pool_size = pool_size; pagepool_params.nid = cpu_to_node(c->cpu); pagepool_params.dev = c->adapter->dev; @@ -870,11 +957,12 @@ static int xsc_eth_alloc_rq(struct xsc_channel *c, } if (c->chl_idx == 0) - xsc_core_dbg(adapter->xdev, - "page pool: size=%d, cpu=%d, pool_numa=%d, cache_size=%d, mtu=%d, wqe_numa=%d\n", - pool_size, c->cpu, pagepool_params.nid, - cache_init_sz, adapter->nic_param.mtu, - prq_param->wq.buf_numa_node); + xsc_core_info(adapter->xdev, + "page pool: order=%d, size=%d, cpu=%d, pool_numa=%d, cache_size=%d, mtu=%d, wqe_numa=%d\n", + pagepool_params.order, pool_size, c->cpu, + pagepool_params.nid, cache_init_sz, + adapter->nic_param.mtu, + prq_param->wq.buf_numa_node); for (i = 0; i < wq_sz; i++) { struct xsc_eth_rx_wqe_cyc *wqe = @@ -883,15 +971,13 @@ static int xsc_eth_alloc_rq(struct xsc_channel *c, for (f = 0; f < prq->wqe.info.num_frags; f++) { u32 frag_size = prq->wqe.info.arr[f].frag_size; - wqe->data[f].seg_len = cpu_to_le32(frag_size); - wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + xsc_set_data_seg(adapter->xdev, &wqe->data[f], 0, + cpu_to_le32(XSC_INVALID_LKEY), cpu_to_le32(frag_size)); } - for (; f < prq->wqe.info.frags_max_num; f++) { - wqe->data[f].seg_len = 0; - wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); - wqe->data[f].va = 0; - } + for (; f < prq->wqe.info.frags_max_num; f++) + xsc_set_data_seg(adapter->xdev, &wqe->data[f], 0, + cpu_to_le32(XSC_INVALID_LKEY), 0); } prq->post_wqes = xsc_eth_post_rx_wqes; @@ -933,11 +1019,11 @@ static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, struct xsc_create_qp_request *req; u8 q_log_size = prq_param->rq_attr.q_log_size; int paslen = 0; - struct xsc_rq *prq; + struct xsc_rq *prq = NULL; struct xsc_channel *c; int rqn_base; int inlen; - int entry_len; + int entry_len = 0; int i, j, n; int hw_npages; @@ -987,6 +1073,7 @@ static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, req->cqn_recv = cpu_to_be16(prq->cq.xcq.cqn); req->cqn_send = req->cqn_recv; req->glb_funcid = cpu_to_be16(adapter->xdev->glb_func_id); + req->page_shift = PAGE_SHIFT; xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &req->pas[0], hw_npages); n++; @@ -1101,6 +1188,7 @@ static int xsc_eth_open_qp_sq(struct xsc_channel *c, in->req.cqn_send = cpu_to_be16(psq->cq.xcq.cqn); in->req.cqn_recv = in->req.cqn_send; in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + in->req.page_shift = PAGE_SHIFT; 
xsc_fill_page_frag_array(&psq->wq_ctrl.buf, &in->req.pas[0], hw_npages); @@ -1206,10 +1294,10 @@ static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq *psq) return 0; } -int xsc_eth_open_channel(struct xsc_adapter *adapter, - int idx, - struct xsc_channel *c, - struct xsc_channel_param *chl_param) +static int xsc_eth_open_channel(struct xsc_adapter *adapter, + int idx, + struct xsc_channel *c, + struct xsc_channel_param *chl_param) { int ret = 0; struct net_device *netdev = adapter->netdev; @@ -1307,28 +1395,12 @@ static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) goto out; } - if (byte_count <= DEFAULT_FRAG_SIZE) { - frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE; - frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE; - frags_info->num_frags = 1; - } else if (byte_count <= PAGE_SIZE_4K) { + if (byte_count <= PAGE_SIZE_4K) { frags_info->arr[0].frag_size = PAGE_SIZE_4K; frags_info->arr[0].frag_stride = PAGE_SIZE_4K; frags_info->num_frags = 1; - } else if (byte_count <= (PAGE_SIZE_4K + DEFAULT_FRAG_SIZE)) { - if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { - frags_info->arr[0].frag_size = PAGE_SIZE_4K; - frags_info->arr[0].frag_stride = PAGE_SIZE_4K; - frags_info->arr[1].frag_size = PAGE_SIZE_4K; - frags_info->arr[1].frag_stride = PAGE_SIZE_4K; - frags_info->num_frags = 2; - } else { - frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; - frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; - frags_info->num_frags = 1; - } } else if (byte_count <= 2 * PAGE_SIZE_4K) { - if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K && frags_info->frags_max_num >= 2) { frags_info->arr[0].frag_size = PAGE_SIZE_4K; frags_info->arr[0].frag_stride = PAGE_SIZE_4K; frags_info->arr[1].frag_size = PAGE_SIZE_4K; @@ -1340,8 +1412,9 @@ static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) frags_info->num_frags = 1; } } else { - if (PAGE_SIZE < 4 * PAGE_SIZE_4K) { - frags_info->num_frags = roundup(byte_count, PAGE_SIZE_4K) / PAGE_SIZE_4K; + frags_info->num_frags = roundup(byte_count, PAGE_SIZE_4K) / PAGE_SIZE_4K; + if (PAGE_SIZE < 4 * PAGE_SIZE_4K && + frags_info->frags_max_num >= frags_info->num_frags) { for (i = 0; i < frags_info->num_frags; i++) { frags_info->arr[i].frag_size = PAGE_SIZE_4K; frags_info->arr[i].frag_stride = PAGE_SIZE_4K; @@ -1365,6 +1438,10 @@ static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) frags_info->wqe_bulk_min = frags_info->wqe_bulk; } + if (frags_info->arr[0].frag_size > PAGE_SIZE) + frags_info->page_order = + order_base_2(frags_info->arr[0].frag_size / PAGE_SIZE); + out: frags_info->log_num_frags = order_base_2(frags_info->num_frags); @@ -1375,8 +1452,9 @@ static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, struct xsc_rq_frags_info *frags_info, struct xsc_eth_params *params) { - params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); frags_info->frags_max_num = attr->ele_size / XSC_RECV_WQE_DS; + frags_info->page_order = 0; + params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); } static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, @@ -1403,7 +1481,7 @@ static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, &adapter->nic_param); } -int xsc_eth_open_channels(struct xsc_adapter *adapter) +static int xsc_eth_open_channels(struct xsc_adapter *adapter) { int ret = 0; int i; @@ -1510,13 +1588,13 @@ static void xsc_deactivate_rq(struct xsc_channel *c) clear_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state); } -void 
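/*
 * Frag-layout worked example for xsc_get_rq_frag_info() above (figures are
 * illustrative; the exact byte_count depends on XSC_SW2HW_FRAG_SIZE()): with a
 * 9000-byte MTU on a 4 KiB-page host, byte_count is a little over 9 KiB, so
 * num_frags = roundup(byte_count, 4096) / 4096 = 3 and, provided
 * frags_max_num >= 3, the WQE uses three 4 KiB frags; page_order stays 0
 * because arr[0].frag_size does not exceed PAGE_SIZE.
 */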
xsc_eth_activate_channel(struct xsc_channel *c) +static void xsc_eth_activate_channel(struct xsc_channel *c) { xsc_eth_activate_txqsq(c); xsc_activate_rq(c); } -void xsc_eth_deactivate_channel(struct xsc_channel *c) +static void xsc_eth_deactivate_channel(struct xsc_channel *c) { xsc_deactivate_rq(c); xsc_eth_deactivate_txqsq(c); @@ -1562,7 +1640,7 @@ static void xsc_eth_build_tx2sq_maps(struct xsc_adapter *adapter) } } -void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter) +static void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter) { int num_txqs; struct net_device *netdev = adapter->netdev; @@ -1577,7 +1655,7 @@ void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); } -void xsc_eth_deactivate_priv_channels(struct xsc_adapter *adapter) +static void xsc_eth_deactivate_priv_channels(struct xsc_adapter *adapter) { netif_tx_disable(adapter->netdev); xsc_eth_deactivate_channels(&adapter->channels); @@ -1633,6 +1711,8 @@ static void xsc_eth_close_channels(struct xsc_adapter *adapter) static void xsc_eth_sw_deinit(struct xsc_adapter *adapter) { + xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_DROP)); + xsc_eth_deactivate_priv_channels(adapter); return xsc_eth_close_channels(adapter); @@ -1646,6 +1726,7 @@ int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter) struct xsc_event_set_led_status_mbox_out out; /*query linkstatus cmd*/ + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_LED_STATUS); in.port_id = id; @@ -1679,6 +1760,7 @@ int xsc_eth_get_link_info(struct xsc_adapter *adapter, struct xsc_event_query_linkinfo_mbox_out out; int i, err; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_LINK_INFO); err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); @@ -1708,6 +1790,7 @@ int xsc_eth_set_link_info(struct xsc_adapter *adapter, struct xsc_event_modify_linkinfo_mbox_out out; int err = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_LINK_INFO); memcpy(&in.ctx, plinkinfo, sizeof(*plinkinfo)); @@ -1735,7 +1818,7 @@ int xsc_get_link_speed(struct xsc_core_device *dev) } EXPORT_SYMBOL(xsc_get_link_speed); -int xsc_eth_change_link_status(struct xsc_adapter *adapter) +static int xsc_eth_change_link_status(struct xsc_adapter *adapter) { bool link_up; @@ -1752,59 +1835,6 @@ int xsc_eth_change_link_status(struct xsc_adapter *adapter) return 0; } -static void xsc_eth_event_work(struct work_struct *work) -{ - int err; - struct xsc_event_query_type_mbox_in in; - struct xsc_event_query_type_mbox_out out; - struct xsc_adapter *adapter = container_of(work, struct xsc_adapter, event_work); - - if (adapter->status != XSCALE_ETH_DRIVER_OK) - return; - - /*query cmd_type cmd*/ - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); - - err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); - if (err || out.hdr.status) { - xsc_core_err(adapter->xdev, "failed to query event type, err=%d, stats=%d\n", - err, out.hdr.status); - goto failed; - } - - switch (out.ctx.resp_cmd_type) { - case XSC_CMD_EVENT_RESP_CHANGE_LINK: - err = xsc_eth_change_link_status(adapter); - if (err) { - xsc_core_err(adapter->xdev, "failed to change linkstatus, err=%d\n", err); - goto failed; - } - - xsc_core_dbg(adapter->xdev, "event cmdtype=%04x\n", out.ctx.resp_cmd_type); - break; - case XSC_CMD_EVENT_RESP_TEMP_WARN: - xsc_core_warn(adapter->xdev, "[Minor]nic chip temperature high warning\n"); - break; - case 
XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION: - xsc_core_warn(adapter->xdev, "[Critical]nic chip was over-temperature\n"); - break; - default: - xsc_core_info(adapter->xdev, "unknown event cmdtype=%04x\n", - out.ctx.resp_cmd_type); - break; - } - -failed: - return; -} - -void xsc_eth_event_handler(void *arg) -{ - struct xsc_adapter *adapter = (struct xsc_adapter *)arg; - - queue_work(adapter->workq, &adapter->event_work); -} - int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) { struct xsc_core_device *xdev = adapter->xdev; @@ -1818,6 +1848,7 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) if (xsc_get_user_mode(xdev)) return 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA); in.rss.rss_en = 1; @@ -1836,9 +1867,15 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) caps |= BIT(XSC_TBM_CAP_PP_BYPASS); caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); - if (xsc_get_pct_drop_config(xdev) && !(netdev->flags & IFF_SLAVE)) - caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); - caps_mask |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + if (xsc_get_pf_isolate_config(xdev, true)) { + caps |= BIT(XSC_TBM_CAP_PF_ISOLATE_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_PF_ISOLATE_CONFIG); + } + + if (xsc_get_mac_drop_config(xdev, true)) { + caps |= BIT(XSC_TBM_CAP_MAC_DROP_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_MAC_DROP_CONFIG); + } memcpy(in.nic.mac_addr, netdev->dev_addr, ETH_ALEN); @@ -1856,16 +1893,9 @@ int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) return 0; } -int xsc_eth_restore_nic_hca(struct xsc_core_device *dev) -{ - return xsc_eth_enable_nic_hca((struct xsc_adapter *)dev->eth_priv); -} -EXPORT_SYMBOL(xsc_eth_restore_nic_hca); - -int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) +static int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) { struct xsc_core_device *xdev = adapter->xdev; - struct net_device *netdev = adapter->netdev; struct xsc_cmd_disable_nic_hca_mbox_in in = {}; struct xsc_cmd_disable_nic_hca_mbox_out out = {}; int err; @@ -1874,13 +1904,17 @@ int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) if (xsc_get_user_mode(xdev)) return 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_NIC_HCA); - if (xsc_get_pp_bypass_res(adapter->xdev, false)) + if (xsc_get_pp_bypass_res(xdev, false)) caps |= BIT(XSC_TBM_CAP_PP_BYPASS); - if (xsc_get_pct_drop_config(xdev) && !(netdev->priv_flags & IFF_BONDING)) - caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + if (xsc_get_pf_isolate_config(xdev, false)) + caps |= BIT(XSC_TBM_CAP_PF_ISOLATE_CONFIG); + + if (xsc_get_mac_drop_config(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_MAC_DROP_CONFIG); in.nic.caps = cpu_to_be16(caps); err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); @@ -1892,7 +1926,7 @@ int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) return 0; } -void xsc_eth_rss_params_change(struct xsc_adapter *adapter, u32 change, void *modify) +static void xsc_eth_rss_params_change(struct xsc_adapter *adapter, u32 change, void *modify) { struct xsc_core_device *xdev = adapter->xdev; struct xsc_rss_params *rss = &adapter->rss_params; @@ -1955,6 +1989,7 @@ int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) struct xsc_cmd_modify_nic_hca_mbox_out out = {}; int err = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); xsc_eth_rss_params_change(adapter, flags, &in); @@ -1970,10 +2005,63 @@ int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) return 0; } +int xsc_eth_query_pkt_dst_info(struct 
xsc_adapter *adapter, u8 mac_bitmap, + u16 pkt_bitmap, u16 *dst_info) +{ + struct xsc_cmd_query_pkt_dst_info_mbox_in in; + struct xsc_cmd_query_pkt_dst_info_mbox_out out; + int i, ret = 0; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_PKT_DST_INFO); + in.mac_bitmap = mac_bitmap; + in.pkt_bitmap = cpu_to_be16(pkt_bitmap); + + ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + if (out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) + return -EOPNOTSUPP; + xsc_core_err(adapter->xdev, + "failed to query pkt dst info, pkt=0x%x, mac=0x%x,err=%d\n", + pkt_bitmap, mac_bitmap, out.hdr.status); + return -ENOEXEC; + } + + for (i = 0; i < XSC_USER_MODE_FWD_PKT_NUM; i++) + dst_info[i] = be16_to_cpu(out.dst_info[i]); + + return ret; +} + +int xsc_eth_modify_pkt_dst_info(struct xsc_adapter *adapter, u8 mac_bitmap, + u16 pkt_bitmap, u16 dst_info) +{ + struct xsc_cmd_modify_pkt_dst_info_mbox_in in; + struct xsc_cmd_modify_pkt_dst_info_mbox_out out; + int ret = 0; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_PKT_DST_INFO); + in.mac_bitmap = mac_bitmap; + in.pkt_bitmap = cpu_to_be16(pkt_bitmap); + in.dst_info = cpu_to_be16(dst_info); + + ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + if (out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) + return -EOPNOTSUPP; + xsc_core_err(adapter->xdev, + "failed to modify pkt dst info, pkt=0x%x, mac=0x%x, dst_info=%d, err=%d\n", + pkt_bitmap, mac_bitmap, dst_info, out.hdr.status); + ret = -ENOEXEC; + } + + return ret; +} + static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv, struct xsc_eth_params *params) { -#ifdef MSIX_SUPPORT struct xsc_core_device *xdev = priv->xdev; int num_comp_vectors, irq; @@ -1984,7 +2072,6 @@ static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv, mask_cpu_by_node(xdev->priv.numa_node, xdev->xps_cpumask); netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq); } -#endif } static int xsc_set_port_admin_status(struct xsc_adapter *adapter, @@ -1997,19 +2084,39 @@ static int xsc_set_port_admin_status(struct xsc_adapter *adapter, if (!xsc_core_is_pf(adapter->xdev)) return 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_PORT_ADMIN_STATUS); in.admin_status = cpu_to_be16(status); ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); if (ret || out.hdr.status) { - xsc_core_err(adapter->xdev, "failed to set port admin status, err=%d, status=%d\n", - ret, out.hdr.status); + if (out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) + return -EOPNOTSUPP; + xsc_core_err(adapter->xdev, "failed to set port admin status(%d), err=%d, status=%d\n", + status, ret, out.hdr.status); return -ENOEXEC; } return ret; } +static void xsc_link_event_handler(void *arg) +{ + struct xsc_core_device *dev = arg; + struct xsc_adapter *adapter = dev->eth_priv; + int err = 0; + + if (!adapter) + return; + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + return; + + err = xsc_eth_change_link_status(adapter); + if (err) + xsc_core_err(adapter->xdev, "failed to change linkstatus, err=%d\n", err); +} + int xsc_eth_open(struct net_device *netdev) { struct xsc_adapter *adapter = netdev_priv(netdev); @@ -2039,9 +2146,15 @@ int xsc_eth_open(struct net_device *netdev) if (ret) goto sw_deinit; - /*INIT_WORK*/ - INIT_WORK(&adapter->event_work, xsc_eth_event_work); - xdev->event_handler = xsc_eth_event_handler; +#ifdef NEED_CREATE_RX_THREAD + ret = 
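/*
 * Usage sketch for xsc_eth_modify_pkt_dst_info() above (the mac_bitmap value
 * and the helper name are assumptions of this example): redirect broadcast and
 * LLDP traffic to dst_info using the forwarding bits from xsc_ioctl.h.
 */
static int xsc_demo_redirect_ctrl_pkts(struct xsc_adapter *adapter, u16 dst_info)
{
	u16 pkt_bitmap = BIT(XSC_USER_MODE_FWD_BCAST_PKT_BIT) |
			 BIT(XSC_USER_MODE_FWD_LLDP_PKT_BIT);

	/* 0x1 is assumed to select the first MAC; adjust for the real target */
	return xsc_eth_modify_pkt_dst_info(adapter, 0x1, pkt_bitmap, dst_info);
}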
xsc_eth_rx_thread_create(adapter); + if (ret) { + xsc_core_warn(xdev, "xsc_eth_rx_thread_create failed, err=%d\n", ret); + goto sw_deinit; + } +#endif + + xdev->link_event_handler = xsc_link_event_handler; if (xsc_eth_get_link_status(adapter)) { netdev_info(netdev, "Link up\n"); @@ -2088,6 +2201,11 @@ int xsc_eth_close(struct net_device *netdev) adapter->status = XSCALE_ETH_DRIVER_CLOSE; +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + netif_carrier_off(adapter->netdev); xsc_eth_sw_deinit(adapter); @@ -2152,7 +2270,9 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) int num_txqs, num_rxqs, nch, ntc; int old_num_txqs, old_ntc; int err; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES bool disabling; +#endif old_num_txqs = netdev->real_num_tx_queues; old_ntc = netdev->num_tc ? : 1; @@ -2162,7 +2282,9 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) num_txqs = nch * ntc; num_rxqs = nch;// * priv->profile->rq_groups; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES disabling = num_txqs < netdev->real_num_tx_queues; +#endif xsc_netdev_set_tcs(priv, nch, ntc); @@ -2181,8 +2303,10 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) goto err_txqs; } +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES if (disabling) synchronize_net(); +#endif return 0; @@ -2199,8 +2323,8 @@ static int xsc_update_netdev_queues(struct xsc_adapter *priv) return err; } -void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, - int num_channels) +static void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) { int i; @@ -2242,6 +2366,10 @@ int xsc_safe_switch_channels(struct xsc_adapter *adapter, carrier_ok = netif_carrier_ok(netdev); netif_carrier_off(netdev); +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_DROP)); if (ret) goto close_channels; @@ -2270,6 +2398,12 @@ int xsc_safe_switch_channels(struct xsc_adapter *adapter, if (ret) goto close_channels; +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) + goto close_channels; +#endif + adapter->status = XSCALE_ETH_DRIVER_OK; goto out; @@ -2287,7 +2421,7 @@ int xsc_safe_switch_channels(struct xsc_adapter *adapter, return ret; } -int xsc_eth_nic_mtu_changed(struct xsc_adapter *priv) +static int xsc_eth_nic_mtu_changed(struct xsc_adapter *priv) { u32 new_mtu = priv->nic_param.mtu; int ret; @@ -2303,22 +2437,10 @@ static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) struct xsc_adapter *adapter = netdev_priv(netdev); int old_mtu = netdev->mtu; int ret = 0; - int max_buf_len = 0; - if (new_mtu > netdev->max_mtu || new_mtu < netdev->min_mtu) { - netdev_err(netdev, "%s: Bad MTU (%d), valid range is: [%d..%d]\n", - __func__, new_mtu, netdev->min_mtu, netdev->max_mtu); + if (!is_mtu_valid(netdev, new_mtu)) return -EINVAL; - } - if (!xsc_rx_is_linear_skb(new_mtu)) { - max_buf_len = adapter->xdev->caps.recv_ds_num * PAGE_SIZE; - if (new_mtu > max_buf_len) { - netdev_err(netdev, "Bad MTU (%d), max buf len is %d\n", - new_mtu, max_buf_len); - return -EINVAL; - } - } mutex_lock(&adapter->state_lock); adapter->nic_param.mtu = new_mtu; if (adapter->status != XSCALE_ETH_DRIVER_OK) { @@ -2357,7 +2479,7 @@ static void xsc_set_rx_mode(struct net_device *dev) queue_work(priv->workq, &priv->set_rx_mode_work); } -int xsc_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +static int xsc_set_vf_mac(struct 
net_device *netdev, int vf, u8 *mac) { struct xsc_adapter *adapter = netdev_priv(netdev); struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; @@ -2425,8 +2547,8 @@ static int xsc_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, return 0; } -int xsc_get_vf_config(struct net_device *dev, - int vf, struct ifla_vf_info *ivi) +static int xsc_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) { struct xsc_adapter *adapter = netdev_priv(dev); struct xsc_core_device *xdev = adapter->xdev; @@ -2442,8 +2564,8 @@ int xsc_get_vf_config(struct net_device *dev, return err; } -int xsc_set_vf_link_state(struct net_device *dev, int vf, - int link_state) +static int xsc_set_vf_link_state(struct net_device *dev, int vf, + int link_state) { struct xsc_adapter *adapter = netdev_priv(dev); struct xsc_core_device *xdev = adapter->xdev; @@ -2452,7 +2574,7 @@ int xsc_set_vf_link_state(struct net_device *dev, int vf, return xsc_eswitch_set_vport_state(esw, vf + 1, link_state); } -int set_feature_rxcsum(struct net_device *netdev, bool enable) +static int set_feature_rxcsum(struct net_device *netdev, bool enable) { struct xsc_adapter *adapter = netdev_priv(netdev); struct xsc_core_device *xdev = adapter->xdev; @@ -2460,6 +2582,7 @@ int set_feature_rxcsum(struct net_device *netdev, bool enable) struct xsc_cmd_modify_nic_hca_mbox_out out = {}; int err; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); in.nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_HASH_PPH)); in.nic.caps = cpu_to_be16(enable << XSC_TBM_CAP_HASH_PPH); @@ -2474,7 +2597,7 @@ int set_feature_rxcsum(struct net_device *netdev, bool enable) return 0; } -int set_feature_vlan_offload(struct net_device *netdev, bool enable) +static int set_feature_vlan_offload(struct net_device *netdev, bool enable) { int err = 0, i; struct xsc_adapter *adapter = netdev_priv(netdev); @@ -2526,7 +2649,7 @@ static int xsc_handle_feature(struct net_device *netdev, return 0; } -int xsc_eth_set_features(struct net_device *netdev, netdev_features_t features) +static int xsc_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t oper_features = netdev->features; int err = 0; @@ -2548,15 +2671,27 @@ int xsc_eth_set_features(struct net_device *netdev, netdev_features_t features) static netdev_features_t xsc_fix_features(struct net_device *netdev, netdev_features_t features) { + struct xsc_adapter *adapter = netdev_priv(netdev); + if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_RX)) features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_RX; + + if ((features & NETIF_F_TSO) && + xsc_support_hw_feature(adapter->xdev, XSC_HW_OFFLOAD_UNSUPPORT)) + features &= ~NETIF_F_TSO; + + if ((features & NETIF_F_TSO6) && + xsc_support_hw_feature(adapter->xdev, XSC_HW_OFFLOAD_UNSUPPORT)) + features &= ~NETIF_F_TSO6; + return features; } -u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) +static u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { - int txq_ix, up = 0; + int txq_ix = 0; + int up = 0; u16 num_channels; struct xsc_adapter *adapter = netdev_priv(dev); @@ -2571,10 +2706,14 @@ u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, if (skb_vlan_tag_present(skb)) { up = skb_vlan_tag_get_prio(skb); - if (adapter->nic_param.num_tc > 1) - up = up % (adapter->nic_param.num_tc - 1) + 1; - else + if (adapter->nic_param.num_tc > 1) { + if 
(is_dpu_soc_pf(adapter->xdev->pdev->device)) + up = up % (adapter->nic_param.num_tc - 1) + 1; + else + up = up % adapter->nic_param.num_tc; + } else { up = 0; + } } /* channel_ix can be larger than num_channels since @@ -2646,6 +2785,11 @@ static int xsc_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int return 0; } +static void xsc_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + netdev_err(dev, "TX timeout detected\n"); +} + static const struct net_device_ops xsc_netdev_ops = { .ndo_open = xsc_eth_open, .ndo_stop = xsc_eth_close, @@ -2656,7 +2800,7 @@ static const struct net_device_ops xsc_netdev_ops = { .ndo_set_mac_address = xsc_eth_set_mac, .ndo_change_mtu = xsc_eth_change_mtu, - .ndo_tx_timeout = NULL, + .ndo_tx_timeout = xsc_tx_timeout, .ndo_set_tx_maxrate = NULL, .ndo_vlan_rx_add_vid = xsc_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = xsc_vlan_rx_kill_vid, @@ -2671,7 +2815,7 @@ static const struct net_device_ops xsc_netdev_ops = { .ndo_set_vf_link_state = xsc_set_vf_link_state, .ndo_get_stats64 = xsc_get_stats, .ndo_setup_tc = NULL, - .ndo_set_features = xsc_eth_set_features, + .ndo_set_features = xsc_set_features, .ndo_fix_features = xsc_fix_features, .ndo_fdb_add = NULL, .ndo_bridge_setlink = NULL, @@ -2688,8 +2832,11 @@ static const struct net_device_ops xsc_netdev_ops = { static int xsc_get_max_num_channels(struct xsc_core_device *xdev) { - return min_t(int, xdev->dev_res->eq_table.num_comp_vectors, - XSC_ETH_MAX_NUM_CHANNELS); +#ifdef NEED_CREATE_RX_THREAD + return 8; +#else + return min_t(int, xsc_get_eth_channel_num(xdev), XSC_ETH_MAX_NUM_CHANNELS); +#endif } static int xsc_eth_netdev_init(struct xsc_adapter *adapter) @@ -2755,12 +2902,7 @@ static const struct xsc_tirc_config tirc_default_config[XSC_NUM_INDIR_TIRS] = { }, }; -struct xsc_tirc_config xsc_tirc_get_default_config(enum xsc_traffic_types tt) -{ - return tirc_default_config[tt]; -} - -void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) +static void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) { enum xsc_traffic_types tt; @@ -2778,7 +2920,7 @@ void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) rss_params->rss_hash_tmpl = XSC_HASH_IP_PORTS | XSC_HASH_IP6_PORTS; } -void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_num) +static void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_num) { struct xsc_core_device *xdev = adapter->xdev; struct xsc_eth_params *params = &adapter->nic_param; @@ -2786,7 +2928,7 @@ void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_nu params->mtu = SW_DEFAULT_MTU; params->num_tc = tc_num; - params->comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + params->comp_vectors = ch_num; params->max_num_ch = ch_num; params->num_channels = ch_num; @@ -2806,7 +2948,7 @@ void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_nu params->max_num_ch, params->num_tc); } -void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) +static void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct xsc_core_device *xdev = adapter->xdev; @@ -2815,7 +2957,9 @@ void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; netdev->netdev_ops = &xsc_netdev_ops; +#ifdef CONFIG_XSC_CORE_EN_DCB netdev->dcbnl_ops = &xsc_dcbnl_ops; +#endif eth_set_ethtool_ops(netdev); netdev->min_mtu = SW_MIN_MTU; @@ 
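/*
 * Worked example for the queue-selection change above: with num_tc = 4 and a
 * VLAN priority of 5, a DPU SoC PF maps the packet to tc = 5 % (4 - 1) + 1 = 3
 * (keeping tc 0 reserved), while any other function maps it to tc = 5 % 4 = 1.
 */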
-2823,25 +2967,35 @@ void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) /*mtu - macheaderlen - ipheaderlen should be aligned in 8B*/ netdev->mtu = SW_DEFAULT_MTU; - netdev->vlan_features |= NETIF_F_SG; - netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;//NETIF_F_HW_CSUM; - netdev->vlan_features |= NETIF_F_GRO; - netdev->vlan_features |= NETIF_F_TSO;//NETIF_F_TSO_ECN - netdev->vlan_features |= NETIF_F_TSO6; - //todo: enable rx csum - netdev->vlan_features |= NETIF_F_RXCSUM; - netdev->vlan_features |= NETIF_F_RXHASH; - netdev->vlan_features |= NETIF_F_GSO_PARTIAL; + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_GRO | + NETIF_F_GSO_PARTIAL; + + if (!xsc_support_hw_feature(xdev, XSC_HW_OFFLOAD_UNSUPPORT)) { + netdev->vlan_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_RXHASH | + NETIF_F_TSO | + NETIF_F_TSO6; + } netdev->hw_features = netdev->vlan_features; - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; - netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + if (!xsc_support_hw_feature(xdev, XSC_HW_OFFLOAD_UNSUPPORT)) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } if (xsc_vxlan_allowed(xdev) || xsc_geneve_tx_allowed(xdev) || xsc_any_tunnel_proto_supported(xdev)) { - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - netdev->hw_enc_features |= NETIF_F_TSO; //NETIF_F_TSO_ECN - netdev->hw_enc_features |= NETIF_F_TSO6; + if (!xsc_support_hw_feature(xdev, XSC_HW_OFFLOAD_UNSUPPORT)) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + } netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; } @@ -2871,16 +3025,11 @@ static void xsc_eth_nic_cleanup(struct xsc_adapter *adapter) kfree(adapter->txq2sq); } -/* create xdev resource,pd/domain/mkey */ -int xsc_eth_create_xdev_resources(struct xsc_core_device *xdev) -{ - return 0; -} - static int xsc_eth_init_nic_tx(struct xsc_adapter *adapter) { - /*create tis table*/ +#ifdef CONFIG_XSC_CORE_EN_DCB xsc_dcbnl_initialize(adapter); +#endif return 0; } @@ -2890,14 +3039,8 @@ static int xsc_eth_cleanup_nic_tx(struct xsc_adapter *adapter) return 0; } -/* init tx: create hw resource, set register according to spec */ -int xsc_eth_init_nic_rx(struct xsc_adapter *adapter) +static int xsc_eth_init_nic_rx(struct xsc_adapter *adapter) { - /* create rqt and tir table - * tir table:base on traffic type like ip4_tcp/ipv6_tcp/ - * each rqt table for a traffic type - */ - return 0; } @@ -2917,7 +3060,7 @@ static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter) xsc_core_warn(adapter->xdev, "get mac failed %d, generate random mac...", ret); eth_random_addr(mac); } - memcpy(netdev->dev_addr, mac, 6); + ether_addr_copy(netdev->dev_addr, mac); if (!is_valid_ether_addr(netdev->perm_addr)) memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); @@ -2926,15 +3069,23 @@ static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter) static int xsc_eth_nic_enable(struct xsc_adapter *adapter) { struct xsc_core_device *xdev = adapter->xdev; + u16 cfg_mtu; + int ret; if (xsc_core_is_pf(xdev)) xsc_lag_add_netdev(adapter->netdev); xsc_eth_l2_addr_init(adapter); + ret = xsc_eth_get_mtu(adapter, &cfg_mtu); + if (ret == 0) + adapter->nic_param.mtu = cfg_mtu; + xsc_eth_set_hw_mtu(xdev, XSC_SW2HW_MTU(adapter->nic_param.mtu), XSC_SW2HW_RX_PKT_LEN(adapter->nic_param.mtu)); +#ifdef CONFIG_XSC_CORE_EN_DCB xsc_dcbnl_init_app(adapter); +#endif rtnl_lock(); 
netif_device_attach(adapter->netdev); @@ -2994,10 +3145,6 @@ static int xsc_eth_attach(struct xsc_core_device *xdev, struct xsc_adapter *adap if (netif_device_present(adapter->netdev)) return 0; - err = xsc_eth_create_xdev_resources(xdev); - if (err) - return err; - err = xsc_attach_netdev(adapter); if (err) return err; @@ -3014,6 +3161,131 @@ static void xsc_eth_detach(struct xsc_core_device *xdev, struct xsc_adapter *ada xsc_detach_netdev(adapter); } +static inline void _xsc_get_ifname(struct net_device *netdev, u8 *ifname, int len) +{ + memcpy(ifname, netdev->name, len); +} + +static void xsc_get_ifname(void *dev, u8 *ifname, int len) +{ + struct xsc_adapter *adapter = + (struct xsc_adapter *)((struct xsc_core_device *)dev)->eth_priv; + struct net_device *netdev = adapter->netdev; + + _xsc_get_ifname(netdev, ifname, len); +} + +static void _xsc_get_ip_addr(struct net_device *netdev, u32 *ip_addr) +{ + struct in_device *in_dev; + struct in_ifaddr *ifa; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(netdev); + if (!in_dev) { + *ip_addr = 0; + goto out; + } + + ifa = in_dev->ifa_list; + if (!ifa) { + *ip_addr = 0; + goto out; + } + + *ip_addr = ntohl(ifa->ifa_address); + +out: + rcu_read_unlock(); +} + +static void xsc_get_ip_addr(void *dev, u32 *ip_addr) +{ + struct xsc_adapter *adapter = + (struct xsc_adapter *)((struct xsc_core_device *)dev)->eth_priv; + struct net_device *netdev = adapter->netdev; + + _xsc_get_ip_addr(netdev, ip_addr); +} + +static get_ibdev_name_func_t _xsc_get_mdev_ibdev_name; +void xsc_register_get_mdev_ibdev_name_func(get_ibdev_name_func_t fn) +{ + _xsc_get_mdev_ibdev_name = fn; +} +EXPORT_SYMBOL(xsc_register_get_mdev_ibdev_name_func); + +static int xsc_get_mdev_info(void *data) +{ + struct xsc_devinfo *devinfo = data; + struct net *net; + struct net_device *ndev; + struct pci_dev *pdev; + int count = 0; + + rcu_read_lock(); + down_read(&net_rwsem); + for_each_net(net) { + for_each_netdev(net, ndev) { + if (!ndev->dev.parent) + continue; + pdev = to_pci_dev(ndev->dev.parent); + if (pdev->vendor != PCI_VENDOR_ID_MELLANOX) + continue; + + devinfo->domain = cpu_to_be32(pci_domain_nr(pdev->bus)); + devinfo->bus = cpu_to_be32(pdev->bus->number); + devinfo->devfn = cpu_to_be32(pdev->devfn); + _xsc_get_ifname(ndev, devinfo->ifname, MAX_IFNAME_LEN); + _xsc_get_ip_addr(ndev, &devinfo->ip_addr); + devinfo->ip_addr = cpu_to_be32(devinfo->ip_addr); + devinfo->vendor_id = cpu_to_be32(PCI_VENDOR_ID_MELLANOX); + if (_xsc_get_mdev_ibdev_name) + _xsc_get_mdev_ibdev_name(ndev, devinfo->ibdev_name, MAX_IFNAME_LEN); + devinfo++; + count++; + } + } + up_read(&net_rwsem); + rcu_read_unlock(); + + return count; +} + +static void xsc_create_netlink_socket(struct xsc_core_device *xdev) +{ + int ret; + struct socket *sock; + struct file *file; + + ret = sock_create_kern(&init_net, AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE, &sock); + if (ret) { + xsc_core_err(xdev, "failed to create kernel netlink socket, err = %d\n", ret); + return; + } + + file = sock_alloc_file(sock, O_RDWR, 0); + if (!file) { + xsc_core_err(xdev, "failed to alloc file for netlink socket\n"); + sock_release(sock); + return; + } + xdev->sock = sock; +} + +static void xsc_bind_netlink_socket(struct xsc_core_device *xdev) +{ + struct sockaddr_nl addr; + int ret; + + memset(&addr, 0, sizeof(addr)); + addr.nl_family = AF_NETLINK; + + ret = kernel_bind(xdev->sock, (struct sockaddr *)&addr, sizeof(addr)); + if (ret) + xsc_core_err(xdev, "failed to bind kernel netlink socket, err = %d\n", ret); +} + static void *xsc_eth_add(struct 
xsc_core_device *xdev) { int err = -1; @@ -3042,6 +3314,8 @@ static void *xsc_eth_add(struct xsc_core_device *xdev) adapter->dev = &adapter->pdev->dev; adapter->xdev = (void *)xdev; xdev->eth_priv = adapter; + xdev->get_ifname = xsc_get_ifname; + xdev->get_ip_addr = xsc_get_ip_addr; err = xsc_eth_nic_init(adapter, rep_priv, num_chl, num_tc); if (err) { @@ -3072,6 +3346,12 @@ static void *xsc_eth_add(struct xsc_core_device *xdev) xdev->netdev = (void *)netdev; adapter->status = XSCALE_ETH_DRIVER_INIT; + if (is_dpu_host_pf(xdev->pdev->device)) { + xsc_create_netlink_socket(xdev); + xsc_bind_netlink_socket(xdev); + xdev->handle_netlink_cmd = xsc_handle_netlink_cmd; + xsc_register_get_mdev_info_func(xsc_get_mdev_info); + } return adapter; @@ -3096,6 +3376,9 @@ static void xsc_eth_remove(struct xsc_core_device *xdev, void *context) if (!xdev) return; + if (is_dpu_host_pf(xdev->pdev->device)) + sock_release(xdev->sock); + adapter = xdev->eth_priv; if (!adapter) { xsc_core_warn(xdev, "failed! adapter is null\n"); @@ -3126,7 +3409,7 @@ static struct xsc_interface xsc_interface = { .protocol = XSC_INTERFACE_PROTOCOL_ETH, }; -int xsc_net_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +static int xsc_net_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) { pr_info("xsc net driver recv %lu event\n", action); if (xsc_get_exit_flag()) diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c index 814a438dff81736a98b8af3e81d4447aed1c7cd6..eb507dd9c6f527dc60154070d4fb28f29d61c02c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c @@ -27,3 +27,98 @@ #include "xsc_eth_txrx.h" #include "xsc_eth_stats.h" #include "xsc_eth_debug.h" + +#ifdef NEED_CREATE_RX_THREAD + +extern void xsc_cq_notify_hw(struct xsc_cq *cq); + +DEFINE_PER_CPU(bool, txcqe_get); +EXPORT_PER_CPU_SYMBOL(txcqe_get); + +u32 xsc_eth_process_napi(struct xsc_adapter *adapter) +{ + int work_done = 0; + bool err = false; + int budget = 1; + int i, chl; + int errtx = false; + struct xsc_channel *c; + struct xsc_rq *prq; + struct xsc_ch_stats *ch_stats; + + if (adapter->status == XSCALE_ETH_DRIVER_OK) { + for (chl = 0; chl < adapter->channels.num_chl; chl++) { + c = &adapter->channels.c[chl]; + prq = &c->qp.rq[0]; + ch_stats = c->stats; + ch_stats->poll++; + + for (i = 0; i < c->num_tc; i++) { + errtx |= xsc_poll_tx_cq(&c->qp.sq[i].cq, budget); + ETH_DEBUG_LOG("errtx=%u.\r\n", errtx); + if (likely(__this_cpu_read(txcqe_get))) { + xsc_cq_notify_hw(&c->qp.sq[i].cq); + __this_cpu_write(txcqe_get, false); + } + } + + work_done = xsc_poll_rx_cq(&prq->cq, budget); + + ETH_DEBUG_LOG("work_done=%d.\r\n", work_done); + + if (work_done != 0) { + xsc_cq_notify_hw(&prq->cq); + err |= prq->post_wqes(prq, false); + + ETH_DEBUG_LOG("err=%u.\r\n", err); + } else { + ETH_DEBUG_LOG("no-load.\r\n"); + } + + ch_stats->arm++; + } + } + + return XSCALE_RET_SUCCESS; +} + +int xsc_eth_rx_thread(void *arg) +{ + u32 ret = XSCALE_RET_SUCCESS; + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + while (kthread_should_stop() == 0) { + if (need_resched()) + schedule(); + ret = xsc_eth_process_napi(adapter); + if (ret != XSCALE_RET_SUCCESS) + ETH_DEBUG_LOG("unexpected branch.\r\n"); + + ETH_DEBUG_LOG("adapter=%p\r\n", adapter); + } + ETH_DEBUG_LOG("do_exit.\r\n"); + + return XSCALE_RET_SUCCESS; +} + +u32 g_thread_count; +u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter) +{ + 
struct task_struct *task = NULL; + + task = kthread_create(xsc_eth_rx_thread, (void *)adapter, + "xsc_rx%i", g_thread_count); + if (!task) + return XSCALE_RET_ERROR; + + ETH_DEBUG_LOG("thread_count=%d\r\n", g_thread_count); + + kthread_bind(task, g_thread_count); + wake_up_process(task); + adapter->task = task; + + g_thread_count++; + + return XSCALE_RET_SUCCESS; +} +#endif /* NEED_CREATE_RX_THREAD */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h index 08c0ac07d6da0f3b3083e1986f03a1a070ec1e73..1378be66b6156f6e6e16df29e5ffcdaf4a20a7b4 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h @@ -39,4 +39,5 @@ static inline bool xsc_any_tunnel_proto_supported(struct xsc_core_device *dev) { return false; } + #endif /* XSC_ACCEL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c index d784ba3c05e6452ed98be9520cbe3c9840a5bc0e..40b8a41713dd951e9f43c2ac331a57850f636eb9 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c @@ -14,6 +14,10 @@ #include "xsc_eth_debug.h" #include "xsc_hw_comm.h" +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + #define XSC_100MB (100000) #define XSC_1GB (1000000) #define XSC_RATE_LIMIT_BASE (16000) @@ -33,6 +37,7 @@ enum { XSC_LOWEST_PRIO_GROUP = 0, }; +#ifdef CONFIG_XSC_CORE_EN_DCB static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state); static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio); static u8 xsc_dcbnl_setall(struct net_device *netdev); @@ -298,17 +303,17 @@ static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 pfcbitmap) { u8 i; u8 pfc_en[IEEE_8021QAZ_MAX_TCS] = {0}; - struct xsc_pfc_set req; - struct xsc_pfc_set rsp; + struct xsc_pfc_set_new req; + struct xsc_pfc_set_new rsp; xsc_pfc_bitmap2array(pfcbitmap, pfc_en); memset(&req, 0, sizeof(struct xsc_pfc_set)); for (i = 0; i <= xsc_max_tc(xdev); i++) { req.pfc_on = pfc_en[i]; - req.priority = i; + req.req_prio = i; xsc_core_dbg(xdev, "%s: prio %d, pfc %d\n", __func__, i, req.pfc_on); - xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC_NEW, &req, &rsp); } return 0; } @@ -1474,3 +1479,4 @@ void xsc_dcbnl_initialize(struct xsc_adapter *priv) xsc_ets_init(priv); } } +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h index 8b11a2b285c335d6521c6bb112711b38d6dacec2..a1d9bb0dfb9f0a3152fd452f2e2dfddf01109583 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h @@ -17,7 +17,9 @@ #define XSCALE_ETH_PHYPORT_DOWN 0 #define XSCALE_ETH_PHYPORT_UP 1 +#ifdef CONFIG_DCB #define CONFIG_XSC_CORE_EN_DCB 1 +#endif #define XSC_PAGE_CACHE 1 #define XSCALE_DRIVER_NAME "xsc_eth" @@ -135,7 +137,6 @@ struct xsc_adapter { struct workqueue_struct *workq; struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; - struct work_struct event_work; struct xsc_eth_channels channels; struct xsc_sq **txq2sq; @@ -161,7 +162,11 @@ struct xsc_rx_buffer { dma_addr_t dma; u32 len; struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) u32 page_offset; +#else + u16 page_offset; +#endif u16 pagecnt_bias; }; @@ -171,7 +176,11 @@ struct xsc_tx_buffer { dma_addr_t dma; u32 
len; struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) u32 page_offset; +#else + u16 page_offset; +#endif u16 pagecnt_bias; }; @@ -180,6 +189,11 @@ struct xsc_tx_wqe { struct xsc_wqe_data_seg data[]; }; +struct xsc_user_mode_attr { + u16 pkt_bitmap; + u16 dst_info[8]; +}; + typedef int (*xsc_eth_fp_preactivate)(struct xsc_adapter *priv); typedef int (*xsc_eth_fp_postactivate)(struct xsc_adapter *priv); @@ -193,8 +207,13 @@ int xsc_eth_get_link_info(struct xsc_adapter *adapter, struct xsc_event_linkinfo *plinkinfo); int xsc_eth_set_link_info(struct xsc_adapter *adapter, struct xsc_event_linkinfo *plinkinfo); - int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter); +int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter); +int xsc_eth_query_pkt_dst_info(struct xsc_adapter *adapter, u8 mac_bitmap, + u16 pkt_bitmap, u16 *dst_info); +int xsc_eth_modify_pkt_dst_info(struct xsc_adapter *adapter, u8 mac_bitmap, + u16 pkt_bitmap, u16 dst_info); + /* Use this function to get max num channels after netdev was created */ static inline int xsc_get_netdev_max_channels(struct xsc_adapter *adapter) @@ -210,9 +229,11 @@ static inline int xsc_get_netdev_max_tc(struct xsc_adapter *adapter) return adapter->nic_param.num_tc; } +#ifdef CONFIG_XSC_CORE_EN_DCB extern const struct dcbnl_rtnl_ops xsc_dcbnl_ops; int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets); void xsc_dcbnl_initialize(struct xsc_adapter *priv); void xsc_dcbnl_init_app(struct xsc_adapter *priv); void xsc_dcbnl_delete_app(struct xsc_adapter *priv); +#endif #endif /* XSC_ETH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h index 9b465c9fcd6987d33b374fca023bd6dde2d4bb1b..db24669e4928e8b6fdceadb7b97c19f6baa17240 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h @@ -11,14 +11,16 @@ #include "common/xsc_pph.h" #include "common/xsc_hsi.h" -#define SW_MIN_MTU 64 +#define SW_MIN_MTU ETH_MIN_MTU #define SW_DEFAULT_MTU 1500 #define SW_MAX_MTU 9600 #define XSC_ETH_HW_MTU_SEND 9800 /*need to obtain from hardware*/ #define XSC_ETH_HW_MTU_RECV 9800 /*need to obtain from hardware*/ #define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4) -#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + 14 + 8 + 4 + XSC_PPH_HEAD_LEN) +#define XSC_SW2HW_HLEN (14 + 8 + 4 + XSC_PPH_HEAD_LEN) +#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + XSC_SW2HW_HLEN) +#define XSC_HW2SW_MTU_SIZE(buf) ((buf) - XSC_SW2HW_HLEN) #define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256) #define XSC_RX_MAX_HEAD (256) @@ -127,7 +129,11 @@ struct xsc_eth_qp_attr { }; struct xsc_eth_rx_wqe_cyc { +#ifdef DECLARE_FLEX_ARRAY DECLARE_FLEX_ARRAY(struct xsc_wqe_data_seg, data); +#else + struct xsc_wqe_data_seg data[0]; +#endif }; struct xsc_eq_param { @@ -269,20 +275,4 @@ struct xsc_eth_redirect_rqt_param { }; }; -union xsc_send_doorbell { - struct{ - s32 next_pid : 16; - u32 qp_num : 15; - }; - u32 send_data; -}; - -union xsc_recv_doorbell { - struct{ - s32 next_pid : 13; - u32 qp_num : 15; - }; - u32 recv_data; -}; - #endif /* XSC_ETH_COMMON_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h index 46c44685fb419917b8bf113dfd80f0f66db37a8b..5e34982faa46aece80d0052c50956b059e3badef 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h @@ -7,4 +7,5 @@ #define 
XSC_ETH_COMPAT_H #define xsc_netdev_xmit_more(skb) netdev_xmit_more() -#endif /* XSC_ETH_COMPAT_H */ + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c index 8215d776df75fb3eb25dccb54ec69105bb249902..4a255b3d5d8cdd8a11a1e0862ee7417b620bcbca 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c @@ -7,11 +7,18 @@ #include #include #include +#include +#include +#include #include "common/xsc_core.h" #include "common/xsc_ioctl.h" #include "common/xsc_hsi.h" #include "common/xsc_port_ctrl.h" +#include "common/tunnel_cmd.h" #include "xsc_hw_comm.h" +#include "common/res_obj.h" +#include "xsc_eth.h" +#include "xsc_eth_ctrl.h" #define XSC_ETH_CTRL_NAME "eth_ctrl" @@ -52,6 +59,275 @@ static void decode_rlimit_get(void *data) resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); } +static void encode_roce_accl_set(void *data, u32 mac_port) +{ + struct xsc_roce_accl_set *req = + (struct xsc_roce_accl_set *)data; + + req->flag = __cpu_to_be32(req->flag); + req->sr_timeout = __cpu_to_be64(req->sr_timeout); + req->sr_count = __cpu_to_be16(req->sr_count); + req->sr_drop_limit = __cpu_to_be16(req->sr_drop_limit); + req->ndp_dst_port = __cpu_to_be16(req->ndp_dst_port); + + req->cont_sport_start = __cpu_to_be16(req->cont_sport_start); + req->max_num_exponent = __cpu_to_be16(req->max_num_exponent); + req->disturb_period = __cpu_to_be16(req->disturb_period); + req->disturb_th = __cpu_to_be16(req->disturb_th); + req->mac_port = mac_port; +} + +static void decode_roce_accl_get(void *data) +{ + struct xsc_roce_accl_get *resp = + (struct xsc_roce_accl_get *)data; + + resp->sr_timeout = __be64_to_cpu(resp->sr_timeout); + resp->sr_count = __be16_to_cpu(resp->sr_count); + resp->sr_drop_limit = __be16_to_cpu(resp->sr_drop_limit); + resp->ndp_dst_port = __be16_to_cpu(resp->ndp_dst_port); + + resp->cont_sport_start = __be16_to_cpu(resp->cont_sport_start); + resp->max_num_exponent = __be16_to_cpu(resp->max_num_exponent); + resp->disturb_period = __be16_to_cpu(resp->disturb_period); + resp->disturb_th = __be16_to_cpu(resp->disturb_th); +} + +static void encode_roce_accl_get(void *data, u32 mac_port) +{ + u8 *resp = (u8 *)data; + + *resp = mac_port; +} + +static void encode_roce_accl_disc_sport_set(void *data, u32 mac_port) +{ + int i; + struct xsc_roce_accl_disc_sport *req = + (struct xsc_roce_accl_disc_sport *)data; + + for (i = 0; i < req->discrete_sports_num; i++) + req->discrete_sports[i] = __cpu_to_be16(req->discrete_sports[i]); + + req->discrete_sports_num = __cpu_to_be32(req->discrete_sports_num); + req->mac_port = mac_port; +} + +static void decode_roce_accl_disc_sport_get(void *data) +{ + int i; + struct xsc_roce_accl_disc_sport *resp = + (struct xsc_roce_accl_disc_sport *)data; + + resp->discrete_sports_num = __be32_to_cpu(resp->discrete_sports_num); + + if (resp->discrete_sports_num > XSC_DISCRETE_SPORT_NUM_MAX) { + pr_err("sports_num:%u, out of range\n", resp->discrete_sports_num); + return; + } + + for (i = 0; i < resp->discrete_sports_num; i++) + resp->discrete_sports[i] = __be16_to_cpu(resp->discrete_sports[i]); +} + +static void encode_perf_rate_measure(void *data, u32 mac_port) +{ + struct xsc_perf_rate_measure *rate_m = (struct xsc_perf_rate_measure *)data; + int i; + + rate_m->qp_num = __cpu_to_be32(rate_m->qp_num); + rate_m->hw_ts = __cpu_to_be32(rate_m->hw_ts); + for (i = 0; i < XSC_QP_MEASURE_QP_NUM_MAX; i++) { + rate_m->qp_id_list[i] = 
__cpu_to_be32(rate_m->qp_id_list[i]); + rate_m->qp_byte_cnt[i] = __cpu_to_be32(rate_m->qp_byte_cnt[i]); + } +} + +static void decode_perf_rate_measure(void *data) +{ + struct xsc_perf_rate_measure *rate_m = (struct xsc_perf_rate_measure *)data; + int i; + + rate_m->qp_num = __be32_to_cpu(rate_m->qp_num); + rate_m->hw_ts = __be32_to_cpu(rate_m->hw_ts); + for (i = 0; i < XSC_QP_MEASURE_QP_NUM_MAX; i++) { + rate_m->qp_id_list[i] = __be32_to_cpu(rate_m->qp_id_list[i]); + rate_m->qp_byte_cnt[i] = __be32_to_cpu(rate_m->qp_byte_cnt[i]); + } +} + +static void encode_roce_accl_next_set(void *data, u32 mac_port) +{ + struct xsc_roce_accl_next_set *req = + (struct xsc_roce_accl_next_set *)data; + int i; + + req->flag = __cpu_to_be64(req->flag); + req->sack_threshold = __cpu_to_be32(req->sack_threshold); + req->sack_timeout = __cpu_to_be32(req->sack_timeout); + req->ack_aggregation_mode = __cpu_to_be32(req->ack_aggregation_mode); + req->ack_aggregation_req_threshold = __cpu_to_be32(req->ack_aggregation_req_threshold); + req->ack_aggregation_rsp_window = __cpu_to_be32(req->ack_aggregation_rsp_window); + req->ack_aggregation_rsp_timeout = __cpu_to_be32(req->ack_aggregation_rsp_timeout); + req->path_num = __cpu_to_be32(req->path_num); + req->packet_spray_mode = __cpu_to_be32(req->packet_spray_mode); + req->qp_id = __cpu_to_be32(req->qp_id); + req->path_udp_sport_num = __cpu_to_be32(req->path_udp_sport_num); + for (i = 0; i < ROCE_ACCL_NEXT_PATH_UDP_SPORT_NUM_MAX; i++) + req->path_udp_sport[i] = __cpu_to_be32(req->path_udp_sport[i]); +} + +static void decode_roce_accl_next_get_sport(void *data) +{ + struct xsc_roce_accl_next_set *resp = + (struct xsc_roce_accl_next_set *)data; + int i; + + resp->sack_threshold = __be32_to_cpu(resp->sack_threshold); + resp->sack_timeout = __be32_to_cpu(resp->sack_timeout); + resp->ack_aggregation_mode = __be32_to_cpu(resp->ack_aggregation_mode); + resp->ack_aggregation_req_threshold = __be32_to_cpu(resp->ack_aggregation_req_threshold); + resp->ack_aggregation_rsp_window = __be32_to_cpu(resp->ack_aggregation_rsp_window); + resp->ack_aggregation_rsp_timeout = __be32_to_cpu(resp->ack_aggregation_rsp_timeout); + resp->path_num = __be32_to_cpu(resp->path_num); + resp->packet_spray_mode = __be32_to_cpu(resp->packet_spray_mode); + resp->qp_id = __be32_to_cpu(resp->qp_id); + resp->path_udp_sport_num = __be32_to_cpu(resp->path_udp_sport_num); + for (i = 0; i < ROCE_ACCL_NEXT_PATH_UDP_SPORT_NUM_MAX; i++) + resp->path_udp_sport[i] = __be32_to_cpu(resp->path_udp_sport[i]); +} + +static void decode_roce_accl_next_get(void *data) +{ + struct xsc_roce_accl_next_get *resp = + (struct xsc_roce_accl_next_get *)data; + + resp->sack_threshold = __be32_to_cpu(resp->sack_threshold); + resp->sack_timeout = __be32_to_cpu(resp->sack_timeout); + resp->ack_aggregation_mode = __be32_to_cpu(resp->ack_aggregation_mode); + resp->ack_aggregation_req_threshold = __be32_to_cpu(resp->ack_aggregation_req_threshold); + resp->ack_aggregation_rsp_window = __be32_to_cpu(resp->ack_aggregation_rsp_window); + resp->ack_aggregation_rsp_timeout = __be32_to_cpu(resp->ack_aggregation_rsp_timeout); + resp->path_num = __be32_to_cpu(resp->path_num); + resp->packet_spray_mode = __be32_to_cpu(resp->packet_spray_mode); +} + +static void encode_flexcc_next_set(void *data, u32 mac_port) +{ + struct yun_cc_next_cmd_hdr *req = + (struct yun_cc_next_cmd_hdr *)data; + u32 tmp; + + switch (req->cmd) { + case YUN_CC_CMD_SET_SP_TH: + ((struct yun_cc_next_sp_th *)req->data)->threshold = + cpu_to_be32(((struct yun_cc_next_sp_th 
*)req->data)->threshold); + break; + case YUN_CC_CMD_SET_RTT_INTERVAL_INBAND: + tmp = ((struct yun_cc_next_rtt_interval_inband *)req->data)->interval; + ((struct yun_cc_next_rtt_interval_inband *)req->data)->interval = + cpu_to_be32(tmp); + break; + case YUN_CC_CMD_SET_RTT_INTERVAL_OUTBAND: + tmp = ((struct yun_cc_next_rtt_interval_outband *)req->data)->interval; + ((struct yun_cc_next_rtt_interval_outband *)req->data)->interval = + cpu_to_be32(tmp); + break; + case YUN_CC_CMD_SET_BYTE_RST_INTERVAL: + tmp = ((struct yun_cc_next_byte_rst_interval *)req->data)->interval; + ((struct yun_cc_next_byte_rst_interval *)req->data)->interval = + cpu_to_be32(tmp); + break; + case YUN_CC_CMD_SET_BWU_INTERVAL: + tmp = ((struct yun_cc_next_bwu_interval *)req->data)->interval; + ((struct yun_cc_next_bwu_interval *)req->data)->interval = + cpu_to_be32(tmp); + break; + case YUN_CC_CMD_SET_CSP_DSCP: + ((struct yun_cc_next_csp_dscp *)req->data)->dscp = + cpu_to_be32(((struct yun_cc_next_csp_dscp *)req->data)->dscp); + break; + case YUN_CC_CMD_SET_RTT_DSCP_OUTBAND: + tmp = ((struct yun_cc_next_rtt_dscp_outband *)req->data)->dscp; + ((struct yun_cc_next_rtt_dscp_outband *)req->data)->dscp = + cpu_to_be32(tmp); + break; + case YUN_CC_CMD_SET_CSP_ECN_AGGREGATION: + ((struct yun_cc_csp_ecn_aggregation *)req->data)->agg = + cpu_to_be32(((struct yun_cc_csp_ecn_aggregation *)req->data)->agg); + break; + case YUN_CC_CMD_SET_CC_ALG: + ((struct yun_cc_next_cc_alg *)req->data)->user_alg_en = + cpu_to_be32(((struct yun_cc_next_cc_alg *)req->data)->user_alg_en); + ((struct yun_cc_next_cc_alg *)req->data)->slot_mask = + cpu_to_be32(((struct yun_cc_next_cc_alg *)req->data)->slot_mask); + ((struct yun_cc_next_cc_alg *)req->data)->slot = + cpu_to_be32(((struct yun_cc_next_cc_alg *)req->data)->slot); + break; + case YUN_CC_CMD_SET_ENABLE: + ((struct yun_cc_enable *)req->data)->en = + cpu_to_be32(((struct yun_cc_enable *)req->data)->en); + break; + case YUN_CC_CMD_SET_CE_PROC_INTERVAL: + ((struct yun_cc_next_ce_proc_interval *)req->data)->interval = + cpu_to_be32(((struct yun_cc_next_ce_proc_interval *)req->data)->interval); + break; + } +} + +static void decode_flexcc_next_get(void *data) +{ + struct yun_cc_next_get_all *resp = + (struct yun_cc_next_get_all *)data; + + resp->sp_threshold = __be32_to_cpu(resp->sp_threshold); + resp->rtt_interval_inband = __be32_to_cpu(resp->rtt_interval_inband); + resp->rtt_interval_outband = __be32_to_cpu(resp->rtt_interval_outband); + resp->byte_rst_interval = __be32_to_cpu(resp->byte_rst_interval); + resp->bwu_interval = __be32_to_cpu(resp->bwu_interval); + resp->csp_dscp = __be32_to_cpu(resp->csp_dscp); + resp->rtt_dscp_outband = __be32_to_cpu(resp->rtt_dscp_outband); + resp->csp_ecn_aggregation = __be32_to_cpu(resp->csp_ecn_aggregation); + resp->enable = __be32_to_cpu(resp->enable); + resp->ce_proc_interval = __be32_to_cpu(resp->ce_proc_interval); + resp->cc_alg = __be32_to_cpu(resp->cc_alg); + resp->cc_alg_mask = __be32_to_cpu(resp->cc_alg_mask); +} + +static void decode_flexcc_next_get_stat(void *data) +{ + struct yun_cc_next_get_all_stat *resp = + (struct yun_cc_next_get_all_stat *)data; + + resp->evt_sp_deliverd = __be32_to_cpu(resp->evt_sp_deliverd); + resp->evt_ce_deliverd = __be32_to_cpu(resp->evt_ce_deliverd); + resp->evt_rtt_req_deliverd = __be32_to_cpu(resp->evt_rtt_req_deliverd); + resp->evt_rtt_rsp_deliverd = __be32_to_cpu(resp->evt_rtt_rsp_deliverd); + resp->evt_rto_deliverd = __be32_to_cpu(resp->evt_rto_deliverd); + resp->evt_sack_deliverd = 
__be32_to_cpu(resp->evt_sack_deliverd); + resp->evt_byte_deliverd = __be32_to_cpu(resp->evt_byte_deliverd); + resp->evt_time_deliverd = __be32_to_cpu(resp->evt_time_deliverd); + resp->evt_bwu_deliverd = __be32_to_cpu(resp->evt_bwu_deliverd); + resp->evt_sp_aggregated = __be32_to_cpu(resp->evt_sp_aggregated); + resp->evt_ce_aggregated = __be32_to_cpu(resp->evt_ce_aggregated); + resp->evt_rtt_req_aggregated = __be32_to_cpu(resp->evt_rtt_req_aggregated); + resp->evt_rtt_rsp_aggregated = __be32_to_cpu(resp->evt_rtt_rsp_aggregated); + resp->evt_rto_aggregated = __be32_to_cpu(resp->evt_rto_aggregated); + resp->evt_sack_aggregated = __be32_to_cpu(resp->evt_sack_aggregated); + resp->evt_byte_aggregated = __be32_to_cpu(resp->evt_byte_aggregated); + resp->evt_time_aggregated = __be32_to_cpu(resp->evt_time_aggregated); + resp->evt_bwu_aggregated = __be32_to_cpu(resp->evt_bwu_aggregated); + resp->evt_sp_dropped = __be32_to_cpu(resp->evt_sp_dropped); + resp->evt_ce_dropped = __be32_to_cpu(resp->evt_ce_dropped); + resp->evt_rtt_req_dropped = __be32_to_cpu(resp->evt_rtt_req_dropped); + resp->evt_rtt_rsp_dropped = __be32_to_cpu(resp->evt_rtt_rsp_dropped); + resp->evt_rto_dropped = __be32_to_cpu(resp->evt_rto_dropped); + resp->evt_sack_dropped = __be32_to_cpu(resp->evt_sack_dropped); + resp->evt_byte_dropped = __be32_to_cpu(resp->evt_byte_dropped); + resp->evt_time_dropped = __be32_to_cpu(resp->evt_time_dropped); + resp->evt_bwu_dropped = __be32_to_cpu(resp->evt_bwu_dropped); +} + static int xsc_get_port_pfc(struct xsc_core_device *xdev, u8 *pfc, u8 pfc_size) { int err = 0; @@ -346,7 +622,108 @@ static int handle_pfc_cfg(struct xsc_core_device *xdev, return err; } -static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, +static void xsc_get_pfc_cfg_status(struct xsc_core_device *xdev, + u8 mac_port, u8 *status, u8 *comp, + u8 tunnel_cmd, struct xsc_ioctl_tunnel_hdr *tunnel_hdr) +{ + struct xsc_get_pfc_cfg_status_mbox_in req; + struct xsc_get_pfc_cfg_status_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_get_pfc_cfg_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_get_pfc_cfg_status_mbox_out)); + + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS_NEW); + req.mac_port = mac_port; + + if (tunnel_cmd) + xsc_tunnel_cmd_exec(xdev, + &req, sizeof(struct xsc_get_pfc_cfg_status_mbox_in), + &rsp, sizeof(struct xsc_get_pfc_cfg_status_mbox_out), + tunnel_hdr); + else + xsc_cmd_exec(xdev, + &req, sizeof(struct xsc_get_pfc_cfg_status_mbox_in), + &rsp, sizeof(struct xsc_get_pfc_cfg_status_mbox_out)); + *status = rsp.status; + *comp = rsp.comp; +} + +static int handle_pfc_cfg_new(struct xsc_core_device *xdev, + struct xsc_qos_mbox_in *in, int in_size, + struct xsc_qos_mbox_out *out, int out_size, + u8 tunnel_cmd, struct xsc_ioctl_tunnel_hdr *tunnel_hdr) +{ + const struct xsc_pfc_set_new *req = (struct xsc_pfc_set_new *)in->data; + u8 mac_port = in->req_prfx.mac_port; + int err = 0; + u8 status = SET_PFC_STATUS_MAX, comp = SET_PFC_COMP_MAX; + u32 timeout_cnt = 0; + + if (req->req_prio < 0 || req->req_prio > PFC_PRIO_MAX) { + xsc_core_err(xdev, + "PFC cfg fail, req invalid req_prio: %d\n", + req->req_prio); + out->hdr.status = EINVAL; + + return -EINVAL; + } + if (tunnel_cmd) + err = xsc_tunnel_cmd_exec(xdev, in, in_size, out, out_size, tunnel_hdr); + else + err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + if (out->hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + xsc_core_dbg(xdev, + "PFC cfg not support, status: %d\n", + out->hdr.status); + return err; + } else if (out->hdr.status == 
0) { + xsc_core_dbg(xdev, + "PFC cfg not required\n"); + return 0; + } else if (out->hdr.status == EAGAIN) { + xsc_core_dbg(xdev, + "Try again\n"); + return err; + } else if (out->hdr.status == EINPROGRESS) { + xsc_core_dbg(xdev, "PFC cfg in progress\n"); + } + + timeout_cnt = 0; + msleep(PFC_CFG_CHECK_SLEEP_TIME_MS); + while (timeout_cnt < PFC_CFG_CHECK_TIMEOUT_CNT) { + xsc_get_pfc_cfg_status(xdev, mac_port, &status, &comp, tunnel_cmd, tunnel_hdr); + if (status == SET_PFC_STATUS_INIT && + comp == SET_PFC_COMP_TIMEOUT) { + err = -ETIMEDOUT; + out->hdr.status = ETIMEDOUT; + xsc_core_dbg(xdev, + "PFC cfg timeout, rsp hdr status: %d\n", + out->hdr.status); + break; + } else if (status == SET_PFC_STATUS_INIT && + comp == SET_PFC_COMP_SUCCESS) { + err = 0; + out->hdr.status = 0; + xsc_core_dbg(xdev, "PFC cfg success\n"); + break; + } else if (status == SET_PFC_STATUS_IN_PROCESS) { + timeout_cnt++; + msleep(PFC_CFG_CHECK_SLEEP_TIME_MS); + } + } + + if (timeout_cnt == PFC_CFG_CHECK_TIMEOUT_CNT) { + err = -ETIMEDOUT; + out->hdr.status = ETIMEDOUT; + xsc_core_dbg(xdev, + "PFC cfg timeout, rsp hdr status: %d\n", + out->hdr.status); + } + + return err; +} + +static int _eth_ctrl_ioctl_pfc(struct xsc_core_device *xdev, struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, u16 expect_req_size, @@ -358,6 +735,10 @@ static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, struct xsc_qos_mbox_out *out; u16 user_size; int err; + struct xsc_ioctl_tunnel_hdr tunnel_hdr = {0}; + + if (hdr->attr.tunnel_cmd) + hdr->attr.length -= sizeof(tunnel_hdr); user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; if (hdr->attr.length != user_size) @@ -370,12 +751,21 @@ static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, if (!out) goto err_out; - err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); - if (err) - goto err; + if (hdr->attr.tunnel_cmd) { + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + goto err; + err = copy_from_user(&in->data, user_hdr->attr.data + sizeof(tunnel_hdr), + expect_req_size); + if (err) + goto err; + } else { + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + } in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); - in->hdr.ver = cpu_to_be16(hdr->attr.ver); in->req_prfx.mac_port = xdev->mac_port; if (encode) @@ -384,6 +774,83 @@ static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, if (hdr->attr.opcode == XSC_CMD_OP_IOCTL_SET_PFC) err = handle_pfc_cfg(xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); + else + err = handle_pfc_cfg_new(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size, + hdr->attr.tunnel_cmd, &tunnel_hdr); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_qos_mbox_in *in; + struct xsc_qos_mbox_out *out; + u16 user_size; + int err; + struct xsc_ioctl_tunnel_hdr tunnel_hdr = {0}; + + if 
(hdr->attr.tunnel_cmd) + hdr->attr.length -= sizeof(tunnel_hdr); + user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + if (hdr->attr.tunnel_cmd) { + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + goto err; + err = copy_from_user(&in->data, user_hdr->attr.data + sizeof(tunnel_hdr), + expect_req_size); + if (err) + goto err; + } else { + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + } + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->req_prfx.mac_port = xdev->mac_port; + + if (encode) + encode((void *)in->data, xdev->mac_port); + + if (hdr->attr.tunnel_cmd) + err = xsc_tunnel_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size, &tunnel_hdr); else err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, sizeof(*out) + expect_resp_size); @@ -421,6 +888,88 @@ static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, struct xsc_hwc_mbox_out *out; u16 user_size; int err; + struct xsc_ioctl_tunnel_hdr tunnel_hdr; + + user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; + if (hdr->attr.tunnel_cmd) + hdr->attr.length -= sizeof(tunnel_hdr); + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + if (hdr->attr.tunnel_cmd) { + err = copy_from_user(&tunnel_hdr, user_hdr->attr.data, sizeof(tunnel_hdr)); + if (err) + goto err; + err = copy_from_user(&in->data, user_hdr->attr.data + sizeof(tunnel_hdr), + expect_req_size); + if (err) + goto err; + } else { + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + } + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + if (hdr->attr.tunnel_cmd) + err = xsc_tunnel_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size, &tunnel_hdr); + else + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + + if (err) + goto err; + + if (out->hdr.status) + xsc_core_info(xdev, "hwconfig, rsp hdr status: %d\n", + out->hdr.status); + + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int _eth_ctrl_ioctl_roce_accl(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_roce_accl_mbox_in *in; + struct xsc_roce_accl_mbox_out *out; + u16 user_size; + int err; user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; if (hdr->attr.length != user_size) @@ -466,6 +1015,239 @@ static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, return -EFAULT; } +static int _eth_ctrl_ioctl_rate_measure(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_perf_mbox_in *in; + struct xsc_perf_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int _eth_ctrl_ioctl_roce_accl_next(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_roce_accl_next_mbox_in *in; + struct xsc_roce_accl_next_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int xsc_ioctl_netlink_cmd(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + u8 *nlmsg; + int nlmsg_len; + int err = 0; + struct xsc_cmd_netlink_msg_mbox_in *in; + struct xsc_cmd_netlink_msg_mbox_out out; + int inlen; + struct xsc_ioctl_tunnel_hdr tunnel_hdr; + + nlmsg_len = hdr->attr.length; + nlmsg = kvzalloc(nlmsg_len, GFP_KERNEL); + if (!nlmsg) + return -ENOMEM; + + err = copy_from_user(nlmsg, user_hdr->attr.data, nlmsg_len); + if (err) + goto err; + + inlen = sizeof(*in) + nlmsg_len; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + goto err; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_NETLINK); + in->nlmsg_len = cpu_to_be16(nlmsg_len); + memcpy(in->data, nlmsg, nlmsg_len); + memset(&tunnel_hdr, 0, sizeof(tunnel_hdr)); + err = xsc_tunnel_cmd_exec(xdev, in, inlen, &out, sizeof(out), &tunnel_hdr); + + kvfree(in); + kvfree(nlmsg); + + if (err || out.hdr.status) + err = -EFAULT; + + return err; +err: + kvfree(nlmsg); + return -EFAULT; +} + +void xsc_handle_netlink_cmd(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_cmd_netlink_msg_mbox_in *_in = in; + struct xsc_cmd_netlink_msg_mbox_out *_out = out; + u8 *nlmsg = _in->data; + int nlmsg_len = _in->nlmsg_len; + int err; + struct socket *sock = xdev->sock; + struct kvec iov[1]; + struct msghdr msg; + + memset(&msg, 0, sizeof(msg)); + iov[0].iov_base = nlmsg; + iov[0].iov_len = nlmsg_len; + err = kernel_sendmsg(sock, &msg, iov, 1, nlmsg_len); + _out->hdr.status = err; +} + +static int _eth_ctrl_ioctl_flexcc_next(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_flexcc_next_mbox_in *in; + struct xsc_flexcc_next_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (user_size > YUN_CC_CMD_DATA_LEN_MAX) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, struct xsc_ioctl_hdr __user *user_hdr) { @@ -509,7 +1291,8 @@ static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, 0, sizeof(struct xsc_default_pri_get), NULL, NULL); case XSC_CMD_OP_IOCTL_SET_PFC: - return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + case XSC_CMD_OP_IOCTL_SET_PFC_NEW: + return _eth_ctrl_ioctl_pfc(xdev, user_hdr, &hdr, sizeof(struct xsc_pfc_set), sizeof(struct xsc_pfc_set), NULL, NULL); @@ -572,6 +1355,57 @@ static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, 0, sizeof(struct xsc_watchdog_period_get), NULL, decode_watchdog_get); + case XSC_CMD_OP_IOCTL_SET_ROCE_ACCL: + return _eth_ctrl_ioctl_roce_accl(xdev, user_hdr, &hdr, + sizeof(struct xsc_roce_accl_set), 0, + encode_roce_accl_set, NULL); + case XSC_CMD_OP_IOCTL_GET_ROCE_ACCL: + return _eth_ctrl_ioctl_roce_accl(xdev, user_hdr, &hdr, + sizeof(u8), sizeof(struct xsc_roce_accl_get), + encode_roce_accl_get, decode_roce_accl_get); + case XSC_CMD_OP_IOCTL_SET_ROCE_ACCL_DISC_SPORT: + return _eth_ctrl_ioctl_roce_accl(xdev, user_hdr, &hdr, + sizeof(struct xsc_roce_accl_disc_sport), 0, + encode_roce_accl_disc_sport_set, NULL); + case XSC_CMD_OP_IOCTL_GET_ROCE_ACCL_DISC_SPORT: + return _eth_ctrl_ioctl_roce_accl(xdev, user_hdr, &hdr, sizeof(u8), + sizeof(struct xsc_roce_accl_disc_sport), + encode_roce_accl_get, + decode_roce_accl_disc_sport_get); + case XSC_CMD_OP_IOCTL_GET_BYTE_CNT: + return _eth_ctrl_ioctl_rate_measure(xdev, user_hdr, &hdr, + sizeof(struct xsc_perf_rate_measure), + sizeof(struct xsc_perf_rate_measure), + encode_perf_rate_measure, + decode_perf_rate_measure); + case XSC_CMD_OP_IOCTL_SET_ROCE_ACCL_NEXT: + return _eth_ctrl_ioctl_roce_accl_next(xdev, user_hdr, &hdr, + sizeof(struct xsc_roce_accl_next_set), 0, + encode_roce_accl_next_set, NULL); + case XSC_CMD_OP_IOCTL_GET_ROCE_ACCL_NEXT: + return _eth_ctrl_ioctl_roce_accl_next(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_roce_accl_next_get), + NULL, decode_roce_accl_next_get); + case XSC_CMD_OP_IOCTL_NETLINK: + return xsc_ioctl_netlink_cmd(xdev, user_hdr, &hdr); + case XSC_CMD_OP_IOCTL_GET_SPORT_ROCE_ACCL_NEXT: + return _eth_ctrl_ioctl_roce_accl_next(xdev, user_hdr, &hdr, + sizeof(struct xsc_roce_accl_next_set), + sizeof(struct xsc_roce_accl_next_set), + encode_roce_accl_next_set, + decode_roce_accl_next_get_sport); + case XSC_CMD_OP_IOCTL_SET_FLEXCC_NEXT: + return _eth_ctrl_ioctl_flexcc_next(xdev, user_hdr, 
&hdr, + YUN_CC_CMD_DATA_LEN_MAX, 0, + encode_flexcc_next_set, NULL); + case XSC_CMD_OP_IOCTL_GET_FLEXCC_NEXT: + return _eth_ctrl_ioctl_flexcc_next(xdev, user_hdr, &hdr, + 0, sizeof(struct yun_cc_next_get_all), + NULL, decode_flexcc_next_get); + case XSC_CMD_OP_IOCTL_GET_STAT_FLEXCC_NEXT: + return _eth_ctrl_ioctl_flexcc_next(xdev, user_hdr, &hdr, + 0, sizeof(struct yun_cc_next_get_all_stat), + NULL, decode_flexcc_next_get_stat); default: return TRY_NEXT_CB; } @@ -603,6 +1437,138 @@ static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, return err; } +static void xsc_eth_restore_nic_hca(void *data) +{ + struct xsc_res_obj *obj = (struct xsc_res_obj *)data; + struct xsc_bdf_file *file = obj->file; + + xsc_eth_enable_nic_hca((struct xsc_adapter *)file->xdev->eth_priv); + xsc_free_user_mode_obj(file, XSC_IOCTL_OPCODE_VF_USER_MODE); +} + +static void xsc_eth_restore_pkt_dst_info(void *data) +{ + struct xsc_res_obj *obj = (struct xsc_res_obj *)data; + struct xsc_bdf_file *file = obj->file; + struct xsc_user_mode_attr *attrs = (struct xsc_user_mode_attr *)obj->data; + int i, j; + + for (i = 0; i < XSC_MAX_MAC_NUM; i++) { + if (!attrs[i].pkt_bitmap) + continue; + + for (j = 0; j < XSC_USER_MODE_FWD_PKT_NUM; j++) { + if (!(attrs[i].pkt_bitmap & BIT(j))) + continue; + + xsc_eth_modify_pkt_dst_info(file->xdev->eth_priv, BIT(i), + BIT(j), attrs[i].dst_info[j]); + } + } + xsc_free_user_mode_obj(file, XSC_IOCTL_OPCODE_PF_USER_MODE); +} + +static int xsc_change_user_mode(struct xsc_bdf_file *file, u16 opcode, + struct xsc_ioctl_user_mode_attr *attr) +{ + struct xsc_user_mode_attr *user_attr = NULL; + int i, err = 0; + + if (attr->enable) { + if (opcode == XSC_IOCTL_OPCODE_VF_USER_MODE) { + err = xsc_alloc_user_mode_obj(file, xsc_eth_restore_nic_hca, + opcode, (char *)attr, 0); + goto out; + } + + if (!attr->pkt_bitmap || !attr->mac_bitmap) + return 0; + + user_attr = kcalloc(XSC_MAX_MAC_NUM, sizeof(struct xsc_user_mode_attr), GFP_KERNEL); + if (unlikely(!user_attr)) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < XSC_MAX_MAC_NUM; i++) { + if (!(attr->mac_bitmap & BIT(i))) + continue; + + user_attr[i].pkt_bitmap = attr->pkt_bitmap; + err = xsc_eth_query_pkt_dst_info(file->xdev->eth_priv, BIT(i), + attr->pkt_bitmap, user_attr[i].dst_info); + if (err) + goto out; + } + err = xsc_eth_modify_pkt_dst_info(file->xdev->eth_priv, attr->mac_bitmap, + attr->pkt_bitmap, attr->dst_info); + if (err) + goto out; + + err = xsc_alloc_user_mode_obj(file, xsc_eth_restore_pkt_dst_info, + opcode, (char *)user_attr, + XSC_MAX_MAC_NUM * sizeof(struct xsc_user_mode_attr)); + } else { + if (xsc_get_user_mode(file->xdev)) + xsc_release_user_mode(file, opcode); + } + +out: + xsc_core_info(file->xdev, + "%s usr mode=0x%x, pkt=0x%x, mac=0x%x, dst_info=%d, err=%d\n", + attr->enable ? 
"enable" : "disable", opcode, + attr->pkt_bitmap, attr->mac_bitmap, attr->dst_info, err); + kfree(user_attr); + + return err; +} + +static int xsc_ioctl_user_mode(struct xsc_bdf_file *file, struct xsc_core_device *dev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_user_mode_attr *attr; + u8 *buf; + int err = 0; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(dev, "fail to copy from user hdr\n"); + return -EFAULT; + } + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed); + return -EINVAL; + } + + buf = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = copy_from_user(buf, user_hdr->attr.data, hdr.attr.length); + if (err) { + xsc_core_err(dev, "failed to copy ioctl user data.\n"); + kvfree(buf); + return -EFAULT; + } + + attr = (struct xsc_ioctl_user_mode_attr *)buf; + switch (hdr.attr.opcode) { + case XSC_IOCTL_OPCODE_VF_USER_MODE: + case XSC_IOCTL_OPCODE_PF_USER_MODE: + err = xsc_change_user_mode(file, hdr.attr.opcode, attr); + break; + default: + err = -EOPNOTSUPP; + break; + } + + kvfree(buf); + return err; +} + static int _eth_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, struct xsc_ioctl_hdr __user *user_hdr, void *data) { @@ -613,6 +1579,9 @@ static int _eth_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, case XSC_IOCTL_CMDQ: err = _eth_ctrl_ioctl_cmdq(xdev, user_hdr); break; + case XSC_IOCTL_USER_MODE: + err = xsc_ioctl_user_mode(file, xdev, user_hdr); + break; default: err = TRY_NEXT_CB; break; diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h index 6fda4dced73f2d9c2ab8e7c3211cae139e128ec1..b026016bec707f2fdaa862203ab0e7fbed470b36 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h @@ -8,4 +8,6 @@ void xsc_eth_ctrl_fini(void); int xsc_eth_ctrl_init(void); -#endif /* XSC_ETH_CTRL_H */ +void xsc_handle_netlink_cmd(struct xsc_core_device *xdev, void *in, void *out); + +#endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h index 61850c2ea9dee622de2c0ca6b43e00bc68bd2ab5..b766ac83e4251adf29fd823c13854fa4a0453905 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h @@ -71,7 +71,6 @@ static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int dire for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; int fsz = skb_frag_size(frag); - buf = (char *)(page_address(frag->bv_page) + frag->bv_offset); for (i = 0; i < fsz; i++) { if (i % 16 == 0) diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h index 9ac98668f59f0b8ded1f5907690c5b09f23f4af1..1e3515db5eef996ca76a07f17ec7422f3349a970 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h @@ -43,4 +43,5 @@ void xsc_rx_dim_work(struct work_struct *work); void xsc_handle_tx_dim(struct xsc_sq *sq); void xsc_handle_rx_dim(struct xsc_rq *rq); + #endif /* XSC_ETH_DIM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c index 
f0ec02333a194251af0652a01a46102315496a2d..ab3a8f29b846525b99b837894b14df74676289da 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c @@ -34,7 +34,9 @@ enum { XSC_ST_LINK_STATE, XSC_ST_LINK_SPEED, XSC_ST_HEALTH_INFO, +#ifdef CONFIG_INET XSC_ST_LOOPBACK, +#endif XSC_ST_NUM, }; @@ -42,7 +44,9 @@ const char xsc_self_tests[XSC_ST_NUM][ETH_GSTRING_LEN] = { "Link Test", "Speed Test", "Health Test", +#ifdef CONFIG_INET "Loopback Test", +#endif }; static int xsc_test_loopback(struct xsc_adapter *adapter) @@ -175,16 +179,6 @@ static const struct pflag_desc xsc_priv_flags[XSC_NUM_PFLAGS] = { { "tx_cqe_moder", set_pflag_tx_cqe_moder}, }; -int xsc_priv_flags_num(void) -{ - return ARRAY_SIZE(xsc_priv_flags); -} - -const char *xsc_priv_flags_name(int flag) -{ - return xsc_priv_flags[flag].name; -} - static int xsc_handle_pflag(struct net_device *dev, u32 wanted_flags, enum xsc_eth_priv_flag flag) @@ -206,7 +200,7 @@ static int xsc_handle_pflag(struct net_device *dev, return err; } -int xsc_set_priv_flags(struct net_device *dev, u32 pflags) +static int xsc_set_priv_flags(struct net_device *dev, u32 pflags) { struct xsc_adapter *priv = netdev_priv(dev); enum xsc_eth_priv_flag pflag; @@ -319,7 +313,7 @@ static int xsc_get_module_eeprom(struct net_device *netdev, return 0; } -u32 xsc_get_priv_flags(struct net_device *dev) +static u32 xsc_get_priv_flags(struct net_device *dev) { struct xsc_adapter *priv = netdev_priv(dev); @@ -391,21 +385,21 @@ static void xsc_ethtool_get_strings(struct xsc_adapter *adapter, u32 stringset, case ETH_SS_STATS: xsc_fill_stats_strings(adapter, data); break; - + case ETH_SS_PHY_STATS: + xsc_get_prs_chk_err_stats_strings(adapter, data); + break; case ETH_SS_TEST: for (i = 0; i < xsc_self_test_num(adapter); i++) strscpy(data + i * ETH_GSTRING_LEN, xsc_self_tests[i], ETH_GSTRING_LEN); break; - case ETH_SS_PRIV_FLAGS: for (i = 0; i < XSC_NUM_PFLAGS; i++) strscpy(data + i * ETH_GSTRING_LEN, xsc_priv_flags[i].name, ETH_GSTRING_LEN); break; - default: ETH_DEBUG_LOG("wrong stringset\n"); break; @@ -428,6 +422,8 @@ static int xsc_ethtool_get_sset_count(struct xsc_adapter *adapter, int sset) for (i = 0; i < xsc_num_stats_grps; i++) num_stats += xsc_stats_grps[i].get_num_stats(adapter); return num_stats; + case ETH_SS_PHY_STATS: + return xsc_get_prs_chk_err_stats_count(adapter); case ETH_SS_PRIV_FLAGS: return XSC_NUM_PFLAGS; case ETH_SS_TEST: @@ -448,7 +444,9 @@ static int (*xsc_st_func[XSC_ST_NUM])(struct xsc_adapter *) = { xsc_test_link_state, xsc_test_link_speed, xsc_test_health_info, +#ifdef CONFIG_INET xsc_test_loopback, +#endif }; static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) @@ -511,6 +509,14 @@ static void xsc_get_ethtool_stats(struct net_device *dev, xsc_ethtool_get_ethtool_stats(adapter, stats, data); } +static void xsc_get_ethtool_phy_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_fill_prs_chk_err_stats(adapter, data); +} + static u32 xsc_get_msglevel(struct net_device *dev) { return ((struct xsc_adapter *)netdev_priv(dev))->msglevel; @@ -793,7 +799,7 @@ static int xsc_set_rss_hash_opt(struct xsc_adapter *priv, return ret; } -int xsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) +static int xsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_eth_params 
*params = &priv->nic_param; @@ -816,7 +822,7 @@ int xsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_ return err; } -int xsc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +static int xsc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct xsc_adapter *priv = netdev_priv(dev); int err = 0; @@ -845,7 +851,7 @@ static u32 xsc_get_rxfh_indir_size(struct net_device *netdev) return XSC_INDIR_RQT_SIZE; } -int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +static int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct xsc_adapter *priv = netdev_priv(netdev); struct xsc_rss_params *rss = &priv->rss_params; @@ -864,7 +870,7 @@ int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) return 0; } -int xsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) +static int xsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct xsc_adapter *priv = netdev_priv(dev); struct xsc_rss_params *rss = &priv->rss_params; @@ -902,6 +908,7 @@ static int xsc_get_link_ksettings(struct net_device *netdev, { struct xsc_adapter *adapter = netdev_priv(netdev); struct xsc_event_linkinfo linkinfo; + u32 nbits = 0; if (xsc_eth_get_link_info(adapter, &linkinfo)) return -EINVAL; @@ -935,6 +942,7 @@ static int xsc_get_link_ksettings(struct net_device *netdev, cmd->base.speed = LINKSPEED_MODE_200G; break; case MODULE_SPEED_400G_R8: + case MODULE_SPEED_400G_R4: cmd->base.speed = LINKSPEED_MODE_400G; break; default: @@ -951,15 +959,25 @@ static int xsc_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); - bitmap_copy(cmd->link_modes.supported, (unsigned long *)linkinfo.supported_speed, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_copy(cmd->link_modes.advertising, (unsigned long *)linkinfo.advertising_speed, - __ETHTOOL_LINK_MODE_MASK_NBITS); + nbits = min_t(u32, __ETHTOOL_LINK_MODE_MASK_NBITS, + sizeof(linkinfo.supported_speed) * 8); + bitmap_copy(cmd->link_modes.supported, + (unsigned long *)linkinfo.supported_speed, nbits); + nbits = min_t(u32, __ETHTOOL_LINK_MODE_MASK_NBITS, + sizeof(linkinfo.advertising_speed) * 8); + bitmap_copy(cmd->link_modes.advertising, + (unsigned long *)linkinfo.advertising_speed, nbits); + + nbits = min_t(u32, __ETHTOOL_LINK_MODE_MASK_NBITS, + sizeof(linkinfo.supported) * 8); bitmap_or(cmd->link_modes.supported, cmd->link_modes.supported, - (unsigned long *)&linkinfo.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + (unsigned long *)&linkinfo.supported, nbits); + + nbits = min_t(u32, __ETHTOOL_LINK_MODE_MASK_NBITS, + sizeof(linkinfo.advertising) * 8); bitmap_or(cmd->link_modes.advertising, cmd->link_modes.advertising, - (unsigned long *)&linkinfo.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + (unsigned long *)&linkinfo.advertising, nbits); return 0; } @@ -1000,6 +1018,115 @@ static int xsc_set_link_ksettings(struct net_device *netdev, return err; } +struct xsc_ethtool_link_ext_state_opcode_mapping { + u32 state_opcode; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct xsc_ethtool_link_ext_state_opcode_mapping +xsc_link_ext_state_opcode_map[] = { + {1, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, + {2, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {6, ETHTOOL_LINK_EXT_STATE_AUTONEG, + 
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED}, + {7, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + {8, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + {10, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {19, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED}, + {20, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {21, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {22, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + {23, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS}, + {24, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {25, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {26, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {27, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE}, +}; + +static void +xsc_set_link_ext_state(struct xsc_ethtool_link_ext_state_opcode_mapping + link_ext_state_mapping, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + switch (link_ext_state_mapping.link_ext_state) { + case ETHTOOL_LINK_EXT_STATE_AUTONEG: + link_ext_state_info->autoneg = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: + link_ext_state_info->link_training = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: + link_ext_state_info->link_logical_mismatch = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY: + link_ext_state_info->bad_signal_integrity = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE: + link_ext_state_info->cable_issue = + link_ext_state_mapping.link_ext_substate; + break; + default: + break; + } + + link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state; +} + +static int xsc_get_link_ext_state(struct net_device *dev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + struct xsc_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping; + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_link_sub_state_mbox_in in; + struct xsc_link_sub_state_mbox_out out; + int i, err; + u32 state_code; + + if (netif_carrier_ok(dev)) + return -ENODATA; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_GET_LINK_SUB_STATE); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get link ext state, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + state_code = be32_to_cpu(out.state_code); + + for (i = 0; i < ARRAY_SIZE(xsc_link_ext_state_opcode_map); i++) { + link_ext_state_mapping = xsc_link_ext_state_opcode_map[i]; + if (link_ext_state_mapping.state_opcode == state_code) { + xsc_set_link_ext_state(link_ext_state_mapping, + link_ext_state_info); + return 0; + } + } + + return -ENODATA; +} + static int xsc_set_phys_id(struct net_device *dev, enum 
ethtool_phys_id_state state) { struct xsc_adapter *adapter = netdev_priv(dev); @@ -1029,6 +1156,7 @@ static int xsc_set_fecparam(struct net_device *netdev, u32 new_fec = fec->fec; int err = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_FEC_PARAM); in.fec = cpu_to_be32(new_fec); @@ -1050,6 +1178,7 @@ static int xsc_get_fecparam(struct net_device *netdev, struct xsc_event_query_fecparam_mbox_out out; int err = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_FEC_PARAM); err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); @@ -1157,17 +1286,44 @@ static int xsc_set_coalesce(struct net_device *netdev, return err; } +static void xsc_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + pause->autoneg = 0; + pause->rx_pause = 0; + pause->tx_pause = 0; +} + +static void xsc_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + stats->tx_pause_frames = 0; + stats->rx_pause_frames = 0; +} + +static int xsc_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return -EOPNOTSUPP; +} + static const struct ethtool_ops xsc_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | +#ifdef ETHTOOL_COALESCE_USECS_LOW_HIGH ETHTOOL_COALESCE_USECS_LOW_HIGH | +#endif +#ifdef ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH | +#endif ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = xsc_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ext_state = xsc_get_link_ext_state, .get_strings = xsc_get_strings, .get_sset_count = xsc_get_sset_count, .get_ethtool_stats = xsc_get_ethtool_stats, + .get_ethtool_phy_stats = xsc_get_ethtool_phy_stats, .get_ringparam = xsc_get_ringparam, .set_ringparam = xsc_set_ringparam, .set_channels = xsc_set_channels, @@ -1185,6 +1341,9 @@ static const struct ethtool_ops xsc_ethtool_ops = { .set_rxnfc = xsc_set_rxnfc, .get_module_info = xsc_get_module_info, .get_module_eeprom = xsc_get_module_eeprom, + .get_pauseparam = xsc_get_pauseparam, + .set_pauseparam = xsc_set_pauseparam, + .get_pause_stats = xsc_get_pause_stats, .get_priv_flags = xsc_get_priv_flags, .set_priv_flags = xsc_set_priv_flags, .get_msglevel = xsc_get_msglevel, diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h index 9c6d1558e8f61d16ff922a658b986a1c785359f4..eb2eb3491c148560ef9f108b6099b73a91f1f5b9 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h @@ -9,6 +9,11 @@ void eth_set_ethtool_ops(struct net_device *dev); /* EEPROM Standards for plug in modules */ +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif #define LED_ACT_ON_HW 0xff + #endif /* XSC_ETH_ETHTOOL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c index 0cbb7492a87d9f447c6d58f6f498bed1d0f1fdb5..84d12c3b505e2b7e536c7758849ef178ab3fc4cd 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c @@ -19,21 +19,12 @@ static inline void xsc_rq_notify_hw(struct xsc_rq *rq) { struct xsc_core_device *xdev = rq->cq.xdev; struct xsc_wq_cyc *wq = &rq->wqe.wq; - union xsc_recv_doorbell doorbell_value; u64 rqwqe_id = wq->wqe_ctr << (ilog2(xdev->caps.recv_ds_num)); 
- ETH_DEBUG_LOG("rq%d_db_val=0x%x, recv_ds=%d\n", - rq->rqn, doorbell_value.recv_data, - xdev->caps.recv_ds_num); - /*reverse wqe index to ds index*/ - doorbell_value.next_pid = rqwqe_id; - doorbell_value.qp_num = rq->rqn; + ETH_DEBUG_LOG("rq=%d, next_pid=%#x, recv_ds=%d\n", + rq->rqn, rqwqe_id, xdev->caps.recv_ds_num); - /* Make sure that descriptors are written before - * updating doorbell record and ringing the doorbell - */ - wmb(); - writel(doorbell_value.recv_data, REG_ADDR(xdev, xdev->regs.rx_db)); + xsc_update_rx_db(xdev, rq->rqn, rqwqe_id); } static inline void xsc_skb_set_hash(struct xsc_adapter *adapter, @@ -87,6 +78,89 @@ static inline unsigned short from32to16(unsigned int x) static inline bool handle_udp_frag_csum(struct sk_buff *skb, struct epp_pph *pph) { +#ifdef XSC_UDP_FRAG_CSUM + char *head = (char *)pph; + struct iphdr *iph; + u8 l3_proto = PPH_OUTER_IP_TYPE(head); + u8 l4_proto = PPH_OUTER_TP_TYPE(head); + u16 csum_off = (u16)PPH_CSUM_OFST(head); + u16 csum_plen = (u16)PPH_CSUM_PLEN(head); + u8 payload_off = PPH_PAYLOAD_OFST(head); + u32 hw_csum = PPH_CSUM_VAL(head); + u16 udp_check = 0; + u16 udp_len = 0; + u32 off = 64; + __wsum csum1, csum2, csum3, csum; + +#ifdef CUM_SKB_DATA + head = (char *)skb->data; + off = 0; +#endif + + if (l4_proto != L4_PROTO_UDP && l4_proto != L4_PROTO_NONE) + return false; + + off += ETH_HLEN; + if (l3_proto == L3_PROTO_IP) { + iph = (struct iphdr *)(head + off); + if (!ip_is_fragment(iph)) + return false; + +#ifdef UDP_CSUM_DEBUG + netdev_dbg("ip_id=%d frag_off=0x%x l4_prt=%d l3_prt=%d iph_off=%d ip_len=%d csum_off=%d pload_off=%d\n", + ntohs(iph->id), ntohs(iph->frag_off), + l4_proto, l3_proto, PPH_OUTER_IP_OFST(head), PPH_OUTER_IP_LEN(pph), + csum_off, payload_off); +#endif + + off += iph->ihl * 4; + if (l4_proto == L4_PROTO_UDP) { + struct udphdr *uh = (struct udphdr *)(head + off); + + udp_check = uh->check; + udp_len = ntohs(uh->len); + } + + if (csum_off == 0) + csum_off = 256; + + netdev_dbg("%s: ip_id=%d frag_off=0x%x skb_len=%d data_len=%d csum_off=%d csum_plen=%d payload_off=%d udp_off=%d udp_len=%d udp_check=0x%x\n", + __func__, ntohs(iph->id), ntohs(iph->frag_off), + skb->len, skb->data_len, + csum_off, csum_plen, payload_off, off, udp_len, udp_check); +#ifdef CUM_RAW_DATA_DUMP + xsc_pkt_pph_dump((char *)head, 272); +#endif + + if (csum_off < off) { + csum1 = csum_partial((char *)(head + csum_off), (off - csum_off), 0); + csum2 = htons(from32to16(hw_csum)); + csum = csum_sub(csum2, csum1); + } else if (csum_off > off) { + csum2 = csum_partial((char *)(head + csum_off), csum_plen, 0); + csum1 = csum_partial((char *)(head + off), (csum_off - off), 0); + csum = htons(from32to16(hw_csum)); + csum = csum_partial((char *)(head + off), (csum_off - off), csum); + csum3 = csum_partial((char *)(head + off), (skb->len - off + 64), 0); + } else { + csum = htons(from32to16(hw_csum)); + } + skb->csum = csum_unfold(from32to16(csum)); + + ETH_DEBUG_LOG("%s: sw_cal_csum[%d:%d]=0x%x -> 0x%x\n", + __func__, off, csum_off, csum1, from32to16(csum1)); + ETH_DEBUG_LOG("%s: sw_cal_hw_csum[%d:%d]=0x%x -> 0x%x, hw_csum=0x%x -> 0x%x\n", + __func__, csum_off, csum_plen, csum2, from32to16(csum2), + hw_csum, from32to16(hw_csum)); + ETH_DEBUG_LOG("%s: sw_cal_tot_csum[%d:%d]=0x%x -> 0x%x, skb_csum=0x%x -> 0x%x\n", + __func__, off, skb->len, csum3, from32to16(csum3), csum, skb->csum); + + skb->ip_summed = CHECKSUM_COMPLETE; + + return true; + } +#endif + return false; } @@ -275,7 +349,11 @@ struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, u16 
frag_consumed_bytes = 0; int i = 0; +#ifndef NEED_CREATE_RX_THREAD skb = napi_alloc_skb(rq->cq.napi, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#else + skb = netdev_alloc_skb(netdev, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#endif if (unlikely(!skb)) { rq->stats->buff_alloc_err++; return NULL; @@ -403,12 +481,13 @@ static inline bool xsc_rx_cache_put(struct xsc_rq *rq, return true; } -void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) +static void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) { struct xsc_channel *c = rq->cq.channel; struct device *dev = c->adapter->dev; - dma_unmap_page(dev, dma_info->addr, XSC_RX_FRAG_SZ, rq->buff.map_dir); + dma_unmap_page(dev, dma_info->addr, + PAGE_SIZE << rq->buff.page_order, rq->buff.map_dir); } static inline void xsc_put_page(struct xsc_dma_info *dma_info) @@ -420,8 +499,10 @@ void xsc_page_release_dynamic(struct xsc_rq *rq, struct xsc_dma_info *dma_info, bool recycle) { if (likely(recycle)) { +#ifdef XSC_PAGE_CACHE if (xsc_rx_cache_put(rq, dma_info)) return; +#endif xsc_page_dma_unmap(rq, dma_info); page_pool_recycle_direct(rq->page_pool, dma_info->page); @@ -465,7 +546,8 @@ static void xsc_dump_error_rqcqe(struct xsc_rq *rq, net_err_ratelimited("Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%d, error_code=0x%x\n", netdev->name, rq->cq.xcq.cqn, ci, - rq->rqn, cqe->qp_id, get_cqe_opcode(cqe)); + rq->rqn, cqe->qp_id, xsc_get_cqe_error_code(rq->cq.xdev, cqe)); + } void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, @@ -473,7 +555,6 @@ void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, { struct xsc_wq_cyc *wq = &rq->wqe.wq; struct xsc_channel *c = rq->cq.channel; - u8 cqe_opcode = get_cqe_opcode(cqe); struct xsc_wqe_frag_info *wi; struct sk_buff *skb; u32 cqe_bcnt; @@ -481,7 +562,7 @@ void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, ci = xsc_wq_cyc_ctr2ix(wq, cqwq->cc); wi = get_frag(rq, ci); - if (unlikely(cqe_opcode & BIT(7))) { + if (unlikely(xsc_is_err_cqe(rq->cq.xdev, cqe))) { xsc_dump_error_rqcqe(rq, cqe); rq->stats->cqe_err++; goto free_wqe; @@ -511,7 +592,11 @@ void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, cqe->has_pph == 1 ? 
cqe_bcnt - XSC_PPH_HEAD_LEN : cqe_bcnt, skb, wi); +#ifdef NEED_CREATE_RX_THREAD + netif_rx_ni(skb); +#else napi_gro_receive(rq->cq.napi, skb); +#endif free_wqe: xsc_free_rx_wqe(rq, wi, true); @@ -569,17 +654,20 @@ static inline int xsc_page_alloc_mapped(struct xsc_rq *rq, struct xsc_channel *c = rq->cq.channel; struct device *dev = c->adapter->dev; +#ifdef XSC_PAGE_CACHE if (xsc_rx_cache_get(rq, dma_info)) return 0; rq->stats->cache_alloc++; +#endif dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); if (unlikely(!dma_info->page)) return -ENOMEM; dma_info->addr = dma_map_page(dev, dma_info->page, 0, - XSC_RX_FRAG_SZ, rq->buff.map_dir); + PAGE_SIZE << rq->buff.page_order, + rq->buff.map_dir); if (unlikely(dma_mapping_error(dev, dma_info->addr))) { page_pool_recycle_direct(rq->page_pool, dma_info->page); dma_info->page = NULL; @@ -669,7 +757,7 @@ static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) return err; } -bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq, bool force) { struct xsc_wq_cyc *wq = &rq->wqe.wq; u8 wqe_bulk, wqe_bulk_min; @@ -677,6 +765,9 @@ bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) u16 head; int err; + if (!force && !test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + return false; + wqe_bulk = rq->wqe.info.wqe_bulk; wqe_bulk_min = rq->wqe.info.wqe_bulk_min; if (xsc_wq_cyc_missing(wq) < wqe_bulk) diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c index 8b75ce05afb132ff7a9080ec72a23e950af3d3fc..9358f81f4d57226aa2a0ab85f3950436bd7dd085 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c @@ -93,7 +93,7 @@ static int xsc_grp_sw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx return idx; } -void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) +static void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) { struct xsc_sw_stats *s = &adapter->stats->sw; int max_tc = xsc_get_netdev_max_tc(adapter); @@ -426,6 +426,12 @@ static const struct counter_desc pfc_stall_stats_desc[] = { { XSC_DECLARE_STAT(struct xsc_pfc_stall_stats, tx_pause_storm_triggered) }, }; +static const struct counter_desc hw_eth_uc_stats_pf_desc[] = { + /*for uc statistcs*/ + { XSC_DECLARE_STAT(struct xsc_hw_uc_stats_eth, tx_unicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_uc_stats_eth, rx_unicast_phy) }, +}; + static int get_hw_stats_eth(struct xsc_core_device *dev, struct xsc_hw_stats_eth *stats_eth) { int ret; @@ -450,18 +456,34 @@ static int get_hw_stats_eth(struct xsc_core_device *dev, struct xsc_hw_stats_eth return 0; } +static u32 mask2size(u32 mask) +{ + u32 size = 0; + + while (mask) { + size += mask & 0x1; + mask >>= 1; + } + + return size; +} + static int xsc_hw_get_num_stats(struct xsc_adapter *adapter) { int ret = 0; + u32 mask = xsc_get_eth_stat_mask(adapter->xdev); + u32 eth_stats_size = mask2size(mask); if (is_support_hw_pf_stats(adapter->xdev)) { - ret = ARRAY_SIZE(hw_prio_stats_desc) + ARRAY_SIZE(hw_eth_stats_pf_desc) + + ret = ARRAY_SIZE(hw_prio_stats_desc) + eth_stats_size + (is_support_pfc_prio_statistic(adapter->xdev) ? ARRAY_SIZE(hw_pfc_prio_stats_desc) : 0) + + (is_support_pf_uc_statistic(adapter->xdev) ? + ARRAY_SIZE(hw_eth_uc_stats_pf_desc) : 0) + (is_support_pfc_stall_stats(adapter->xdev) ? 
ARRAY_SIZE(pfc_stall_stats_desc) : 0); } else { - ret = ARRAY_SIZE(hw_eth_stats_vf_desc); + ret = eth_stats_size; } return ret; @@ -471,8 +493,10 @@ static int xsc_hw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) { int i; struct xsc_core_device *xdev; + u32 mask = 0; xdev = adapter->xdev; + mask = xsc_get_eth_stat_mask(xdev); if (is_support_hw_pf_stats(xdev)) { for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) @@ -486,10 +510,19 @@ static int xsc_hw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) hw_pfc_prio_stats_desc[i].format, ETH_GSTRING_LEN); - for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) { + if (!((1 << i) & mask)) + continue; strscpy(data + (idx++) * ETH_GSTRING_LEN, hw_eth_stats_pf_desc[i].format, ETH_GSTRING_LEN); + } + + if (is_support_pf_uc_statistic(xdev)) + for (i = 0; i < ARRAY_SIZE(hw_eth_uc_stats_pf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_uc_stats_pf_desc[i].format, + ETH_GSTRING_LEN); if (is_support_pfc_stall_stats(xdev)) for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) @@ -497,10 +530,13 @@ static int xsc_hw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) pfc_stall_stats_desc[i].format, ETH_GSTRING_LEN); } else { - for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) { + if (!((1 << i) & mask)) + continue; strscpy(data + (idx++) * ETH_GSTRING_LEN, hw_eth_stats_vf_desc[i].format, ETH_GSTRING_LEN); + } } return idx; @@ -514,6 +550,8 @@ static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) struct xsc_pfc_prio_stats_mbox_out pfc_prio_out; struct xsc_pfc_stall_stats_mbox_in pfc_stall_in; struct xsc_pfc_stall_stats_mbox_out pfc_stall_out; + struct xsc_hw_uc_stats_mbox_in hw_ucstats_in; + struct xsc_hw_uc_stats_mbox_out hw_ucstats_out; struct xsc_core_device *xdev; int ret; u32 i; @@ -521,9 +559,11 @@ static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) u8 *stats; struct xsc_hw_stats_eth stats_eth; int ret_s; + u32 mask = 0; xdev = adapter->xdev; ret_s = get_hw_stats_eth(xdev, &stats_eth); + mask = xsc_get_eth_stat_mask(xdev); if (is_support_hw_pf_stats(xdev)) { memset(&in, 0, sizeof(in)); @@ -567,11 +607,37 @@ static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) if (!ret_s && stats_eth.is_pf) { stats = (u8 *)&stats_eth.stats.pf_stats; for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) { + if (!((1 << i) & mask)) + continue; val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_pf_desc, i); data[idx++] = __be64_to_cpu(val); } } + if (is_support_pf_uc_statistic(xdev)) { + memset(&hw_ucstats_in, 0, sizeof(hw_ucstats_in)); + memset(&hw_ucstats_out, 0, sizeof(hw_ucstats_out)); + hw_ucstats_in.hdr.opcode = + __cpu_to_be16(XSC_CMD_OP_QUERY_HW_PF_UC_STATS); + hw_ucstats_in.mac_port = xdev->mac_port; + + ret = xsc_cmd_exec(adapter->xdev, + (void *)&hw_ucstats_in, + sizeof(struct xsc_hw_uc_stats_mbox_in), + (void *)&hw_ucstats_out, + sizeof(struct xsc_hw_uc_stats_mbox_out)); + if (ret == 0 && hw_ucstats_out.hdr.status == 0 && + hw_ucstats_out.hw_uc_stats.is_pf) { + stats = (u8 *)&hw_ucstats_out.hw_uc_stats.eth_uc_stats; + for (i = 0; i < ARRAY_SIZE(hw_eth_uc_stats_pf_desc); i++) { + val = XSC_READ_CTR64_CPU(stats, + hw_eth_uc_stats_pf_desc, + i); + data[idx++] = __be64_to_cpu(val); + } + } + } + if (is_support_pfc_stall_stats(xdev)) { memset(&pfc_stall_in, 0, sizeof(pfc_stall_in)); memset(&pfc_stall_out, 0, 
sizeof(pfc_stall_out)); @@ -596,6 +662,8 @@ static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) if (!ret_s && !stats_eth.is_pf) { stats = (u8 *)&stats_eth.stats.vf_stats; for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) { + if (!((1 << i) & mask)) + continue; val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_vf_desc, i); data[idx++] = __be64_to_cpu(val); } @@ -649,3 +717,75 @@ void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 * } } } + +static const struct counter_desc xsc_prs_chk_err_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_sip_dip_eq) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_sip_invalid) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_smac_invalid) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_ip_ver) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_smac_dmac_eq) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_dmac_zero) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_sip_dip_eq) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_sip_invalid) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_smac_invalid) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_ip_ver) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_smac_dmac_eq) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_dmac_zero) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_udp_len) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_tp_checksum) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_ipv4_checksum) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_ip_ttl) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_ip_len) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, inner_ipv4_ihl) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_udp_len) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_tp_checksum) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_ipv4_checksum) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_ip_ttl) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_ip_len) }, + { XSC_DECLARE_STAT(struct xsc_prs_chk_err_stats, outer_ipv4_ihl) }, +}; + +#define XSC_PHY_STATS_CNT ARRAY_SIZE(xsc_prs_chk_err_stats_desc) + +int xsc_get_prs_chk_err_stats_count(struct xsc_adapter *adapter) +{ + return XSC_PHY_STATS_CNT; +} + +void xsc_get_prs_chk_err_stats_strings(struct xsc_adapter *adapter, u8 *data) +{ + int i; + + for (i = 0; i < XSC_PHY_STATS_CNT; i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_prs_chk_err_stats_desc[i].format, + ETH_GSTRING_LEN); +} + +int xsc_fill_prs_chk_err_stats(struct xsc_adapter *adapter, u64 *data) +{ + struct xsc_query_hw_prs_chk_err_stats_mbox_out out; + struct xsc_query_hw_prs_chk_err_stats_mbox_in in; + __be64 val; + int err; + int i; + + memset(&out, 0, sizeof(out)); + memset(&in, 0, sizeof(in)); + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_HW_PRS_CHK_ERR_STATS); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(in), + (void *)&out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + for (i = 0; i < XSC_PHY_STATS_CNT; i++) { + val = XSC_READ_CTR64_CPU(&out.stats, + xsc_prs_chk_err_stats_desc, i); + data[i] = __be64_to_cpu(val); + } + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h index 
069c5d8ad0dbbda55f1a56bbf768ae2738554290..2f9df3cad0804d6c65d4a78078aa596003edd01d 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h @@ -178,6 +178,10 @@ struct xsc_stats { extern const struct xsc_stats_grp xsc_stats_grps[]; extern const int xsc_num_stats_grps; +int xsc_get_prs_chk_err_stats_count(struct xsc_adapter *adapter); +void xsc_get_prs_chk_err_stats_strings(struct xsc_adapter *adapter, u8 *data); +int xsc_fill_prs_chk_err_stats(struct xsc_adapter *adapter, u64 *data); + void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s); #endif /* XSC_EN_STATS_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c index 8709b22c3b879e766f14de9fe2400b92637f3354..8e432485f6ff961bd0e36c5bbaf366b7c23e343d 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include #include "common/xsc_core.h" #include "common/xsc_cmd.h" @@ -33,7 +36,7 @@ static void pcie_lat_hw_work(struct work_struct *work) (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); } schedule_delayed_work_on(smp_processor_id(), dwork, msecs_to_jiffies(pcie_lat->period * 1000)); @@ -55,13 +58,13 @@ static void pcie_lat_hw_init(struct xsc_core_device *xdev) (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); } } static ssize_t pcie_lat_enable_show(struct device *device, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); int err; @@ -78,7 +81,7 @@ static ssize_t pcie_lat_enable_show(struct device *device, (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(adapter->xdev, "Failed to get pcie_lat en, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -86,8 +89,8 @@ static ssize_t pcie_lat_enable_show(struct device *device, } static ssize_t pcie_lat_enable_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) + struct device_attribute *attr, + const char *buf, size_t count) { struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); struct xsc_pcie_lat_work *pcie_lat = adapter->xdev->pcie_lat; @@ -101,16 +104,16 @@ static ssize_t pcie_lat_enable_store(struct device *device, return -EINVAL; if (pcie_lat_enable != XSC_PCIE_LAT_EN_DISABLE && - pcie_lat_enable != XSC_PCIE_LAT_EN_ENABLE) { + pcie_lat_enable != XSC_PCIE_LAT_EN_ENABLE) { xsc_core_err(adapter->xdev, - "pcie_lat_enable should be set as %d or %d, cannot be %d\n", - XSC_PCIE_LAT_EN_DISABLE, XSC_PCIE_LAT_EN_ENABLE, - pcie_lat_enable); + "pcie_lat_enable should be set as %d or %d, cannot be %d\n", + XSC_PCIE_LAT_EN_DISABLE, XSC_PCIE_LAT_EN_ENABLE, + pcie_lat_enable); return -EPERM; } if (pcie_lat_enable == XSC_PCIE_LAT_EN_ENABLE && - pcie_lat->enable == XSC_PCIE_LAT_EN_DISABLE) { + pcie_lat->enable == XSC_PCIE_LAT_EN_DISABLE) { pcie_lat_hw_init(adapter->xdev); pcie_lat->adapter = adapter; INIT_DELAYED_WORK(&pcie_lat->work, 
pcie_lat_hw_work); @@ -134,7 +137,7 @@ static ssize_t pcie_lat_enable_store(struct device *device, (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(adapter->xdev, "Failed to set pcie_lat en, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -144,8 +147,8 @@ static ssize_t pcie_lat_enable_store(struct device *device, static DEVICE_ATTR_RW(pcie_lat_enable); static ssize_t pcie_lat_interval_show(struct device *device, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); int err, i; @@ -163,7 +166,7 @@ static ssize_t pcie_lat_interval_show(struct device *device, (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(adapter->xdev, "Failed to get pcie_lat interval, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -179,8 +182,8 @@ static ssize_t pcie_lat_interval_show(struct device *device, static DEVICE_ATTR_RO(pcie_lat_interval); static ssize_t pcie_lat_period_show(struct device *device, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; @@ -189,8 +192,8 @@ static ssize_t pcie_lat_period_show(struct device *device, } static ssize_t pcie_lat_period_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) + struct device_attribute *attr, + const char *buf, size_t count) { struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; @@ -202,10 +205,10 @@ static ssize_t pcie_lat_period_store(struct device *device, return -EINVAL; if (pcie_lat_period < XSC_PCIE_LAT_PERIOD_MIN || - pcie_lat_period > XSC_PCIE_LAT_PERIOD_MAX) { + pcie_lat_period > XSC_PCIE_LAT_PERIOD_MAX) { xsc_core_err(adapter->xdev, "pcie_lat_period should be set between [%d-%d], cannot be %d\n", - XSC_PCIE_LAT_PERIOD_MIN, XSC_PCIE_LAT_PERIOD_MAX, - pcie_lat_period); + XSC_PCIE_LAT_PERIOD_MIN, XSC_PCIE_LAT_PERIOD_MAX, + pcie_lat_period); return -EPERM; } @@ -217,8 +220,8 @@ static ssize_t pcie_lat_period_store(struct device *device, static DEVICE_ATTR_RW(pcie_lat_period); static ssize_t pcie_lat_histogram_show(struct device *device, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); int i, err; @@ -236,8 +239,8 @@ static ssize_t pcie_lat_histogram_show(struct device *device, (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(adapter->xdev, - "Failed to get pcie_lat histogram, err(%u), status(%u)\n", - err, out.hdr.status); + "Failed to get pcie_lat histogram, err(%u), status(%u)\n", + err, out.hdr.status); return -EINVAL; } @@ -271,7 +274,7 @@ static ssize_t pcie_lat_peak_show(struct device *device, (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) { xsc_core_err(adapter->xdev, "Failed to get pcie_lat peak, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); return -EINVAL; } @@ -342,7 +345,7 @@ static void xsc_pcie_lat_sysfs_fini(struct net_device *dev, struct xsc_core_devi (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); if (err || out.hdr.status) xsc_core_err(xdev, "Failed to 
set pcie_lat disable, err(%u), status(%u)\n", - err, out.hdr.status); + err, out.hdr.status); if (tmp->enable == XSC_PCIE_LAT_EN_ENABLE) cancel_delayed_work_sync(&tmp->work); @@ -356,18 +359,213 @@ static void xsc_pcie_lat_sysfs_fini(struct net_device *dev, struct xsc_core_devi xdev->pcie_lat = NULL; } +static ssize_t ooo_statistic_reset_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + u16 ooo_statistic_reset; + struct xsc_ooo_statistic_feat_mbox_in in; + struct xsc_ooo_statistic_feat_mbox_out out; + + err = kstrtou16(buf, 0, &ooo_statistic_reset); + if (err != 0) + return -EINVAL; + + if (ooo_statistic_reset != XSC_OOO_STATISTIC_RESET) { + xsc_core_err(adapter->xdev, + "ooo_statistic_reset can only be set to 1, cannot be %d\n", + ooo_statistic_reset); + return -EPERM; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_OOO_STATISTIC_FEAT); + in.xsc_ooo_statistic_feature_opcode = __cpu_to_be16(XSC_OOO_STATISTIC_FEAT_SET_RESET); + in.ooo_statistic.ooo_statistic_reset = ooo_statistic_reset; + + err = xsc_cmd_exec(adapter->xdev, + (void *)&in, sizeof(struct xsc_ooo_statistic_feat_mbox_in), + (void *)&out, sizeof(struct xsc_ooo_statistic_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to set ooo_statistic_reset, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_WO(ooo_statistic_reset); + +static ssize_t ooo_statistic_range_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int i, err; + u32 count = 0; + struct xsc_ooo_statistic_feat_mbox_in in; + struct xsc_ooo_statistic_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_OOO_STATISTIC_FEAT); + in.xsc_ooo_statistic_feature_opcode = __cpu_to_be16(XSC_OOO_STATISTIC_FEAT_GET_RANGE); + + err = xsc_cmd_exec(adapter->xdev, + (void *)&in, sizeof(struct xsc_ooo_statistic_feat_mbox_in), + (void *)&out, sizeof(struct xsc_ooo_statistic_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to get ooo_statistic_range, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_OOO_STATISTIC_RANGE_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.ooo_statistic.ooo_statistic_range[i])); + + count += sprintf(&buf[count], "%u\n", + __be32_to_cpu(out.ooo_statistic.ooo_statistic_range[i])); + + return count; +} + +#define OOO_STATISTIC_RANGE_FORMAT "%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u" +static ssize_t ooo_statistic_range_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + struct xsc_ooo_statistic_feat_mbox_in in; + struct xsc_ooo_statistic_feat_mbox_out out; + u32 *ptr = in.ooo_statistic.ooo_statistic_range; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + err = sscanf(buf, OOO_STATISTIC_RANGE_FORMAT, + &ptr[0], &ptr[1], &ptr[2], &ptr[3], &ptr[4], &ptr[5], &ptr[6], &ptr[7], + &ptr[8], &ptr[9], &ptr[10], &ptr[11], &ptr[12], &ptr[13], &ptr[14], &ptr[15]); + if (err != XSC_OOO_STATISTIC_RANGE_MAX) + return -EINVAL; + + in.hdr.opcode =
__cpu_to_be16(XSC_CMD_OP_OOO_STATISTIC_FEAT); + in.xsc_ooo_statistic_feature_opcode = __cpu_to_be16(XSC_OOO_STATISTIC_FEAT_SET_RANGE); + + for (i = 0 ; i < XSC_OOO_STATISTIC_RANGE_MAX; i++) + in.ooo_statistic.ooo_statistic_range[i] = __cpu_to_be32(ptr[i]); + + err = xsc_cmd_exec(adapter->xdev, + (void *)&in, sizeof(struct xsc_ooo_statistic_feat_mbox_in), + (void *)&out, sizeof(struct xsc_ooo_statistic_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to set ooo_statistic_range, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(ooo_statistic_range); + +static ssize_t ooo_statistic_show_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + u32 count = 0; + struct xsc_ooo_statistic_feat_mbox_in in; + struct xsc_ooo_statistic_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_OOO_STATISTIC_FEAT); + in.xsc_ooo_statistic_feature_opcode = __cpu_to_be16(XSC_OOO_STATISTIC_FEAT_GET_SHOW); + + err = xsc_cmd_exec(adapter->xdev, + (void *)&in, sizeof(struct xsc_ooo_statistic_feat_mbox_in), + (void *)&out, sizeof(struct xsc_ooo_statistic_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to get ooo_statistic_show, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_OOO_STATISTIC_SHOW_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.ooo_statistic.ooo_statistic_show[i])); + + count += sprintf(&buf[count], "%u\n", + __be32_to_cpu(out.ooo_statistic.ooo_statistic_show[i])); + + return count; +} + +static DEVICE_ATTR_RO(ooo_statistic_show); + +static struct attribute *ooo_statistic_attrs[] = { + &dev_attr_ooo_statistic_reset.attr, + &dev_attr_ooo_statistic_range.attr, + &dev_attr_ooo_statistic_show.attr, + NULL, +}; + +static struct attribute_group ooo_statistic_group = { + .name = "ooo_statistic", + .attrs = ooo_statistic_attrs, +}; + +static int xsc_ooo_statistic_sysfs_init(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + + err = sysfs_create_group(&dev->dev.kobj, &ooo_statistic_group); + if (err) + goto err_create_group; + + return 0; + +err_create_group: + xsc_core_err(xdev, "Failed to create ooo_statistic sysfs group, err(%d)\n", err); + + return err; +} + +static void xsc_ooo_statistic_sysfs_fini(struct net_device *dev, struct xsc_core_device *xdev) +{ + sysfs_remove_group(&dev->dev.kobj, &ooo_statistic_group); +} + int xsc_eth_sysfs_create(struct net_device *dev, struct xsc_core_device *xdev) { int err = 0; - if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) { err = xsc_pcie_lat_sysfs_init(dev, xdev); + err = xsc_ooo_statistic_sysfs_init(dev, xdev); + } return err; } void xsc_eth_sysfs_remove(struct net_device *dev, struct xsc_core_device *xdev) { - if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) { xsc_pcie_lat_sysfs_fini(dev, xdev); + xsc_ooo_statistic_sysfs_fini(dev, xdev); + } } diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c index e9094e98f62598fb6cca80506d6066a023721e1b..0b788d9c4c8ef7cb17b2093aaa174954b3214a48 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c @@ -11,6
+11,7 @@ #include "common/qp.h" #include "xsc_eth.h" #include "xsc_eth_txrx.h" +#include "xsc_accel.h" #define XSC_OPCODE_RAW 0x7 @@ -19,7 +20,6 @@ static inline void *xsc_sq_fetch_wqe(struct xsc_sq *sq, size_t size, u16 *pi) struct xsc_wq_cyc *wq = &sq->wq; void *wqe; - /*caution, sp->pc is default to be zero*/ *pi = xsc_wq_cyc_ctr2ix(wq, sq->pc); wqe = xsc_wq_cyc_get_wqe(wq, *pi); memset(wqe, 0, size); @@ -27,7 +27,7 @@ static inline void *xsc_sq_fetch_wqe(struct xsc_sq *sq, size_t size, u16 *pi) return wqe; } -u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) +static u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) { struct xsc_sq_stats *stats = sq->stats; u16 ihs; @@ -48,9 +48,9 @@ u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) return ihs; } -void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, - struct sk_buff *skb, - struct xsc_send_wqe_ctrl_seg *cseg) +static void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, + struct sk_buff *skb, + struct xsc_send_wqe_ctrl_seg *cseg) { if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { if (skb->encapsulation) { @@ -79,7 +79,6 @@ static inline void xsc_dma_push(struct xsc_sq *sq, dma_addr_t addr, u32 size, dma->addr = addr; dma->size = size; dma->type = map_type; - ETH_DEBUG_LOG("dma = %p, dma->addr = %#llx\n", dma, dma->addr); } static inline void xsc_tx_dma_unmap(struct device *dev, struct xsc_sq_dma *dma) @@ -92,7 +91,7 @@ static inline void xsc_tx_dma_unmap(struct device *dev, struct xsc_sq_dma *dma) dma_unmap_page(dev, dma->addr, dma->size, DMA_TO_DEVICE); break; default: - ETH_DEBUG_LOG("%s\n", "xsc_tx_dma_unmap unknown DMA type!\n"); + net_err_ratelimited("%s: unknown DMA type=%d\n", __func__, dma->type); } } @@ -152,9 +151,8 @@ static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, if (unlikely(dma_mapping_error(dev, dma_addr))) goto dma_unmap_wqe_err; - dseg->va = cpu_to_le64(dma_addr); - dseg->mkey = cpu_to_le32(sq->mkey_be); - dseg->seg_len = cpu_to_le32(headlen); + xsc_set_data_seg(adapter->xdev, dseg, cpu_to_le64(dma_addr), + cpu_to_le32(sq->mkey_be), cpu_to_le32(headlen)); WQE_DSEG_DUMP("dseg-headlen", dseg); @@ -171,9 +169,8 @@ static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, if (unlikely(dma_mapping_error(dev, dma_addr))) goto dma_unmap_wqe_err; - dseg->va = cpu_to_le64(dma_addr); - dseg->mkey = cpu_to_le32(sq->mkey_be); - dseg->seg_len = cpu_to_le32(fsz); + xsc_set_data_seg(adapter->xdev, dseg, cpu_to_le64(dma_addr), + cpu_to_le32(sq->mkey_be), cpu_to_le32(fsz)); WQE_DSEG_DUMP("dseg-frag", dseg); @@ -200,25 +197,15 @@ static inline void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc, { struct xsc_adapter *adapter = sq->channel->adapter; struct xsc_core_device *xdev = adapter->xdev; - union xsc_send_doorbell doorbell_value; int send_ds_num_log = ilog2(xdev->caps.send_ds_num); - /*reverse wqe index to ds index*/ - doorbell_value.next_pid = pc << send_ds_num_log; - doorbell_value.qp_num = sq->sqn; - - /* Make sure that descriptors are written before - * updating doorbell record and ringing the doorbell - */ - wmb(); - ETH_DEBUG_LOG("pc = %d sqn = %d\n", pc, sq->sqn); - ETH_DEBUG_LOG("doorbell_value = %#x\n", doorbell_value.send_data); - writel(doorbell_value.send_data, REG_ADDR(xdev, xdev->regs.tx_db)); + xsc_update_tx_db(xdev, sq->sqn, pc << send_ds_num_log); } -void xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb, - u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, - struct xsc_tx_wqe_info *wi) +static void 
xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, + u32 num_bytes, u8 num_dma, + struct xsc_tx_wqe_info *wi) { struct xsc_wq_cyc *wq = &sq->wq; @@ -231,23 +218,16 @@ void xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb, netdev_tx_sent_queue(sq->txq, num_bytes); ETH_SQ_STATE(sq); - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - ETH_DEBUG_LOG("%s\n", "hw tstamp\n"); - } - /*1*/ sq->pc += wi->num_wqebbs; - ETH_DEBUG_LOG("%d\n", sq->pc); if (unlikely(!xsc_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) { netif_tx_stop_queue(sq->txq); sq->stats->stopped++; - ETH_DEBUG_LOG("%p %d %d %d\n", wq, sq->cc, sq->pc, sq->stop_room); } - ETH_DEBUG_LOG("%d %d\n", xsc_netdev_xmit_more(skb), netif_xmit_stopped(sq->txq)); - if (!xsc_netdev_xmit_more(skb) || netif_xmit_stopped(sq->txq)) xsc_sq_notify_hw(wq, sq->pc, sq); } @@ -260,7 +240,8 @@ static void xsc_dump_error_sqcqe(struct xsc_sq *sq, net_err_ratelimited("Err cqe on dev %s cqn=0x%x ci=0x%x sqn=0x%x err_code=0x%x qpid=0x%x\n", netdev->name, sq->cq.xcq.cqn, ci, - sq->sqn, get_cqe_opcode(cqe), cqe->qp_id); + sq->sqn, xsc_get_cqe_error_code(sq->cq.xdev, cqe), cqe->qp_id); + } void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq) @@ -297,6 +278,10 @@ void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq) netdev_tx_completed_queue(sq->txq, npkts, nbytes); } +#ifdef NEED_CREATE_RX_THREAD + DECLARE_PER_CPU(bool, txcqe_get); +#endif + bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) { struct xsc_adapter *adapter; @@ -323,12 +308,16 @@ bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) stats = sq->stats; - if (unlikely(get_cqe_opcode(cqe) & BIT(7))) { + if (unlikely(xsc_is_err_cqe(sq->cq.xdev, cqe))) { xsc_dump_error_sqcqe(sq, cqe); stats->cqe_err++; return false; } +#ifdef NEED_CREATE_RX_THREAD + __this_cpu_write(txcqe_get, true); +#endif + sqcc = sq->cc; /* avoid dirtying sq cache line every cqe */ @@ -358,12 +347,19 @@ bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) xsc_tx_dma_unmap(dev, dma); } +#ifndef NEED_CREATE_RX_THREAD npkts++; nbytes += wi->num_bytes; sqcc += wi->num_wqebbs; napi_consume_skb(skb, napi_budget); - ETH_DEBUG_LOG("ci=%d, sqcc=%d, pkts=%d\n", ci, sqcc, npkts); - +#else + npkts++; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + if (refcount_read(&skb->users) < 1) + stats->txdone_skb_refcnt_err++; + napi_consume_skb(skb, 0); +#endif } while ((++i <= napi_budget) && (cqe = xsc_cqwq_get_cqe(&cq->wq))); stats->cqes += i; @@ -375,7 +371,6 @@ bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) sq->dma_fifo_cc = dma_fifo_cc; sq->cc = sqcc; - ETH_DEBUG_LOG("dma_fifo_cc=%d, sqcc=%d\n", dma_fifo_cc, sqcc); ETH_SQ_STATE(sq); netdev_tx_completed_queue(sq->txq, npkts, nbytes); @@ -404,8 +399,9 @@ static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, u16 ds_cnt; u16 mss, ihs, headlen; u8 opcode; - u32 num_bytes, num_dma; - u8 num_wqebbs; + u32 num_bytes; + u32 num_dma = 0; + u8 num_wqebbs = 0; retry_send: /* Calc ihs and ds cnt, no writes to wqe yet */ @@ -431,9 +427,6 @@ static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, headlen = skb->len - skb->data_len; ds_cnt += !!headlen; ds_cnt += skb_shinfo(skb)->nr_frags; - ETH_DEBUG_LOG("skb_len=%d data_len=%d nr_frags=%d mss=%d ihs=%d headlen=%d ds_cnt=%d\n", - skb->len, skb->data_len, skb_shinfo(skb)->nr_frags, - mss, ihs, headlen, ds_cnt); /*to make the connection, only 
linear data is present*/ skbdata_debug_dump(skb, headlen, 1); @@ -446,7 +439,7 @@ static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, num_wqebbs = DIV_ROUND_UP(ds_cnt, xdev->caps.send_ds_num); /*if ds_cnt exceed one wqe, drop it*/ - if (num_wqebbs != 1) { + if (num_wqebbs != 1 || xsc_skb_need_linearize(xdev, ds_cnt)) { sq->stats->skb_linear++; if (skb_linearize(skb)) goto err_drop; @@ -481,8 +474,8 @@ static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; err_drop: - ETH_DEBUG_LOG("%s: drop skb, ds_cnt=%d, num_wqebbs=%d, num_dma=%d\n", - __func__, ds_cnt, num_wqebbs, num_dma); + net_err_ratelimited("%s: drop skb, ds_cnt=%d, num_wqebbs=%d, num_dma=%d\n", + __func__, ds_cnt, num_wqebbs, num_dma); stats->dropped++; dev_kfree_skb_any(skb); @@ -491,47 +484,20 @@ static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev) { - u32 ret; - u32 queue_id; + struct xsc_adapter *adapter = netdev_priv(netdev); struct xsc_sq *sq; struct xsc_tx_wqe *wqe; u16 pi; - struct xsc_adapter *adapter = netdev_priv(netdev); - struct xsc_core_device *xdev = adapter->xdev; - - if (!skb) { - ETH_DEBUG_LOG("skb == NULL\n"); - return NETDEV_TX_OK; - } - if (!adapter) { - ETH_DEBUG_LOG("adapter == NULL\n"); + if (!adapter || !adapter->xdev || adapter->status != XSCALE_ETH_DRIVER_OK) return NETDEV_TX_BUSY; - } - if (adapter->status != XSCALE_ETH_DRIVER_OK) { - ETH_DEBUG_LOG("adapter->status = %d\n", adapter->status); + sq = adapter->txq2sq[skb_get_queue_mapping(skb)]; + if (unlikely(!sq)) return NETDEV_TX_BUSY; - } - - queue_id = skb_get_queue_mapping(skb); - ETH_DEBUG_LOG("queue_id = %d\n", queue_id); - assert(adapter->xdev, queue_id < XSC_ETH_MAX_TC_TOTAL); - - sq = adapter->txq2sq[queue_id]; - if (!sq) { - ETH_DEBUG_LOG("sq = NULL\n"); - return NETDEV_TX_BUSY; - } - ETH_DEBUG_LOG("sqn = %d\n", sq->sqn); - - wqe = xsc_sq_fetch_wqe(sq, xdev->caps.send_ds_num * XSC_SEND_WQE_DS, &pi); - ETH_DEBUG_LOG("wqe = %p pi = %d\n", wqe, pi); - assert(adapter->xdev, wqe); - - ret = xsc_eth_xmit_frame(skb, sq, wqe, pi); - ETH_DEBUG_LOG("ret = %d\n", ret); + wqe = xsc_sq_fetch_wqe(sq, adapter->xdev->caps.send_ds_num * XSC_SEND_WQE_DS, &pi); + skb = xsc_accel_handle_tx(skb); - return ret; + return xsc_eth_xmit_frame(skb, sq, wqe, pi); } diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c index 160cffa21c232582aa1712346ed66571fb1fa12d..8593ee734eb9f1de6fbee80b35c23d379bb79d97 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c @@ -10,18 +10,8 @@ void xsc_cq_notify_hw_rearm(struct xsc_cq *cq) { - union xsc_cq_doorbell db; - ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); - - db.val = 0; - db.cq_next_cid = cpu_to_le32(cq->wq.cc); - db.cq_id = cpu_to_le32(cq->xcq.cqn); - db.arm = 0; - - /* ensure doorbell record is visible to device before ringing the doorbell */ - wmb(); - writel(db.val, REG_ADDR(cq->xdev, cq->xdev->regs.complete_db)); + xsc_arm_cq(cq->xdev, cq->xcq.cqn, cq->wq.cc, 0); if (cq->channel && cq->channel->stats) cq->channel->stats->arm++; } @@ -29,17 +19,9 @@ void xsc_cq_notify_hw_rearm(struct xsc_cq *cq) void xsc_cq_notify_hw(struct xsc_cq *cq) { struct xsc_core_device *xdev = cq->xdev; - union xsc_cq_doorbell db; ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); - - dma_wmb(); - - db.val = 0; - db.cq_next_cid = cpu_to_le32(cq->wq.cc); - db.cq_id = 
cpu_to_le32(cq->xcq.cqn); - - writel(db.val, REG_ADDR(xdev, xdev->regs.complete_reg)); + xsc_update_cq_ci(xdev, cq->xcq.cqn, cq->wq.cc); if (cq->channel && cq->channel->stats) cq->channel->stats->noarm++; } @@ -86,7 +68,7 @@ int xsc_eth_napi_poll(struct napi_struct *napi, int budget) busy |= work_done == budget; } - busy |= rq->post_wqes(rq); + busy |= rq->post_wqes(rq, false); if (busy) { if (likely(xsc_channel_no_affinity_change(c))) { diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h index da7ece2fea4eeeab0f5861de321e9b61105734d4..effbcf87558ba37ff65b76d8fd04d5eef8e64618 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h @@ -17,8 +17,6 @@ enum { XSC_ETH_WQE_INNER_AND_OUTER_CSUM, }; -#define ANDES_DRIVER - static inline u32 xsc_cqwq_get_size(struct xsc_cqwq *wq) { return wq->fbc.sz_m1 + 1; @@ -67,7 +65,7 @@ struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); -bool xsc_eth_post_rx_wqes(struct xsc_rq *rq); +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq, bool force); void xsc_cq_notify_hw(struct xsc_cq *cq); void xsc_cq_notify_hw_rearm(struct xsc_cq *cq); void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix); @@ -78,4 +76,5 @@ void xsc_page_release_dynamic(struct xsc_rq *rq, bool recycle); enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer); + #endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c index 7379574f1a7e3f032fc60b44cce3f52eedb34bc8..0ac9b759ccb1bce562e54acfa2d437d251538d69 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c @@ -99,6 +99,190 @@ int xsc_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) return 0; } +static inline int xsc_hash_l2(const u8 *addr) +{ + return addr[5]; +} + +static void xsc_add_l2_to_hash(struct hlist_head *hash, const u8 *addr) +{ + struct xsc_l2_hash_node *hn; + int ix = xsc_hash_l2(addr); + int found = 0; + + hlist_for_each_entry(hn, &hash[ix], hlist) + if (ether_addr_equal(hn->mac_addr, addr)) { + found = 1; + break; + } + + if (found) { + hn->action = XSC_ACTION_NONE; + return; + } + + hn = kzalloc(sizeof(*hn), GFP_ATOMIC); + if (!hn) + return; + + ether_addr_copy(hn->mac_addr, addr); + hn->action = XSC_ACTION_ADD; + + hlist_add_head(&hn->hlist, &hash[ix]); +} + +static void xsc_del_l2_from_hash(struct xsc_l2_hash_node *hn) +{ + hlist_del(&hn->hlist); + kfree(hn); +} + +static void xsc_sync_netdev_uc_addr(struct xsc_core_device *xdev, + struct net_device *netdev, + struct xsc_flow_steering *fs) +{ + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + netdev_for_each_uc_addr(ha, netdev) { + xsc_add_l2_to_hash(fs->l2.netdev_uc, ha->addr); + } + + netif_addr_unlock_bh(netdev); +} + +static void xsc_vport_context_update_uc_mac(struct xsc_core_device *xdev, + struct xsc_flow_steering *fs, + struct xsc_l2_hash_node *hn) +{ + int err = 0; + u16 pct_prio; + + switch (hn->action) { + case XSC_ACTION_ADD: + err = xsc_nic_vport_add_uc_mac(xdev, hn->mac_addr, &pct_prio); + if (err) { + xsc_core_err(xdev, "failed to add pct entry for uc mac %pM\n", + hn->mac_addr); + xsc_del_l2_from_hash(hn); + } else { + hn->action = XSC_ACTION_NONE; + hn->pct_prio = pct_prio; + } + xsc_core_info(xdev, "pct add for uc 
mac %pM, priority: %d\n", + hn->mac_addr, pct_prio); + break; + case XSC_ACTION_DEL: + xsc_core_info(xdev, "pct del for uc mac %pM, priority: %d\n", + hn->mac_addr, hn->pct_prio); + err = xsc_nic_vport_del_uc_mac(xdev, hn->pct_prio); + if (err) + xsc_core_err(xdev, "failed to del pct entry for uc mac %pM\n", + hn->mac_addr); + xsc_del_l2_from_hash(hn); + break; + } +} + +static void xsc_apply_netdev_uc_addr(struct xsc_core_device *xdev, + struct xsc_flow_steering *fs) +{ + struct xsc_l2_hash_node *hn; + struct hlist_node *tmp; + int i; + + for (i = 0; i < XSC_L2_ADDR_HASH_SIZE; i++) + hlist_for_each_entry_safe(hn, tmp, &fs->l2.netdev_uc[i], hlist) + xsc_vport_context_update_uc_mac(xdev, fs, hn); +} + +static void xsc_vport_context_update_mc_mac(struct xsc_core_device *xdev, + struct xsc_flow_steering *fs, + struct xsc_l2_hash_node *hn) +{ + int err = 0; + + switch (hn->action) { + case XSC_ACTION_ADD: + err = xsc_nic_vport_modify_mc_mac(xdev, hn->mac_addr, XSC_JOIN); + if (err) { + xsc_core_err(xdev, "failed to join mcg\n"); + xsc_del_l2_from_hash(hn); + } else { + hn->action = XSC_ACTION_NONE; + } + break; + case XSC_ACTION_DEL: + xsc_del_l2_from_hash(hn); + err = xsc_nic_vport_modify_mc_mac(xdev, hn->mac_addr, XSC_LEAVE); + if (err) { + xsc_core_err(xdev, "failed to leave mcg\n"); + xsc_add_l2_to_hash(fs->l2.netdev_mc, hn->mac_addr); + } + break; + default: + break; + } + + if (err) + xsc_core_info(xdev, "action=%u, mac=%02X:%02X:%02X:%02X:%02X:%02X\n", + hn->action, hn->mac_addr[0], hn->mac_addr[1], hn->mac_addr[2], + hn->mac_addr[3], hn->mac_addr[4], hn->mac_addr[5]); +} + +static void xsc_sync_netdev_mc_addr(struct xsc_core_device *xdev, + struct net_device *netdev, + struct xsc_flow_steering *fs) +{ + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + netdev_for_each_mc_addr(ha, netdev) { + xsc_add_l2_to_hash(fs->l2.netdev_mc, ha->addr); + } + + netif_addr_unlock_bh(netdev); +} + +static void xsc_apply_netdev_mc_addr(struct xsc_core_device *xdev, + struct xsc_flow_steering *fs) +{ + struct xsc_l2_hash_node *hn; + struct hlist_node *tmp; + int i; + + for (i = 0; i < XSC_L2_ADDR_HASH_SIZE; i++) + hlist_for_each_entry_safe(hn, tmp, &fs->l2.netdev_mc[i], hlist) + xsc_vport_context_update_mc_mac(xdev, fs, hn); +} + +static void xsc_handle_netdev_addr(struct xsc_core_device *xdev, + struct net_device *netdev, + struct xsc_flow_steering *fs) +{ + struct xsc_l2_hash_node *hn; + struct hlist_node *tmp; + int i; + + for (i = 0; i < XSC_L2_ADDR_HASH_SIZE; i++) + hlist_for_each_entry_safe(hn, tmp, &fs->l2.netdev_uc[i], hlist) + hn->action = XSC_ACTION_DEL; + + xsc_sync_netdev_uc_addr(xdev, netdev, fs); + + xsc_apply_netdev_uc_addr(xdev, fs); + + for (i = 0; i < XSC_L2_ADDR_HASH_SIZE; i++) + hlist_for_each_entry_safe(hn, tmp, &fs->l2.netdev_mc[i], hlist) + hn->action = XSC_ACTION_DEL; + + xsc_sync_netdev_mc_addr(xdev, netdev, fs); + + xsc_apply_netdev_mc_addr(xdev, fs); +} + void xsc_set_rx_mode_work(struct work_struct *work) { int err = 0; @@ -130,4 +314,6 @@ void xsc_set_rx_mode_work(struct work_struct *work) l2->promisc_enabled = promisc_enabled; l2->allmulti_enabled = allmulti_enabled; + + xsc_handle_netdev_addr(adapter->xdev, dev, &adapter->fs); } diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c index 32eb74563e4be442ef9e2956118250c9b39e8406..34699f42398ef25e9c03596e02f305081b0933a0 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c 
@@ -14,6 +14,7 @@ #include "common/xsc_cmd.h" #include "xsc_eth.h" #include "xsc_eth_debug.h" +#include "xsc_hw_comm.h" static void precmd_rlimit_set(void *data, u32 mac_port) { @@ -103,97 +104,91 @@ static int xsc_dcbx_hw_common(struct xsc_core_device *xdev, u16 opcode, int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp) { + int ret = 0; + switch (opcode) { case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_rate_limit_get), sizeof(struct xsc_rate_limit_get), NULL, postcmd_rlimit_get); - fallthrough; case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_rate_limit_set), 0, precmd_rlimit_set, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_GET_PFC: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0, sizeof(struct xsc_pfc_get), NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_SET_PFC: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_pfc_set), sizeof(struct xsc_pfc_set), NULL, NULL); - fallthrough; + case XSC_CMD_OP_IOCTL_SET_PFC_NEW: + ret = xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_pfc_set_new), + sizeof(struct xsc_pfc_set_new), + NULL, NULL); + break; case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0, sizeof(struct xsc_trust_mode_get), NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_trust_mode_set), 0, NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0, sizeof(struct xsc_dscp_pmt_get), NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_dscp_pmt_set), 0, NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_GET_SP: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0, sizeof(struct xsc_sp_get), NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_SET_SP: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_sp_set), 0, NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_GET_WEIGHT: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0, sizeof(struct xsc_weight_get), NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_SET_WEIGHT: return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, sizeof(struct xsc_weight_set), 0, NULL, NULL); - fallthrough; case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: return xsc_dcbx_hw_common(xdev, opcode, req, rsp, sizeof(struct xsc_pfc_prio_stats_mbox_in), sizeof(struct xsc_pfc_prio_stats_mbox_out), NULL, NULL); - fallthrough; case XSC_CMD_OP_GET_LLDP_STATUS: case XSC_CMD_OP_SET_LLDP_STATUS: return xsc_dcbx_hw_common(xdev, opcode, req, rsp, sizeof(struct xsc_lldp_status_mbox_in), sizeof(struct xsc_lldp_status_mbox_out), NULL, NULL); - fallthrough; case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH: - return xsc_dcbx_hw_common(xdev, opcode, req, rsp, - sizeof(struct xsc_pfc_set_drop_th_mbox_in), - sizeof(struct xsc_pfc_set_drop_th_mbox_out), - NULL, NULL); - fallthrough; + ret = xsc_dcbx_hw_common(xdev, opcode, req, rsp, + sizeof(struct xsc_pfc_set_drop_th_mbox_in), + sizeof(struct xsc_pfc_set_drop_th_mbox_out), + NULL, NULL); + break; case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: - return xsc_dcbx_hw_common(xdev, opcode, req, rsp, - sizeof(struct xsc_pfc_get_cfg_status_mbox_in), - sizeof(struct xsc_pfc_get_cfg_status_mbox_out), - NULL, NULL); - fallthrough; + ret = xsc_dcbx_hw_common(xdev, opcode, req, rsp, + sizeof(struct 
xsc_pfc_get_cfg_status_mbox_in), + sizeof(struct xsc_pfc_get_cfg_status_mbox_out), + NULL, NULL); + break; default: xsc_core_dbg(xdev, "unknown type=%d\n", opcode); } - return 0; + return ret; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h index 4b46698ca5b5566f39c53d7dcad7f9c064576983..a9043f85fa057cd159e9877da8e861c985c3e5c7 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h @@ -7,4 +7,5 @@ #define XSC_HW_COMMON_H int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp); -#endif /* XSC_HW_COMMON_H */ + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h index 071eec670d38905229f16415ada9ffac2b54d5e1..c69f4c5add2ad4c6530ac5bba071a384d566fb5f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h @@ -7,7 +7,6 @@ #define XSC_QUEUE_H #include - #include #include "../pci/wq.h" @@ -65,6 +64,7 @@ struct xsc_rq_frags_info { u8 wqe_bulk; u8 wqe_bulk_min; u8 frags_max_num; + u8 page_order; }; #define xsc_dim_t struct dim @@ -133,7 +133,7 @@ struct xsc_rq; struct xsc_cqe; typedef void (*xsc_fp_handle_rx_cqe)(struct xsc_cqwq *cqwq, struct xsc_rq *rq, struct xsc_cqe *cqe); -typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq); +typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq, bool force); typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq, u16 ix); typedef struct sk_buff * (*xsc_fp_skb_from_cqe)(struct xsc_rq *rq, struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); @@ -151,6 +151,8 @@ struct xsc_rq { struct { u16 headroom; u8 map_dir; /* dma map direction */ + u8 page_order; + } buff; struct page_pool *page_pool; @@ -264,11 +266,6 @@ struct xsc_wqe_ctrl_seg { u32 rsv : 30; }; -static inline u8 get_cqe_opcode(struct xsc_cqe *cqe) -{ - return cqe->msg_opcode; -} - static inline void xsc_dump_err_cqe(struct xsc_core_device *dev, struct xsc_cqe *cqe) { diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile index 45a7d473cac795fc063c5c39df596772455031dd..83c8793b5effc41d3cd3bb0e69d3bab2f35d6c4c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile @@ -4,13 +4,20 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc +ifeq ($(HAVE_TO_USE_M_IB_CORE), 1) + ccflags-y += $(xsc-ccflags) +endif + obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o xsc_pci-y := main.o eq.o intf.o debugfs.o alloc.o wq.o cq.o qp.o \ - cmd2.o fw.o port.o mr.o pd.o xsc_lag.o xsc_pci_ctrl.o\ + cmd2.o fw.o port.o mr.o pd.o xsc_lag.o xsc_pci_ctrl.o xsc_prgrmmbl_cc_ctrl.o\ pci_irq.o vport.o sriov.o sriov_sysfs.o devlink.o eswitch.o xsc_port_ctrl.o res_obj.o qpts.o\ fw/cmd.o \ fw/xsc_flow.o \ fw/xsc_res.o \ - fw/osdep.o \ - fw/xsc_mem.o + hal/xsc_hal.o \ + hal/andes_impl.o \ + hal/diamond_impl.o \ + hal/diamond_next_impl.o \ + tunnel_cmd.o \ No newline at end of file diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c index 84bf3d2ca5c93181ed47650e3099f56f505a3ca7..cdef1b996fdfb13b7ac07b2d62fa61f18df96f0a 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c @@ -153,7 +153,11 @@ static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, /* WA for 
kernels that don't use numa_mem_id in alloc_pages_node */ if (node == NUMA_NO_NODE) +#ifdef HAVE_NUMA_MEM_ID + node = numa_mem_id(); +#else node = first_memory_node; +#endif mutex_lock(&dev_res->alloc_mutex); original_node = dev_to_node(device); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c index 259c003a545cec6101f1410e4c3ded07804ed011..8111d31e029ebba56bfad91369192fd95ef37e88 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c @@ -17,7 +17,6 @@ #include #include "common/xsc_hsi.h" #include "common/xsc_core.h" -#include "tmp_cmdq_defines.h" enum { CMD_IF_REV = 3, @@ -390,6 +389,12 @@ const char *xsc_command_str(int command) case XSC_CMD_OP_MODIFY_FEC_PARAM: return "MODIFY_FEC_PARAM"; + case XSC_CMD_OP_MODIFY_NIC_VPORT_UC_MAC: + return "MODIFY_NIC_VPORT_UC_MAC"; + + case XSC_CMD_OP_MODIFY_NIC_VPORT_MC_MAC: + return "MODIFY_NIC_VPORT_MC_MAC"; + case XSC_CMD_OP_QUERY_FEC_PARAM: return "QUERY_FEC_PARAM"; @@ -447,15 +452,12 @@ const char *xsc_command_str(int command) case XSC_CMD_OP_IOCTL_SET_PFC: return "SET_PFC"; - case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH: - return "SET_PFC_DROP_TH"; + case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: + return "GET_PFC_CFG_STATUS"; case XSC_CMD_OP_IOCTL_GET_PFC: return "GET_PFC"; - case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: - return "GET_PFC_CFG_STATUS"; - case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: return "SET_RATE_LIMIT"; @@ -630,9 +632,57 @@ const char *xsc_command_str(int command) case XSC_CMD_OP_QUERY_HW_STATS_ETH: return "QUERY_HW_STATS_ETH"; + case XSC_CMD_OP_QUERY_HW_PF_UC_STATS: + return "QUERY_HW_PF_UC_STATS"; + case XSC_CMD_OP_SET_VPORT_RATE_LIMIT: return "SET_VPORT_RATE_LIMIT"; + case XSC_CMD_OP_GET_LINK_SUB_STATE: + return "GET_LINK_SUB_STATE"; + + case XSC_CMD_OP_IOCTL_SET_ROCE_ACCL_NEXT: + return "SET_ROCE_ACCL_NEXT"; + + case XSC_CMD_OP_IOCTL_GET_ROCE_ACCL_NEXT: + return "GET_ROCE_ACCL_NEXT"; + + case XSC_CMD_OP_ENABLE_RELAXED_ORDER: + return "ENABLE_RELAXED_ORDER"; + + case XSC_CMD_OP_QUERY_GUID: + return "QUERY_GUID"; + + case XSC_CMD_OP_ACTIVATE_HW_CONFIG: + return "ACTIVATE_HW_CONFIG"; + + case XSC_CMD_OP_QUERY_READ_FLUSH: + return "QUERY_READ_FLUSH"; + + case XSC_CMD_OP_SEND_TUNNEL_CMD_REQ: + return "SEND_TUNNEL_CMD_REQ"; + + case XSC_CMD_OP_RECV_TUNNEL_CMD_REQ: + return "RECV_TUNNEL_CMD_REQ"; + + case XSC_CMD_OP_SEND_TUNNEL_CMD_RESP: + return "SEND_TUNNEL_CMD_RESP"; + + case XSC_CMD_OP_RECV_TUNNEL_CMD_RESP: + return "RECV_TUNNEL_CMD_RESP"; + + case XSC_CMD_OP_GET_IOCTL_INFO: + return "GET_IOCTL_INFO"; + + case XSC_CMD_OP_ANNOUNCE_DRIVER_INSTANCE: + return "ANNOUNCE_DRIVER_INSTANCE"; + + case XSC_CMD_OP_SYNC_MR_TO_FW: + return "SYNC_MR_TO_FW"; + + case XSC_CMD_OP_SYNC_MR_FROM_FW: + return "SYNC_MR_FROM_FW"; + default: return "unknown command opcode"; } } @@ -712,9 +762,10 @@ static void cmd_work_handler(struct work_struct *work) wmb(); cmd->cmd_pid = (cmd->cmd_pid + 1) % (1 << cmd->log_sz); - writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); + xsc_update_cmdq_req_pid(xdev, cmd->cmd_pid); mmiowb(); spin_unlock_irqrestore(&cmd->doorbell_lock, flags); + } static const char *deliv_status_to_str(u8 status) @@ -807,8 +858,11 @@ static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, } err = wait_func(xdev, ent); - if (err == -ETIMEDOUT) + if (err == -ETIMEDOUT) { + xsc_core_err(xdev, "cmd(%s) timeout\n", xsc_command_str(msg_to_opcode(in))); goto out; + } + t1 = timespec64_to_ktime(ent->ts1); t2 = 
timespec64_to_ktime(ent->ts2); delta = ktime_sub(t2, t1); @@ -1425,9 +1479,8 @@ static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, /* ring doorbell after the descriptor is valid */ wmb(); - writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); - if (readl(REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)) != 0) - writel(0xF, REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)); + xsc_update_cmdq_req_pid(xdev, cmd->cmd_pid); + xsc_check_cmdq_status(xdev); mmiowb(); xsc_core_dbg(xdev, "write 0x%x to command doorbell, idx %u ~ %u\n", cmd->cmd_pid, @@ -1539,8 +1592,8 @@ static int request_pid_cid_mismatch_restore(struct xsc_core_device *xdev) int err; - req_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); - req_cid = readl(REG_ADDR(xdev, cmd->reg.req_cid_addr)); + req_pid = xsc_get_cmdq_req_pid(xdev); + req_cid = xsc_get_cmdq_req_cid(xdev); if (req_pid >= (1 << cmd->log_sz) || req_cid >= (1 << cmd->log_sz)) { xsc_core_err(xdev, "req_pid %d, req_cid %d, out of normal range!!! max value is %d\n", req_pid, req_cid, (1 << cmd->log_sz)); @@ -1573,8 +1626,10 @@ int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out u8 status = 0; struct xsc_cmd *cmd = &xdev->cmd; - if (cmd->cmd_status == XSC_CMD_STATUS_TIMEDOUT) + if (cmd->cmd_status == XSC_CMD_STATUS_TIMEDOUT) { + xsc_core_warn(xdev, "cmd queue is blocked, return directly\n"); return -ETIMEDOUT; + } inb = alloc_msg(xdev, in_size); if (IS_ERR(inb)) { @@ -1672,6 +1727,14 @@ static int create_msg_cache(struct xsc_core_device *xdev) return err; } +static inline void xsc_cmd_reset_err_handler_retry_cnt(struct xsc_core_device *xdev) +{ + if (xdev->cmd.retry_cnt) { + xdev->cmd.retry_cnt = 0; + xsc_core_info(xdev, "get the retry cmd response, so reset retry cnt.\n"); + } +} + static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xsc_rsp_layout *rsp) { struct xsc_cmd *cmd = &xdev->cmd; @@ -1702,6 +1765,7 @@ static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xs free_ent(cmd, ent->idx); complete(&ent->done); up(&cmd->sem); + xsc_cmd_reset_err_handler_retry_cnt(xdev); } static int cmd_cq_polling(void *data) @@ -1714,7 +1778,7 @@ static int cmd_cq_polling(void *data) while (!kthread_should_stop()) { if (need_resched()) schedule(); - cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + cq_pid = xsc_get_cmdq_rsp_pid(xdev); if (cmd->cq_cid == cq_pid) { mdelay(3); continue; @@ -1728,8 +1792,9 @@ static int cmd_cq_polling(void *data) } if (cmd->owner_bit != rsp->owner_bit) { //hw update cq doorbell but buf may not ready - xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", - cmd->cq_cid, cq_pid); + xsc_core_err_rl(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + msleep(20); continue; } @@ -1737,7 +1802,7 @@ static int cmd_cq_polling(void *data) cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); - writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + xsc_update_cmdq_rsp_cid(xdev, cmd->cq_cid); if (cmd->cq_cid == 0) cmd->owner_bit = !cmd->owner_bit; } @@ -1746,48 +1811,7 @@ static int cmd_cq_polling(void *data) int xsc_cmd_err_handler(struct xsc_core_device *xdev) { - union interrupt_stat { - struct { - u32 hw_read_req_err:1; - u32 hw_write_req_err:1; - u32 req_pid_err:1; - u32 rsp_cid_err:1; - }; - u32 raw; - } stat; - int err = 0; - int retry = 0; - - stat.raw = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); - while (stat.raw != 0) { - err++; - if (stat.hw_read_req_err) { - retry = 1; - 
stat.hw_read_req_err = 0; - xsc_core_err(xdev, "hw report read req from host failed!\n"); - } else if (stat.hw_write_req_err) { - retry = 1; - stat.hw_write_req_err = 0; - xsc_core_err(xdev, "hw report write req to fw failed!\n"); - } else if (stat.req_pid_err) { - stat.req_pid_err = 0; - xsc_core_err(xdev, "hw report unexpected req pid!\n"); - } else if (stat.rsp_cid_err) { - stat.rsp_cid_err = 0; - xsc_core_err(xdev, "hw report unexpected rsp cid!\n"); - } else { - stat.raw = 0; - xsc_core_err(xdev, "ignore unknown interrupt!\n"); - } - } - - if (retry) - writel(xdev->cmd.cmd_pid, REG_ADDR(xdev, xdev->cmd.reg.req_pid_addr)); - - if (err) - writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); - - return err; + return xsc_handle_cmdq_interrupt(xdev); } void xsc_cmd_resp_handler(struct xsc_core_device *xdev) @@ -1799,7 +1823,7 @@ void xsc_cmd_resp_handler(struct xsc_core_device *xdev) int count = 0; while (count < budget) { - cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + cq_pid = xsc_get_cmdq_rsp_pid(xdev); if (cq_pid == cmd->cq_cid) return; @@ -1817,7 +1841,7 @@ void xsc_cmd_resp_handler(struct xsc_core_device *xdev) xsc_cmd_comp_handler(xdev, rsp->idx, rsp); cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); - writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + xsc_update_cmdq_rsp_cid(xdev, cmd->cq_cid); if (cmd->cq_cid == 0) cmd->owner_bit = !cmd->owner_bit; @@ -1830,14 +1854,14 @@ static void xsc_cmd_handle_rsp_before_reload { u32 rsp_pid, rsp_cid; - rsp_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); - rsp_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + rsp_pid = xsc_get_cmdq_rsp_pid(xdev); + rsp_cid = xsc_get_cmdq_rsp_cid(xdev); if (rsp_pid == rsp_cid) return; cmd->cq_cid = rsp_pid; - writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + xsc_update_cmdq_rsp_cid(xdev, cmd->cq_cid); } int xsc_cmd_init(struct xsc_core_device *xdev) @@ -1846,40 +1870,9 @@ int xsc_cmd_init(struct xsc_core_device *xdev) int align = roundup_pow_of_two(size); struct xsc_cmd *cmd = &xdev->cmd; u32 cmd_h, cmd_l; - u32 err_stat; int err; int i; - //sriov need adapt for this process. 
- //now there is 544 cmdq resource, soc using from id 514 - if (xsc_core_is_pf(xdev)) { - cmd->reg.req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR; - cmd->reg.req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR; - cmd->reg.rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR; - cmd->reg.rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR; - cmd->reg.req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR; - cmd->reg.req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR; - cmd->reg.rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR; - cmd->reg.rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR; - cmd->reg.msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR; - cmd->reg.element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR; - cmd->reg.q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR; - cmd->reg.interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR; - } else { - cmd->reg.req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR; - cmd->reg.req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR; - cmd->reg.rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR; - cmd->reg.rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR; - cmd->reg.req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR; - cmd->reg.req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR; - cmd->reg.rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR; - cmd->reg.rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR; - cmd->reg.msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR; - cmd->reg.element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR; - cmd->reg.q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR; - cmd->reg.interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR; - } - cmd->pool = pci_pool_create("xsc_cmd", xdev->pdev, size, align, 0); if (!cmd->pool) return -ENOMEM; @@ -1909,8 +1902,8 @@ int xsc_cmd_init(struct xsc_core_device *xdev) goto err_map_cmd; } - cmd->cmd_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); - cmd->cq_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + cmd->cmd_pid = xsc_get_cmdq_req_pid(xdev); + cmd->cq_cid = xsc_get_cmdq_rsp_cid(xdev); cmd->ownerbit_learned = 0; xsc_cmd_handle_rsp_before_reload(cmd, xdev); @@ -1919,8 +1912,8 @@ int xsc_cmd_init(struct xsc_core_device *xdev) #define Q_DEPTH_LOG 5 //32 cmd->log_sz = Q_DEPTH_LOG; - cmd->log_stride = readl(REG_ADDR(xdev, cmd->reg.element_sz_addr)); - writel(1 << cmd->log_sz, REG_ADDR(xdev, cmd->reg.q_depth_addr)); + cmd->log_stride = xsc_get_cmdq_log_stride(xdev); + xsc_set_cmdq_depth(xdev, 1 << cmd->log_sz); if (cmd->log_stride != ELEMENT_SIZE_LOG) { dev_err(&xdev->pdev->dev, "firmware failed to init cmdq, log_stride=(%d, %d)\n", cmd->log_stride, ELEMENT_SIZE_LOG); @@ -1960,9 +1953,7 @@ int xsc_cmd_init(struct xsc_core_device *xdev) err = -ENOMEM; goto err_map; } - - writel(cmd_h, REG_ADDR(xdev, cmd->reg.req_buf_h_addr)); - writel(cmd_l, REG_ADDR(xdev, cmd->reg.req_buf_l_addr)); + xsc_set_cmdq_req_buf_addr(xdev, cmd_h, cmd_l); cmd_h = (u32)((u64)(cmd->cq_dma) >> 32); cmd_l = (u32)(cmd->cq_dma); @@ -1971,8 +1962,7 @@ int xsc_cmd_init(struct xsc_core_device *xdev) err = -ENOMEM; goto err_map; } - writel(cmd_h, REG_ADDR(xdev, cmd->reg.rsp_buf_h_addr)); - writel(cmd_l, REG_ADDR(xdev, cmd->reg.rsp_buf_l_addr)); + xsc_set_cmdq_rsp_buf_addr(xdev, cmd_h, cmd_l); /* Make sure firmware sees the complete address before we proceed */ wmb(); @@ -2018,11 +2008,7 @@ int xsc_cmd_init(struct xsc_core_device *xdev) } // clear abnormal state to avoid the impact of previous error - err_stat = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); - if (err_stat) { - xsc_core_warn(xdev, "err_stat 0x%x when initializing, 
clear it\n", err_stat); - writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); - } + xsc_check_cmdq_status(xdev); return 0; @@ -2075,71 +2061,55 @@ void xsc_cmd_cleanup(struct xsc_core_device *xdev) } EXPORT_SYMBOL(xsc_cmd_cleanup); -static const char *cmd_status_str(u8 status) -{ - switch (status) { - case XSC_CMD_STAT_OK: - return "OK"; - case XSC_CMD_STAT_INT_ERR: - return "internal error"; - case XSC_CMD_STAT_BAD_OP_ERR: - return "bad operation"; - case XSC_CMD_STAT_BAD_PARAM_ERR: - return "bad parameter"; - case XSC_CMD_STAT_BAD_SYS_STATE_ERR: - return "bad system state"; - case XSC_CMD_STAT_BAD_RES_ERR: - return "bad resource"; - case XSC_CMD_STAT_RES_BUSY: - return "resource busy"; - case XSC_CMD_STAT_LIM_ERR: - return "limits exceeded"; - case XSC_CMD_STAT_BAD_RES_STATE_ERR: - return "bad resource state"; - case XSC_CMD_STAT_IX_ERR: - return "bad index"; - case XSC_CMD_STAT_NO_RES_ERR: - return "no resources"; - case XSC_CMD_STAT_BAD_INP_LEN_ERR: - return "bad input length"; - case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: - return "bad output length"; - case XSC_CMD_STAT_BAD_QP_STATE_ERR: - return "bad QP state"; - case XSC_CMD_STAT_BAD_PKT_ERR: - return "bad packet (discarded)"; - case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: - return "bad size too many outstanding CQEs"; - default: - return "unknown status"; - } -} +static const struct xsc_cmd_status_code_map cmd_status_code_map[XSC_CMD_STATUS_CODE_COUNT] = { + /* common status code */ + [XSC_CMD_STATUS_OK] = { 0, "OK" }, + [XSC_CMD_STATUS_FAIL] = { -EIO, "operation failed" }, + [XSC_CMD_STATUS_NOT_SUPPORTED] = { -EOPNOTSUPP, "operation not supported" }, + [XSC_CMD_STATUS_BAD_PARAM] = { -EINVAL, "bad parameter" }, + [XSC_CMD_STATUS_INVAL_RES] = { -EIO, "invalid resource" }, + [XSC_CMD_STATUS_BUSY] = { -EBUSY, "operation busy" }, + [XSC_CMD_STATUS_PENDING] = { -EIO, "operation pending" }, + [XSC_CMD_STATUS_INVAL_DATA] = { -EIO, "invalid data" }, + [XSC_CMD_STATUS_NOT_FOUND] = { -ENODEV, "function or resource not found" }, + [XSC_CMD_STATUS_NO_RES] = { -EIO, "out of resources" }, + + /* extended status code */ + [XSC_CMD_STATUS_INVAL_FUNC] = { -ENODEV, "invalid function" }, + [XSC_CMD_STATUS_NO_MPT_RES] = { -EIO, "no MPT resources" }, + [XSC_CMD_STATUS_NO_MTT_RES] = { -EIO, "no MTT resources" }, + [XSC_CMD_STATUS_NO_EQN_RES] = { -EIO, "no EQN resource" }, + [XSC_CMD_STATUS_NO_EQ_PA_RES] = { -EIO, "no EQ PA resource" }, + [XSC_CMD_STATUS_NO_CQN_RES] = { -EIO, "no CQN resource" }, + [XSC_CMD_STATUS_NO_CQ_PA_RES] = { -EIO, "no CQ PA resource" }, + [XSC_CMD_STATUS_NO_QPN_RES] = { -EIO, "no QPN resource" }, + [XSC_CMD_STATUS_NO_QP_PA_RES] = { -EIO, "no QP PA resource" }, + [XSC_CMD_STATUS_NO_PDN_RES] = { -EIO, "no PDN resource" }, + [XSC_CMD_STATUS_QP_FLUSH_BUSY] = { -EBUSY, "QP flushing busy" }, + [XSC_CMD_STATUS_QP_FLUSH_PENDING] = { -EIO, "QP flushing pending" }, + + /* Cmdq prototol status code */ + [XSC_CMD_STATUS_BAD_INBUF] = { -EIO, "bad input buffer size" }, + [XSC_CMD_STATUS_BAD_OUTBUF] = { -EIO, "bad output buffer size" }, + [XSC_CMD_STATUS_INVAL_OPCODE] = { -EOPNOTSUPP, "invalid operation code" }, +}; int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr) { + const struct xsc_cmd_status_code_map *map; + if (!hdr->status) return 0; - pr_warn("command failed, status %s(0x%x)\n", - cmd_status_str(hdr->status), hdr->status); - - switch (hdr->status) { - case XSC_CMD_STAT_OK: return 0; - case XSC_CMD_STAT_INT_ERR: return -EIO; - case XSC_CMD_STAT_BAD_OP_ERR: return -EOPNOTSUPP; - case XSC_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; - case 
XSC_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; - case XSC_CMD_STAT_BAD_RES_ERR: return -EINVAL; - case XSC_CMD_STAT_RES_BUSY: return -EBUSY; - case XSC_CMD_STAT_LIM_ERR: return -EINVAL; - case XSC_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; - case XSC_CMD_STAT_IX_ERR: return -EINVAL; - case XSC_CMD_STAT_NO_RES_ERR: return -EAGAIN; - case XSC_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; - case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; - case XSC_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; - case XSC_CMD_STAT_BAD_PKT_ERR: return -EINVAL; - case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; - default: return -EIO; - } + map = &cmd_status_code_map[hdr->status]; + + if (hdr->status != XSC_CMD_STATUS_NOT_SUPPORTED) + pr_warn("command failed, status %s(0x%x)\n", + map->str ? map->str : "unknown", hdr->status); + + if (map->str) + return map->errno; + + return -EIO; } +EXPORT_SYMBOL(xsc_cmd_status_to_err); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c index 49a00f759b5fdecf9ac82c70c64f92d18081b6b4..47382aea8f403cd240682f7bde82ced2a7fafc9f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c @@ -33,33 +33,142 @@ void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type) complete(&cq->free); } +static int xsc_set_cq_context(struct xsc_core_device *dev, struct xsc_cq_context_ex *ctx_ex, + u32 *pa_list_base, u32 *cqn) +{ + struct xsc_set_cq_context_mbox_in in; + struct xsc_set_cq_context_mbox_out out; + int ret = 0; + + memset(&in, 0, sizeof(in)); + memcpy(&in.ctx_ex, ctx_ex, sizeof(*ctx_ex)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_CQ_CONTEXT); + memset(&out, 0, sizeof(out)); + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to set cq context\n"); + return -1; + } + + *pa_list_base = be32_to_cpu(out.cq_pa_list_base); + *cqn = be32_to_cpu(out.cqn); + return ret; +} + +static int xsc_set_cq_buf_pa(struct xsc_core_device *dev, struct xsc_create_cq_ex_mbox_in *req, + u32 pa_list_base, u32 cqn) +{ + struct xsc_set_cq_buf_pa_mbox_in *in; + struct xsc_set_cq_buf_pa_mbox_out out; + u16 pa_num_total = be16_to_cpu(req->ctx_ex.ctx.pa_num); + u16 pa_num_for_each_max = (dev->caps.max_cmd_in_len - sizeof(*in)) / sizeof(__be64); + u16 pa_num_left = pa_num_total; + u16 pa_num = 0; + u32 copy_len = 0; + int ret = 0; + int in_len = 0; + + while (pa_num_left) { + pa_num = min(pa_num_for_each_max, pa_num_left); + copy_len = pa_num * sizeof(__be64); + in_len = sizeof(*in) + copy_len; + in = kvzalloc(in_len, GFP_KERNEL); + if (!in) + return -ENOMEM; + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_CQ_BUF_PA); + in->pa_list_start = cpu_to_be32(pa_list_base); + in->pa_num = cpu_to_be32(pa_num); + memcpy(in->pas, &req->pas[pa_num_total - pa_num_left], copy_len); + pa_num_left -= pa_num; + pa_list_base += pa_num; + memset(&out, 0, sizeof(out)); + + ret = xsc_cmd_exec(dev, in, in_len, &out, sizeof(out)); + kvfree(in); + + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to set cq buf pa, cqn = %d\n", cqn); + return -EINVAL; + } + } + + return 0; +} + +int xsc_create_cq_compat_handler(struct xsc_core_device *dev, struct xsc_create_cq_ex_mbox_in *in, + struct xsc_create_cq_mbox_out *out) +{ + struct xsc_create_cq_mbox_in *_in; + int _inlen = sizeof(*_in) + + be16_to_cpu(in->ctx_ex.ctx.pa_num) * sizeof(__be64); + int err = 0; + + _in = kvzalloc(_inlen, GFP_KERNEL); + if (!_in) + return -ENOMEM; + + _in->hdr.opcode = 
cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + memcpy(&_in->ctx, &in->ctx_ex.ctx, sizeof(_in->ctx)); + memcpy(&_in->pas, &in->pas, _inlen - sizeof(*_in)); + memset(out, 0, sizeof(*out)); + err = xsc_cmd_exec(dev, _in, _inlen, out, sizeof(*out)); + kvfree(_in); + + if (err) + return err; + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_create_cq_compat_handler); + int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, - struct xsc_create_cq_mbox_in *in, int inlen) + struct xsc_create_cq_ex_mbox_in *in, int inlen) { int err; struct xsc_cq_table *table = &dev->dev_res->cq_table; struct xsc_create_cq_mbox_out out; struct xsc_destroy_cq_mbox_in din; struct xsc_destroy_cq_mbox_out dout; + u32 pa_list_base = 0; + u32 cqn = 0; - in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); - memset(&out, 0, sizeof(out)); - err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) - return err; + if (inlen < dev->caps.max_cmd_in_len) { + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ_EX); + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; - if (out.hdr.status) - return xsc_cmd_status_to_err(&out.hdr); + if (out.hdr.status && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED) + return xsc_cmd_status_to_err(&out.hdr); + + if (out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + err = xsc_create_cq_compat_handler(dev, in, &out); + if (err) + return err; + } - cq->cqn = be32_to_cpu(out.cqn); + cqn = be32_to_cpu(out.cqn); + } else { + err = xsc_set_cq_context(dev, &in->ctx_ex, &pa_list_base, &cqn); + if (err) + return err; + + err = xsc_set_cq_buf_pa(dev, in, pa_list_base, cqn); + if (err) + goto err_cmd; + } + + cq->cqn = cqn; cq->cons_index = 0; cq->arm_sn = 0; - cq->arm_db = dev->regs.complete_db; - cq->ci_db = dev->regs.complete_reg; cq->dev = dev; atomic_set(&cq->refcount, 1); init_completion(&cq->free); + xsc_arm_cq(dev, cq->cqn, 0, 0); + spin_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, cq->cqn, cq); spin_unlock_irq(&table->lock); @@ -77,6 +186,7 @@ int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, memset(&din, 0, sizeof(din)); memset(&dout, 0, sizeof(dout)); din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + din.cqn = cpu_to_be32(cqn); xsc_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); return err; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c index 5ea8d8a29107272059704a9041130f17e647818c..c5093625d3843468279f58ef8c200784fff8475d 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c @@ -62,147 +62,15 @@ static char *cq_fields[] = { struct dentry *xsc_debugfs_root; EXPORT_SYMBOL(xsc_debugfs_root); -static ssize_t xsc_debugfs_reg_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - char *buf; - int len; - char xsc_debugfs_reg_buf[256] = ""; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - "xsc debugfs", - xsc_debugfs_reg_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - - return len; -} - -static ssize_t xsc_debugfs_reg_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct xsc_core_device *xdev = 
filp->private_data; - u64 reg; - int cnt, len; - int num; - int offset; - char xsc_debugfs_reg_buf[256] = ""; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - - if (count >= sizeof(xsc_debugfs_reg_buf)) - return -ENOSPC; - - len = simple_write_to_buffer(xsc_debugfs_reg_buf, - sizeof(xsc_debugfs_reg_buf) - 1, - ppos, buffer, count); - if (len < 0) - return len; - - xsc_debugfs_reg_buf[len] = '\0'; - - if (strncmp(xsc_debugfs_reg_buf, "write", 5) == 0) { - cnt = sscanf(&xsc_debugfs_reg_buf[5], "%llx %n", - ®, &offset); - if (cnt == 1) { - int tmp; - int value; - int buf[8]; - int *ptr; - - offset += 5; - num = 0; - while (1) { - cnt = sscanf(&xsc_debugfs_reg_buf[offset], "%x %n", &value, &tmp); - if (cnt < 2) - break; - xsc_core_info(xdev, "write: 0x%llx = 0x%x\n", - (reg + sizeof(int) * num), value); - offset += tmp; - buf[num++] = value; - if (num == 8) - break; - } - if (num > 1) { - ptr = &buf[0]; - IA_WRITE(xdev, reg, ptr, num); - } else if (num == 1) { - REG_WR32(xdev, reg, buf[0]); - } - } else { - xsc_core_err(xdev, "write \n"); - } - } else if (strncmp(xsc_debugfs_reg_buf, "read", 4) == 0) { - cnt = sscanf(&xsc_debugfs_reg_buf[4], "%llx %d %n", ®, &num, &offset); - if (cnt == 2) { - int *buf; - int i; - int *ptr; - - buf = kcalloc(num, sizeof(int), GFP_KERNEL); - if (!buf) - return -ENOMEM; - ptr = buf; - IA_READ(xdev, reg, ptr, num); - xsc_core_info(xdev, "read: 0x%llx num:%d\n", reg, num); - for (i = 0; i < num; i++) - xsc_core_info(xdev, "read:0x%llx = %#x\n", - (reg + sizeof(int) * i), buf[i]); - } else if (cnt == 1) { - int value = REG_RD32(xdev, reg); - - xsc_core_info(xdev, "read: 0x%llx = %#x\n", reg, value); - } else { - xsc_core_err(xdev, "read \n"); - } - } else { - xsc_core_err(xdev, "Unknown command %s\n", xsc_debugfs_reg_buf); - xsc_core_err(xdev, "Available commands:\n"); - xsc_core_err(xdev, "read \n"); - xsc_core_err(xdev, "write \n"); - } - return count; -} - -static const struct file_operations xsc_debugfs_reg_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = xsc_debugfs_reg_read, - .write = xsc_debugfs_reg_write, -}; - int xsc_debugfs_init(struct xsc_core_device *dev) { const char *name = pci_name(dev->pdev); - struct dentry *pfile; if (!xsc_debugfs_root) return -ENOMEM; dev->dev_res->dbg_root = debugfs_create_dir(name, xsc_debugfs_root); - if (dev->dev_res->dbg_root) { - pfile = debugfs_create_file("reg_ops", 0600, - dev->dev_res->dbg_root, dev, - &xsc_debugfs_reg_fops); - if (!pfile) - xsc_core_err(dev, "failed to create debugfs ops for %s\n", name); - } else { + if (!dev->dev_res->dbg_root) { xsc_core_err(dev, "failed to create debugfs dir for %s\n", name); return -ENOMEM; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h index c0b394d99a9374d7171bb0883f661c77934bb6d6..287e8019e73f0b64d05da5faf131d1ebdd91b1bb 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h @@ -12,4 +12,5 @@ struct devlink *xsc_devlink_alloc(void); void xsc_devlink_free(struct devlink *devlink); int xsc_devlink_register(struct devlink *devlink, struct device *dev); void xsc_devlink_unregister(struct devlink *devlink); + #endif /* XSC_DEVLINK_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c index 1ce0123fcdd2ee66582bdf557a6a97dcc15b8b7a..3457fd931fb1d372070991fee7947c35ff61e8e1 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c +++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c @@ -64,20 +64,7 @@ static struct xsc_eqe *next_eqe_sw(struct xsc_eq *eq) return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; } -static void eq_update_ci(struct xsc_eq *eq, int arm) -{ - union xsc_eq_doorbell db; - - db.val = 0; - db.arm = !!arm; - db.eq_next_cid = eq->cons_index; - db.eq_id = eq->eqn; - writel(db.val, REG_ADDR(eq->dev, eq->doorbell)); - /* We still want ordering, just not swabbing, so add a barrier */ - mb(); -} - -void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) +static void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) { struct xsc_core_cq *cq; struct xsc_cq_table *table = &dev->dev_res->cq_table; @@ -104,7 +91,7 @@ void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) complete(&cq->free); } -void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) +static void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) { struct xsc_core_cq *cq; struct xsc_cq_table *table = &dev->dev_res->cq_table; @@ -132,6 +119,7 @@ static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) int eqes_found = 0; int set_ci = 0; u32 cqn, qpn, queue_id; + int eq_db_arm = 1; while ((eqe = next_eqe_sw(eq))) { /* Make sure we read EQ entry contents after we've @@ -177,12 +165,13 @@ static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) if (unlikely(set_ci >= XSC_NUM_SPARE_EQE)) { xsc_core_dbg(dev, "EQ%d eq_num=%d qpn=%d, db_noarm\n", eq->eqn, set_ci, eqe->queue_id); - eq_update_ci(eq, 0); + xsc_update_eq_ci(eq->dev, eq->eqn, eq->cons_index, 0); set_ci = 0; } } - eq_update_ci(eq, 1); + xsc_update_eq_ci(eq->dev, eq->eqn, eq->cons_index, eq_db_arm); + return eqes_found; } @@ -191,7 +180,6 @@ static irqreturn_t xsc_msix_handler(int irq, void *eq_ptr) { struct xsc_eq *eq = eq_ptr; struct xsc_core_device *dev = eq->dev; - xsc_eq_int(dev, eq); /* MSI-X vectors always belong to us */ @@ -244,6 +232,7 @@ int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, in->ctx.pa_num = cpu_to_be16(hw_npages); in->ctx.glb_func_id = cpu_to_be16(dev->glb_func_id); in->ctx.is_async_eq = (vecidx == XSC_EQ_VEC_ASYNC ? 
1 : 0); + in->ctx.page_shift = PAGE_SHIFT; err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) @@ -260,7 +249,6 @@ int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, eq->eqn = be32_to_cpu(out.eqn); eq->irqn = pci_irq_vector(dev->pdev, vecidx); eq->dev = dev; - eq->doorbell = dev->regs.event_db; eq->index = vecidx; xsc_core_dbg(dev, "msix%d request vector%d eq%d irq%d\n", vecidx, msix_vec_offset, eq->eqn, eq->irqn); @@ -272,7 +260,7 @@ int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, /* EQs are created in ARMED state */ - eq_update_ci(eq, 1); + xsc_update_eq_ci(eq->dev, eq->eqn, eq->cons_index, 1); xsc_vfree(in); return 0; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c index 7c0c8c10abd5e91bc551a97e5cffba2e4fb62f56..37afa4310d855cc7c17903baf974731cb11fe3a3 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c @@ -163,9 +163,9 @@ static void esw_vport_change_handler(struct work_struct *work) mutex_unlock(&esw->state_lock); } -void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, - struct xsc_vport *vport, - enum xsc_eswitch_vport_event enabled_events) +static void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, + struct xsc_vport *vport, + enum xsc_eswitch_vport_event enabled_events) { mutex_lock(&esw->state_lock); if (vport->enabled) @@ -184,26 +184,8 @@ void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, mutex_unlock(&esw->state_lock); } -void xsc_eswitch_disable_vport(struct xsc_eswitch *esw, - struct xsc_vport *vport) -{ - u16 vport_num = vport->vport; - - mutex_lock(&esw->state_lock); - if (!vport->enabled) - goto done; - - xsc_core_dbg(esw->dev, "Disabling vport(%d)\n", vport_num); - /* Mark this vport as disabled to discard new events */ - vport->enabled = false; - vport->enabled_events = 0; - esw->enabled_vports--; -done: - mutex_unlock(&esw->state_lock); -} - -void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw, - enum xsc_eswitch_vport_event enabled_events) +static void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw, + enum xsc_eswitch_vport_event enabled_events) { struct xsc_vport *vport; int i; @@ -220,7 +202,7 @@ void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw, XSC_VPORT_PROMISC_CHANGE | \ XSC_VPORT_VLAN_CHANGE) -int esw_legacy_enable(struct xsc_eswitch *esw) +static int esw_legacy_enable(struct xsc_eswitch *esw) { struct xsc_vport *vport; unsigned long i; @@ -323,12 +305,13 @@ int xsc_eswitch_init(struct xsc_core_device *dev) esw->first_host_vport = xsc_eswitch_first_host_vport_num(dev); esw->work_queue = create_singlethread_workqueue("xsc_esw_wq"); if (!esw->work_queue) { + xsc_core_err(dev, "failed to create eswitch work queue\n"); err = -ENOMEM; goto abort; } - esw->vports = kcalloc(total_vports, sizeof(struct xsc_vport), - GFP_KERNEL); + esw->vports = xsc_vzalloc(total_vports * sizeof(struct xsc_vport)); if (!esw->vports) { + xsc_core_err(dev, "failed to alloc mem for eswitch vports\n"); err = -ENOMEM; goto abort; } @@ -356,9 +339,9 @@ int xsc_eswitch_init(struct xsc_core_device *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); - kfree(esw->vports); + xsc_vfree(esw->vports); kfree(esw); - return 0; + return err; } void xsc_eswitch_cleanup(struct xsc_core_device *dev) @@ -369,16 +352,34 @@ void xsc_eswitch_cleanup(struct xsc_core_device *dev) xsc_core_dbg(dev, "cleanup\n"); 
destroy_workqueue(dev->priv.eswitch->work_queue); - kfree(dev->priv.eswitch->vports); + xsc_vfree(dev->priv.eswitch->vports); kfree(dev->priv.eswitch); } +#ifdef XSC_ESW_GUID_ENABLE +static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) +{ + ((u8 *)node_guid)[7] = mac[0]; + ((u8 *)node_guid)[6] = mac[1]; + ((u8 *)node_guid)[5] = mac[2]; + ((u8 *)node_guid)[4] = 0xff; + ((u8 *)node_guid)[3] = 0xfe; + ((u8 *)node_guid)[2] = mac[3]; + ((u8 *)node_guid)[1] = mac[4]; + ((u8 *)node_guid)[0] = mac[5]; +} +#endif + int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, u16 vport, u8 mac[ETH_ALEN]) { struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); int err = 0; +#ifdef XSC_ESW_GUID_ENABLE + u64 node_guid; +#endif + if (IS_ERR(evport)) return PTR_ERR(evport); @@ -402,6 +403,21 @@ int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, ether_addr_copy(evport->info.mac, mac); +#ifdef XSC_ESW_GUID_ENABLE + node_guid_gen_from_mac(&node_guid, mac); + err = xsc_modify_other_nic_vport_node_guid(esw->dev, vport, node_guid); + if (err) + xsc_core_err(esw->dev, + "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", + vport, err); + evport->info.node_guid = node_guid; +#endif + +#ifdef XSC_ESW_FDB_ENABLE + if (evport->enabled && esw->mode == XSC_ESWITCH_LEGACY) + err = esw_vport_ingress_config(esw, evport); +#endif + unlock: mutex_unlock(&esw->state_lock); return err; @@ -422,8 +438,8 @@ int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, return 0; } -int __xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, u16 vlan, - u8 qos, __be16 proto, u8 set_flags) +static int __xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, u16 vlan, + u8 qos, __be16 proto, u8 set_flags) { struct xsc_modify_nic_vport_context_in *in; int err, in_sz; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h index 2fee1d7feb4fc524b02de8b5cf2bfa33ddd892b1..536ec8f7f01063f645c20c7466e8e210e22d632c 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h @@ -148,6 +148,12 @@ static inline bool xsc_host_is_dpu_mode(struct xsc_core_device *dev) dev->pdev->device == XSC_MV_HOST_PF_DEV_ID); } +static inline bool xsc_is_soc_pf(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_SOC_PF_DEV_ID || + dev->pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + static inline bool xsc_pf_vf_is_dpu_mode(struct xsc_core_device *dev) { return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || @@ -161,7 +167,7 @@ static inline bool xsc_get_pp_bypass_res(struct xsc_core_device *dev, bool esw_s return esw_set || xsc_pf_vf_is_dpu_mode(dev); } -static inline bool xsc_get_pct_drop_config(struct xsc_core_device *dev) +static inline bool xsc_dev_is_pf(struct xsc_core_device *dev) { return (dev->pdev->device == XSC_MC_PF_DEV_ID) || (dev->pdev->device == XSC_MF_SOC_PF_DEV_ID) || @@ -169,4 +175,22 @@ static inline bool xsc_get_pct_drop_config(struct xsc_core_device *dev) (dev->pdev->device == XSC_MV_SOC_PF_DEV_ID); } +static inline bool xsc_get_pf_isolate_config(struct xsc_core_device *dev, bool up) +{ + struct net_device *netdev = dev->netdev; + bool is_not_slave = up ? (!(netdev->flags & IFF_SLAVE)) : + (!(netdev->priv_flags & IFF_BONDING)); + + return xsc_dev_is_pf(dev) && is_not_slave; +} + +static inline bool xsc_get_mac_drop_config(struct xsc_core_device *dev, bool up) +{ + struct net_device *netdev = dev->netdev; + bool is_not_slave = up ? 
(!(netdev->flags & IFF_SLAVE)) : + (!(netdev->priv_flags & IFF_BONDING)); + + return xsc_dev_is_pf(dev) && is_not_slave; +} + #endif /* ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c index efe45d9ee1cfe11680fc4158f046990c9e6f47d1..d2a84eaa8a8471ae5ab9a9ce481f0e07cb785b7e 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c @@ -37,6 +37,8 @@ static struct xsc_board_info *xsc_alloc_board_info(void) return NULL; memset(board_info[i], 0, sizeof(*board_info[i])); board_info[i]->board_id = i; + rwlock_init(&board_info[i]->mr_sync_lock); + INIT_LIST_HEAD(&board_info[i]->func_list); return board_info[i]; } @@ -63,6 +65,7 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HCA_CAP); + in.hdr.ver = cpu_to_be16(CMD_QUERY_HCA_CAP_V2); in.cpu_num = cpu_to_be16(num_online_cpus()); err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); @@ -75,14 +78,66 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, } dev->glb_func_id = be32_to_cpu(out->hca_cap.glb_func_id); - caps->pf0_vf_funcid_base = be16_to_cpu(out->hca_cap.pf0_vf_funcid_base); - caps->pf0_vf_funcid_top = be16_to_cpu(out->hca_cap.pf0_vf_funcid_top); - caps->pf1_vf_funcid_base = be16_to_cpu(out->hca_cap.pf1_vf_funcid_base); - caps->pf1_vf_funcid_top = be16_to_cpu(out->hca_cap.pf1_vf_funcid_top); - caps->pcie0_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_base); - caps->pcie0_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_top); - caps->pcie1_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_base); - caps->pcie1_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_top); + if (be16_to_cpu(out->hdr.ver) >= CMD_QUERY_HCA_CAP_V1) { + caps->max_mr_size = be64_to_cpu(out->hca_cap.max_mr_size); + caps->max_cmd_in_len = be16_to_cpu(out->hca_cap.max_cmd_in_len); + caps->max_cmd_out_len = be16_to_cpu(out->hca_cap.max_cmd_out_len); + } else { + caps->max_mr_size = (1ull << 32) - 1; + caps->max_cmd_in_len = 10240; + caps->max_cmd_out_len = 2048; + } + if (be16_to_cpu(out->hdr.ver) >= CMD_QUERY_HCA_CAP_V2) { + caps->max_qp = be32_to_cpu(out->hca_cap.max_qp); + caps->max_cq = be32_to_cpu(out->hca_cap.max_cq); + caps->max_pd = be32_to_cpu(out->hca_cap.max_pd); + caps->max_mtt = be32_to_cpu(out->hca_cap.max_mtt); + } else { + caps->max_qp = 1 << (out->hca_cap.log_max_qp & 0x1f); + caps->max_cq = 1 << (out->hca_cap.log_max_cq & 0x1f); + caps->max_pd = 1 << (out->hca_cap.log_max_pd & 0x1f); + caps->max_mtt = 1 << (out->hca_cap.log_max_mtt); + } + + if (be16_to_cpu(out->hdr.ver) >= CMD_QUERY_HCA_CAP_V3) { + caps->mpt_tbl_addr = be32_to_cpu(out->hca_cap.mpt_tbl_addr); + caps->mpt_tbl_depth = be32_to_cpu(out->hca_cap.mpt_tbl_depth); + caps->mpt_tbl_width = be32_to_cpu(out->hca_cap.mpt_tbl_width); + caps->mtt_inst_base_addr = be32_to_cpu(out->hca_cap.mtt_inst_base_addr); + caps->mtt_inst_stride = be32_to_cpu(out->hca_cap.mtt_inst_stride); + caps->mtt_inst_num_log = be32_to_cpu(out->hca_cap.mtt_inst_num_log); + caps->mtt_inst_depth = be32_to_cpu(out->hca_cap.mtt_inst_depth); + xsc_set_mtt_info(dev); + } + if (be16_to_cpu(out->hdr.ver) >= CMD_QUERY_HCA_CAP_V4) { + dev->pcie_no = out->hca_cap.pcie_no; + dev->pf_id = out->hca_cap.pf_id; + dev->vf_id = be16_to_cpu(out->hca_cap.vf_id); + dev->pcie_host_num = out->hca_cap.pcie_host_num; + dev->pf_num_per_pcie = out->hca_cap.pf_num_per_pcie; + caps->pf0_vf_funcid_base = + 
be16_to_cpu(out->hca_cap.vf_funcid_base[dev->pcie_no][0]); + caps->pf0_vf_funcid_top = be16_to_cpu(out->hca_cap.vf_funcid_top[dev->pcie_no][0]); + caps->pf1_vf_funcid_base = + be16_to_cpu(out->hca_cap.vf_funcid_base[dev->pcie_no][1]); + caps->pf1_vf_funcid_top = be16_to_cpu(out->hca_cap.vf_funcid_top[dev->pcie_no][1]); + caps->pcie0_pf_funcid_base = be16_to_cpu(out->hca_cap.pf_funcid_base[0]); + caps->pcie0_pf_funcid_top = be16_to_cpu(out->hca_cap.pf_funcid_top[0]); + caps->pcie1_pf_funcid_base = be16_to_cpu(out->hca_cap.pf_funcid_base[1]); + caps->pcie1_pf_funcid_top = be16_to_cpu(out->hca_cap.pf_funcid_top[1]); + } else { + caps->pf0_vf_funcid_base = be16_to_cpu(out->hca_cap.pf0_vf_funcid_base); + caps->pf0_vf_funcid_top = be16_to_cpu(out->hca_cap.pf0_vf_funcid_top); + caps->pf1_vf_funcid_base = be16_to_cpu(out->hca_cap.pf1_vf_funcid_base); + caps->pf1_vf_funcid_top = be16_to_cpu(out->hca_cap.pf1_vf_funcid_top); + caps->pcie0_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_base); + caps->pcie0_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_top); + caps->pcie1_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_base); + caps->pcie1_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_top); + + funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, &dev->pcie_no, + &dev->pf_id, &dev->vf_id); + } caps->funcid_to_logic_port = be16_to_cpu(out->hca_cap.funcid_to_logic_port); if (xsc_core_is_pf(dev)) { xsc_core_dbg(dev, "pf0_vf_range(%4u, %4u), pf1_vf_range(%4u, %4u)\n", @@ -108,7 +163,6 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f; caps->num_ports = out->hca_cap.num_ports & 0xf; - caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; caps->log_max_msix = out->hca_cap.log_max_msix & 0xf; caps->mac_port = out->hca_cap.mac_port & 0xff; @@ -131,13 +185,10 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, caps->log_max_current_uc_list = 0; caps->log_max_current_mc_list = 0; caps->log_max_vlan_list = 8; - caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; - caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; caps->log_max_mcg = out->hca_cap.log_max_mcg; - caps->log_max_mtt = out->hca_cap.log_max_mtt; caps->log_max_tso = out->hca_cap.log_max_tso; caps->hca_core_clock = be32_to_cpu(out->hca_cap.hca_core_clock); caps->max_rwq_indirection_tables = @@ -182,6 +233,7 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, dev->chip_ver_l = be32_to_cpu(out->hca_cap.chip_ver_l); dev->hotfix_num = be32_to_cpu(out->hca_cap.hotfix_num); dev->feature_flag = be32_to_cpu(out->hca_cap.feature_flag); + dev->reg_mr_via_cmdq = out->hca_cap.reg_mr_via_cmdq; board_info = xsc_get_board_info(out->hca_cap.board_sn); if (!board_info) { @@ -193,20 +245,19 @@ int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, } dev->board_info = board_info; - if (xsc_core_is_pf(dev)) { - dev->regs.tx_db = be64_to_cpu(out->hca_cap.tx_db); - dev->regs.rx_db = be64_to_cpu(out->hca_cap.rx_db); - dev->regs.complete_db = be64_to_cpu(out->hca_cap.complete_db); - dev->regs.complete_reg = be64_to_cpu(out->hca_cap.complete_reg); - dev->regs.event_db = be64_to_cpu(out->hca_cap.event_db); - } + xsc_set_pf_db_addr(dev, be64_to_cpu(out->hca_cap.tx_db), + 
be64_to_cpu(out->hca_cap.rx_db), + be64_to_cpu(out->hca_cap.complete_db), + be64_to_cpu(out->hca_cap.complete_reg), + be64_to_cpu(out->hca_cap.event_db)); dev->fw_version_major = out->hca_cap.fw_ver.fw_version_major; dev->fw_version_minor = out->hca_cap.fw_ver.fw_version_minor; dev->fw_version_patch = be16_to_cpu(out->hca_cap.fw_ver.fw_version_patch); dev->fw_version_tweak = be32_to_cpu(out->hca_cap.fw_ver.fw_version_tweak); dev->fw_version_extra_flag = out->hca_cap.fw_ver.fw_version_extra_flag; - dev->reg_mr_via_cmdq = out->hca_cap.reg_mr_via_cmdq; + + xsc_cmd_query_read_flush(dev); out_out: kfree(out); @@ -285,28 +336,22 @@ int xsc_cmd_modify_hca(struct xsc_core_device *dev) return err; } -static int xsc_cmd_query_guid(struct xsc_core_device *dev) +int xsc_cmd_query_read_flush(struct xsc_core_device *dev) { - struct xsc_cmd_query_guid_mbox_in in; - struct xsc_cmd_query_guid_mbox_out out; - int err; + struct xsc_cmd_read_flush_hw_config_mbox_in in; + struct xsc_cmd_read_flush_hw_config_mbox_out out; + int err = 0; - in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_GUID); + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_READ_FLUSH); err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; if (out.hdr.status) - return xsc_cmd_status_to_err(&out.hdr); - dev->board_info->guid = out.guid; - dev->board_info->guid_valid = 1; - return 0; -} + xsc_cmd_status_to_err(&out.hdr); -int xsc_query_guid(struct xsc_core_device *dev) -{ - if (dev->board_info->guid_valid) - return 0; - - return xsc_cmd_query_guid(dev); + dev->read_flush = out.read_flush; + return 0; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c index ca5e889050b3c2f896452cd404522549d5d41a22..abcb724e6766273d6db92eaaa2e102e670f80381 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c @@ -8,7 +8,6 @@ #include "common/xsc_ioctl.h" #include "common/xsc_cmd.h" -#include "xsc_reg_struct.h" #include "xsc_fw.h" #include "xsc_flow.h" @@ -56,9 +55,9 @@ int xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out) struct xsc_resources *xres = get_xsc_res(xdev); int ret = 0; - xsc_acquire_lock(&xres->lock, &flags); + spin_lock_irqsave(&xres->lock, flags); ret = xsc_cmd_exec_create_mkey(xdev, in, out); - xsc_release_lock(&xres->lock, flags); + spin_unlock_irqrestore(&xres->lock, flags); return ret; } @@ -67,7 +66,7 @@ static int xsc_cmd_exec_destroy_mkey(struct xsc_core_device *xdev, void *in, voi struct xsc_destroy_mkey_mbox_in *req = in; struct xsc_destroy_mkey_mbox_out *resp = out; u32 mkey = be32_to_cpu(req->mkey); - u32 mpt_idx = xsc_mkey_to_idx(mkey); + u32 mpt_idx = xsc_mkey_to_idx(xdev, mkey); dealloc_mpt_entry(xdev, &mpt_idx); @@ -82,9 +81,9 @@ int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) struct xsc_resources *xres = get_xsc_res(xdev); int ret = 0; - xsc_acquire_lock(&xres->lock, &flags); + spin_lock_irqsave(&xres->lock, flags); ret = xsc_cmd_exec_destroy_mkey(xdev, in, out); - xsc_release_lock(&xres->lock, flags); + spin_unlock_irqrestore(&xres->lock, flags); return ret; } @@ -92,75 +91,120 @@ static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void 
*out) { struct xsc_register_mr_mbox_in *req = in; struct xsc_register_mr_mbox_out *resp = out; - struct xsc_mpt_entry mpt_ent; - u32 mpt_idx = 0; u32 mtt_base; u64 va = be64_to_cpu(req->req.va_base); - u32 mem_size = be32_to_cpu(req->req.len); - u32 pdn = be32_to_cpu(req->req.pdn); u32 key = be32_to_cpu(req->req.mkey); + u32 mpt_idx = xsc_mkey_to_idx(dev, key); int pa_num = be32_to_cpu(req->req.pa_num); - u32 *ptr; - u64 reg_addr; - int i; - int reg_stride; int iae_idx, iae_grp; if (pa_num && alloc_mtt_entry(dev, pa_num, &mtt_base)) return -EINVAL; - mpt_idx = xsc_mkey_to_idx(key); - mpt_ent.va_l = va & 0xFFFFFFFF; - mpt_ent.va_h = va >> 32; - mpt_ent.mem_size = mem_size; - mpt_ent.pdn = pdn; - mpt_ent.key = key & 0xFF; - mpt_ent.mtt_base = mtt_base; - mpt_ent.acc = req->req.acc; - mpt_ent.page_mode = req->req.page_mode; - mpt_ent.mem_map_en = req->req.map_en; - mpt_ent.rsv = 0; + xsc_core_info(dev, "mpt idx:%u,va=0x%llx, mtt_base=%d, pa_num=%d\n", + mpt_idx, va, mtt_base, pa_num); get_xsc_res(dev)->mpt_entry[mpt_idx].va = va; get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base = mtt_base; get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = pa_num; - ptr = (u32 *)&mpt_ent; - reg_stride = REG_WIDTH_TO_STRIDE(MMC_MPT_TBL_MEM_WIDTH); - reg_addr = MMC_MPT_TBL_MEM_ADDR + - mpt_idx * roundup_pow_of_two(reg_stride); - iae_grp = xsc_iae_grp_get(dev); iae_idx = xsc_iae_idx_get(dev, iae_grp); xsc_iae_lock(dev, iae_grp); + xsc_set_mpt(dev, iae_idx, mtt_base, &req->req); + xsc_set_mtt(dev, iae_idx, mtt_base, &req->req); + xsc_iae_unlock(dev, iae_grp); - IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(mpt_ent) / sizeof(u32), iae_idx); + resp->hdr.status = 0; + return 0; +} - xsc_core_info(dev, "reg mr, write mpt[%u]: va=%llx, mem_size=%u, pdn=%u\n", - mpt_idx, va, mpt_ent.mem_size, mpt_ent.pdn); - xsc_core_info(dev, "key=%u, mtt_base=%u, acc=%u, page_mode=%u, mem_map_en=%u\n", - mpt_ent.key, mpt_ent.mtt_base, mpt_ent.acc, - mpt_ent.page_mode, mpt_ent.mem_map_en); +void xsc_sync_mr_to_fw(struct xsc_core_device *dev) +{ + struct xsc_cmd_sync_mr_to_fw_mbox_in *in; + struct xsc_cmd_sync_mr_to_fw_mbox_out out; + int mpt_idx; + int max_sync_mr_num; + int mr_num = 0; + struct xsc_resources *xres = get_xsc_res(dev); - for (i = 0; i < pa_num; i++) { - u64 pa = req->req.pas[i]; + max_sync_mr_num = (dev->caps.max_cmd_in_len - sizeof(*in)) / sizeof(struct xsc_mr_info); + in = kvzalloc(dev->caps.max_cmd_in_len, GFP_KERNEL); + if (!in) + return; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SYNC_MR_TO_FW); + mpt_idx = find_next_zero_bit((unsigned long *)xres->mpt_tbl, xres->max_mpt_num, 1); + while (mpt_idx < xres->max_mpt_num) { + in->data[mr_num].mpt_idx = cpu_to_be32(mpt_idx); + in->data[mr_num].mtt_base = cpu_to_be32(xres->mpt_entry[mpt_idx].mtt_base); + in->data[mr_num].mtt_num = cpu_to_be32(xres->mpt_entry[mpt_idx].page_num); + mr_num++; + if (mr_num == max_sync_mr_num) { + in->mr_num = cpu_to_be16(mr_num); + memset(&out, 0, sizeof(out)); + xsc_cmd_exec(dev, in, dev->caps.max_cmd_in_len, &out, sizeof(out)); + mr_num = 0; + } + mpt_idx = find_next_zero_bit((unsigned long *)xres->mpt_tbl, + xres->max_mpt_num, mpt_idx + 1); + } - pa = be64_to_cpu(pa); - pa = pa >> PAGE_SHIFT_4K; - ptr = (u32 *)&pa; - reg_addr = MMC_MTT_TBL_MEM_ADDR + - (mtt_base + i) * REG_WIDTH_TO_STRIDE(MMC_MTT_TBL_MEM_WIDTH); + if (!mr_num) + goto out; - IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(pa) / sizeof(u32), iae_idx); + in->mr_num = cpu_to_be16(mr_num); + memset(&out, 0, sizeof(out)); + xsc_cmd_exec(dev, in, dev->caps.max_cmd_in_len, &out, sizeof(out)); 
+out: + kfree(in); +} - xsc_core_info(dev, "reg mr, write mtt: pa[%u]=%llx\n", i, pa); - } +void xsc_sync_mr_from_fw(struct xsc_core_device *dev) +{ + struct xsc_cmd_sync_mr_from_fw_mbox_in in; + struct xsc_cmd_sync_mr_from_fw_mbox_out *out; + int max_sync_mr_num; + int ret; + int i = 0; + struct xsc_resources *xres = get_xsc_res(dev); + u32 mpt_idx = 0; - xsc_iae_unlock(dev, iae_grp); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SYNC_MR_FROM_FW); + out = kvzalloc(dev->caps.max_cmd_out_len, GFP_KERNEL); + if (!out) + return; + in.start = cpu_to_be32(1); + ret = xsc_cmd_exec(dev, &in, sizeof(in), out, dev->caps.max_cmd_out_len); + if (ret || out->hdr.status) + goto out; + max_sync_mr_num = (dev->caps.max_cmd_out_len - sizeof(*out)) / sizeof(struct xsc_mr_info); + while (be16_to_cpu(out->mr_num) == max_sync_mr_num) { + for (i = 0; i < max_sync_mr_num; i++) { + mpt_idx = be32_to_cpu(out->data[i].mpt_idx); + xres->mpt_entry[mpt_idx].mtt_base = be32_to_cpu(out->data[i].mtt_base); + xres->mpt_entry[mpt_idx].page_num = be32_to_cpu(out->data[i].mtt_num); + clear_bit(mpt_idx, (unsigned long *)xres->mpt_tbl); + save_mtt_to_free_list(dev, xres->mpt_entry[mpt_idx].mtt_base, + xres->mpt_entry[mpt_idx].page_num); + } + in.start = cpu_to_be32(mpt_idx + 1); + ret = xsc_cmd_exec(dev, &in, sizeof(in), out, dev->caps.max_cmd_out_len); + if (ret || out->hdr.status) + goto out; + } + for (i = 0; i < be16_to_cpu(out->mr_num); i++) { + mpt_idx = be32_to_cpu(out->data[i].mpt_idx); + xres->mpt_entry[mpt_idx].mtt_base = be32_to_cpu(out->data[i].mtt_base); + xres->mpt_entry[mpt_idx].page_num = be32_to_cpu(out->data[i].mtt_num); + clear_bit(mpt_idx, (unsigned long *)xres->mpt_tbl); + save_mtt_to_free_list(dev, xres->mpt_entry[mpt_idx].mtt_base, + xres->mpt_entry[mpt_idx].page_num); + } - resp->hdr.status = 0; - return 0; +out: + kfree(out); } int xsc_reg_mr(struct xsc_core_device *xdev, void *in, void *out) @@ -172,9 +216,11 @@ static int xsc_cmd_exec_dereg_mr(struct xsc_core_device *dev, void *in, void *ou { struct xsc_unregister_mr_mbox_in *req; struct xsc_unregister_mr_mbox_out *resp; + u64 va; u32 mpt_idx; - u32 mtt_base; + u32 mtt_base = 0; int pages_num; + int iae_idx, iae_grp; req = in; resp = out; @@ -183,6 +229,15 @@ static int xsc_cmd_exec_dereg_mr(struct xsc_core_device *dev, void *in, void *ou mpt_idx = be32_to_cpu(req->mkey); xsc_core_info(dev, "mpt idx:%u\n", mpt_idx); + /*clear mpt entry*/ + iae_grp = xsc_iae_grp_get(dev); + iae_idx = xsc_iae_idx_get(dev, iae_grp); + + xsc_iae_lock(dev, iae_grp); + xsc_clear_mpt(dev, iae_idx, mtt_base, req); + xsc_iae_unlock(dev, iae_grp); + + va = get_xsc_res(dev)->mpt_entry[mpt_idx].va; pages_num = get_xsc_res(dev)->mpt_entry[mpt_idx].page_num; mtt_base = get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base; if (pages_num > 0) { @@ -193,6 +248,10 @@ static int xsc_cmd_exec_dereg_mr(struct xsc_core_device *dev, void *in, void *ou } resp->hdr.status = 0; + + xsc_core_info(dev, "dereg mr, clear mpt[%u]: va=%llx\n", + mpt_idx, va); + return 0; } @@ -258,7 +317,7 @@ int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_siz opcode = be16_to_cpu(hdr->opcode); xsc_core_dbg(dev, "opcode: %x\n", opcode); - xsc_acquire_lock(&dev->reg_access_lock, &flags); + spin_lock_irqsave(&dev->reg_access_lock, flags); switch (opcode) { case XSC_CMD_OP_IOCTL_FLOW: ret = xsc_cmd_exec_ioctl_flow(dev, in, out); @@ -269,9 +328,9 @@ int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_siz } /* ensure pci sequence */ - xsc_mmiowb(); + mmiowb(); - 
xsc_release_lock(&dev->reg_access_lock, flags); + spin_unlock_irqrestore(&dev->reg_access_lock, flags); return ret; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c deleted file mode 100644 index 9c63cdae414be98db4d1154c1e5fd7fad9323c71..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#include -#include "common/xsc_core.h" - -void xsc_lock_init(struct xsc_lock *lock) -{ - spin_lock_init(&lock->lock); -} - -void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *oflags) -{ - unsigned long flags; - - spin_lock_irqsave(&lock->lock, flags); - *oflags = flags; -} - -void xsc_release_lock(struct xsc_lock *lock, unsigned long flags) -{ - spin_unlock_irqrestore(&lock->lock, flags); -} - -void xsc_mmiowb(void) -{ - mmiowb(); -} - -void xsc_wmb(void) -{ - /* mem barrier for xsc operation */ - wmb(); -} - -void xsc_msleep(int timeout) -{ - msleep(timeout); -} - -void xsc_udelay(int timeout) -{ - udelay(timeout); -} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h deleted file mode 100644 index 5d5d2f19a4a9e196a45e9fd637f02269e2f64996..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#ifndef OSDEP_H -#define OSDEP_H - -#include "common/xsc_core.h" - -#define xsc_print printk - -void xsc_msleep(int timeout); - -void xsc_udelay(int timeout); - -void xsc_lock_init(struct xsc_lock *lock); - -void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *flags); - -void xsc_release_lock(struct xsc_lock *lock, unsigned long flags); - -void xsc_mmiowb(void); - -void xsc_wmb(void); - -void *xsc_malloc(unsigned int size); - -void xsc_free(void *addr); - -#endif /* OSDEP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c index 0623b0f7d4ecc8fd523bde50cb80145179eb7ab4..8884c5721556781395f7885e2b8dee2675f9647e 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c @@ -11,59 +11,16 @@ static DECLARE_COMPLETION(dma_read_done); -static inline int xsc_dma_wr_isbusy(struct xsc_core_device *xdev) -{ - u32 busy = 0; - - do { - busy = REG_RD32(xdev, HIF_TBL_TBL_DL_BUSY_REG_ADDR); - } while (busy != 0x0); - - return busy; -} - -static inline int xsc_dma_rd_isbusy(struct xsc_core_device *xdev) -{ - u32 busy = 0; - - do { - busy = REG_RD32(xdev, CLSF_DMA_DMA_UL_BUSY_REG_ADDR); - } while (busy != 0x0); - - return busy; -} - -static inline int xsc_dma_done(struct xsc_core_device *xdev) -{ - u32 done = 0; - - do { - done = REG_RD32(xdev, CLSF_DMA_DMA_DL_DONE_REG_ADDR); - } while ((done & 0x1) != 0x1); - - return done; -} - -static inline void xsc_dma_wr_success_get(struct xsc_core_device *xdev, u32 *success, u32 size) -{ - u32 *ptr = NULL; - - ptr = success; - IA_READ(xdev, CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR, ptr, (size / sizeof(u32))); -} - -int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, - const struct tdi_dma_write_key_bits *key, - const struct tdi_dma_write_action_bits *action) +static 
int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, + const struct tdi_dma_write_key_bits *key, + const struct tdi_dma_write_action_bits *action) { u32 i = 0; - u32 busy = 0; u32 dma_wr_num = 0; - u32 value = 0; - u32 done = 0; - u64 success[2]; u32 data_len = 0; u64 dma_wr_addr = 0; + u64 success[2]; + int ret; if (!xdev || !key || !action) return -1; @@ -74,38 +31,17 @@ int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, dma_wr_num = ((action->entry_num + (XSC_DMA_WR_MAX - 1)) / XSC_DMA_WR_MAX); for (i = 0; i < dma_wr_num; i++) { + dma_wr_addr = (action->data_addr + ((i * XSC_DMA_WR_MAX) * XSC_DMA_LEN)); if ((action->entry_num % XSC_DMA_WR_MAX) && (i == (dma_wr_num - 1))) data_len = ((action->entry_num % XSC_DMA_WR_MAX) * XSC_DMA_LEN); else data_len = (XSC_DMA_WR_MAX * XSC_DMA_LEN); - - busy = xsc_dma_wr_isbusy(xdev); - if (busy) - return -1; - - REG_WR32(xdev, CLSF_DMA_ERR_CODE_CLR_REG_ADDR, 1); - - value = ((data_len << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT) | - (key->host_id << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT) | - key->func_id); - - REG_WR32(xdev, HIF_TBL_TBL_DL_REQ_REG_ADDR, value); - - dma_wr_addr = (action->data_addr + ((i * XSC_DMA_WR_MAX) * XSC_DMA_LEN)); - value = (dma_wr_addr & HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK); - REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_L_REG_ADDR, value); - - value = ((dma_wr_addr >> 32) & HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK); - REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_H_REG_ADDR, value); - - REG_WR32(xdev, HIF_TBL_TBL_DL_START_REG_ADDR, 1); - - done = xsc_dma_done(xdev); - if (done != XSC_DMA_WR_SUCCESS) { - memset(success, 0, sizeof(success)); - xsc_dma_wr_success_get(xdev, (u32 *)&success, sizeof(success)); + memset(success, 0, sizeof(success)); + ret = xsc_dma_write_tbl_once(xdev, data_len, dma_wr_addr, key->host_id, + key->func_id, success, sizeof(success)); + if (ret) { xsc_core_err(xdev, "DMA write time %d status 0x%lx%lx fail.\n", i, - (unsigned long)success[1], (unsigned long)success[0]); + (unsigned long)success[1], (unsigned long)success[0]); return -1; } } @@ -118,53 +54,24 @@ void xsc_dma_read_done_complete(void) complete(&dma_read_done); } -int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, - const struct tdi_dma_read_key_bits *key, - const struct tdi_dma_read_action_bits *action) +static int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, + const struct tdi_dma_read_key_bits *key, + const struct tdi_dma_read_action_bits *action) { - u32 busy = 0; - u32 value = 0; - if (!xdev || !key || !action) return -1; if (!action->burst_num) return -1; - busy = xsc_dma_rd_isbusy(xdev); - if (busy) - return -1; - - value = ((key->host_id << HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT) | - key->func_id); - - REG_WR32(xdev, HIF_TBL_TBL_UL_REQ_REG_ADDR, value); - - value = (action->data_addr & HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK); - REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_L_REG_ADDR, value); - - value = ((action->data_addr >> 32) & HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK); - REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_H_REG_ADDR, value); - - REG_WR32(xdev, HIF_TBL_TBL_UL_START_REG_ADDR, 1); - - value = (key->tbl_id & CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK); - REG_WR32(xdev, CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR, value); - - value = ((action->burst_num << CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT) | - key->tbl_start_addr); - REG_WR32(xdev, CLSF_DMA_DMA_RD_ADDR_REG_ADDR, value); - - REG_WR32(xdev, CLSF_DMA_INDRW_RD_START_REG_ADDR, 1); - + xsc_dma_read_tbl(xdev, key->host_id, 
key->func_id, action->data_addr, + key->tbl_id, action->burst_num, key->tbl_start_addr); /*wait msix interrupt */ if (!wait_for_completion_timeout(&dma_read_done, msecs_to_jiffies(5000))) { xsc_core_err(xdev, "wait for dma read done completion timeout.\n"); return -ETIMEDOUT; } - REG_WR32(xdev, HIF_TBL_MSG_RDY_REG_ADDR, 1); - return 0; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h index b1f724235309570de83d585df03bd3b118ed33b9..615a5ccfe99f608c61e7b61964bdb1d27374291a 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h @@ -6,11 +6,8 @@ #ifndef XSC_FLOW_H #define XSC_FLOW_H -#include "osdep.h" - #define XSC_DMA_LEN 64 #define XSC_DMA_WR_MAX 128 -#define XSC_DMA_WR_SUCCESS 0x3 /* key */ struct tdi_dma_write_key_bits { @@ -62,4 +59,5 @@ int xsc_flow_add(struct xsc_core_device *xdev, int table, int length, void *data); void xsc_dma_read_done_complete(void); -#endif /* XSC_FLOW_H */ + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h index cb36043b6728c929a6acb65a1a43041375f57abb..61ee88efdf13063b5a0ab6d228fda66964941121 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h @@ -6,8 +6,6 @@ #ifndef XSC_FW_H #define XSC_FW_H -#include "osdep.h" - #include "common/xsc_hsi.h" #include "common/xsc_core.h" @@ -19,7 +17,7 @@ struct xsc_free_list { struct xsc_free_list_wl { struct xsc_free_list head; - struct xsc_lock lock; + spinlock_t lock; /* lock for free list */ }; struct xsc_mpt_info { @@ -30,32 +28,19 @@ struct xsc_mpt_info { #define XSC_RES_IAE_GRP_MASK (XSC_RES_NUM_IAE_GRP - 1) struct xsc_resources { - int refcnt; atomic_t iae_grp; int iae_idx[XSC_RES_NUM_IAE_GRP]; spinlock_t iae_lock[XSC_RES_NUM_IAE_GRP]; /* iae group lock */ -#define XSC_MAX_MPT_NUM MMC_MPT_TBL_MEM_DEPTH - struct xsc_mpt_info mpt_entry[XSC_MAX_MPT_NUM]; + struct xsc_mpt_info *mpt_entry; int max_mpt_num; - u64 mpt_tbl[XSC_MAX_MPT_NUM >> 6]; -#define XSC_MAX_MTT_NUM MMC_MTT_TBL_MEM_DEPTH + u8 *mpt_tbl; int max_mtt_num; struct xsc_free_list_wl mtt_list; - struct xsc_lock lock; + spinlock_t lock; /* lock for mpt_tbl */ }; struct xsc_resources *get_xsc_res(struct xsc_core_device *dev); -int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max); - -int xsc_dealloc_res(u32 *res, u64 *res_tbl); - -int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, - u32 base_align); - -int release_to_free_list(struct xsc_free_list_wl *list, u32 release, - u32 num_released); - int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); @@ -63,4 +48,9 @@ int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base); int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base); -#endif /* XSC_FW_H */ + +void save_mtt_to_free_list(struct xsc_core_device *dev, u32 base, u32 num); +void xsc_sync_mr_to_fw(struct xsc_core_device *dev); +void xsc_sync_mr_from_fw(struct xsc_core_device *dev); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c deleted file mode 100644 index 758b5c77a263219e627c5c5cc0ed2ef7b2c5ef6b..0000000000000000000000000000000000000000 --- 
a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#include "common/xsc_core.h" - -void *xsc_malloc(unsigned int size) -{ - return kmalloc(size, GFP_ATOMIC); -} - -void xsc_free(void *addr) -{ - kfree(addr); -} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c index 8bd6916e21035a009f0fd39a47bf49816588e090..437d2affc7c4e3070ef8989088cbe4b0edec2f84 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c @@ -11,10 +11,10 @@ static int xsc_alloc_free_list_res(struct xsc_free_list_wl *list, int max_num) { struct xsc_free_list *free_node; - xsc_lock_init(&list->lock); + spin_lock_init(&list->lock); INIT_LIST_HEAD(&list->head.list); - free_node = xsc_malloc(sizeof(struct xsc_free_list)); + free_node = kmalloc(sizeof(*free_node), GFP_ATOMIC); if (!free_node) return -ENOMEM; @@ -32,7 +32,7 @@ static void xsc_destroy_free_list_res(struct xsc_free_list_wl *list) list_for_each_entry_safe(pos, next, &list->head.list, list) { list_del(&pos->list); - xsc_free(pos); + kfree(pos); } } @@ -44,6 +44,7 @@ static int xsc_res_iae_init(struct xsc_core_device *dev) struct xsc_alloc_ia_lock_mbox_in in; struct xsc_alloc_ia_lock_mbox_out out; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_IA_LOCK); in.lock_num = XSC_RES_NUM_IAE_GRP; @@ -73,6 +74,7 @@ static void xsc_res_iae_release(struct xsc_core_device *dev) struct xsc_release_ia_lock_mbox_in in; struct xsc_release_ia_lock_mbox_out out; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_RELEASE_IA_LOCK); for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) in.lock_idx[i] = res->iae_idx[i]; @@ -80,6 +82,8 @@ static void xsc_res_iae_release(struct xsc_core_device *dev) ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (ret) xsc_core_err(dev, "failed to release ia lock, ret = %d\n", ret); + + return; } int xsc_create_res(struct xsc_core_device *dev) @@ -88,32 +92,28 @@ int xsc_create_res(struct xsc_core_device *dev) u32 board_id = dev->board_info->board_id; struct xsc_resources *xres = get_xsc_res(dev); - if (xres) { - xres->refcnt++; - if (xres->refcnt > 1) - return 0; - } else { - g_xres[board_id] = vmalloc(sizeof(*g_xres[board_id])); - if (!g_xres[board_id]) - return -ENOMEM; - xres = g_xres[board_id]; - xres->refcnt = 1; - } - - xsc_lock_init(&xres->lock); - xres->max_mpt_num = XSC_MAX_MPT_NUM; - memset(xres->mpt_tbl, 0xFF, XSC_MAX_MPT_NUM >> 3); + g_xres[board_id] = vmalloc(sizeof(*g_xres[board_id])); + if (!g_xres[board_id]) + return -ENOMEM; + xres = g_xres[board_id]; + + spin_lock_init(&xres->lock); + xres->max_mpt_num = xsc_get_max_mpt_num(dev); + xres->mpt_tbl = kmalloc(xres->max_mpt_num >> 3, GFP_KERNEL); + if (!xres->mpt_tbl) + goto err_mpt_tbl; + memset(xres->mpt_tbl, 0xFF, xres->max_mpt_num >> 3); /* reserved for local dma lkey */ clear_bit(0, (unsigned long *)xres->mpt_tbl); + xres->mpt_entry = vmalloc(xres->max_mpt_num * sizeof(struct xsc_mpt_info)); + if (!xres->mpt_entry) + goto err_mpt_entry; ret = xsc_res_iae_init(dev); - if (ret) { - vfree(g_xres[board_id]); - g_xres[board_id] = NULL; - return -EINVAL; - } + if (ret) + goto err_iae_init; - xres->max_mtt_num = XSC_MAX_MTT_NUM; + xres->max_mtt_num = xsc_get_max_mtt_num(dev); ret = xsc_alloc_free_list_res(&xres->mtt_list, 
xres->max_mtt_num); if (ret) goto err_mtt; @@ -122,6 +122,11 @@ int xsc_create_res(struct xsc_core_device *dev) err_mtt: xsc_res_iae_release(dev); +err_iae_init: + vfree(xres->mpt_entry); +err_mpt_entry: + kfree(xres->mpt_tbl); +err_mpt_tbl: vfree(g_xres[board_id]); g_xres[board_id] = NULL; return ret; @@ -131,16 +136,12 @@ void xsc_destroy_res(struct xsc_core_device *dev) { struct xsc_resources *xres = get_xsc_res(dev); - if (xres) { - xres->refcnt--; - if (xres->refcnt) - return; - - xsc_destroy_free_list_res(&xres->mtt_list); - xsc_res_iae_release(dev); - vfree(g_xres[dev->board_info->board_id]); - g_xres[dev->board_info->board_id] = NULL; - } + xsc_destroy_free_list_res(&xres->mtt_list); + xsc_res_iae_release(dev); + vfree(xres->mpt_entry); + kfree(xres->mpt_tbl); + vfree(g_xres[dev->board_info->board_id]); + g_xres[dev->board_info->board_id] = NULL; } struct xsc_resources *get_xsc_res(struct xsc_core_device *dev) @@ -148,7 +149,7 @@ struct xsc_resources *get_xsc_res(struct xsc_core_device *dev) return g_xres[dev->board_info->board_id]; } -int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max) +static int xsc_alloc_res(u32 *res, u8 *res_tbl, u32 max) { u32 bit_num; @@ -160,7 +161,7 @@ int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max) return 0; } -int xsc_dealloc_res(u32 *res, u64 *res_tbl) +static int xsc_dealloc_res(u32 *res, u8 *res_tbl) { if (test_and_set_bit(*res, (unsigned long *)res_tbl)) return -EINVAL; @@ -169,27 +170,23 @@ int xsc_dealloc_res(u32 *res, u64 *res_tbl) return 0; } -int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, - u32 base_align) +static int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align) { struct xsc_free_list *free_node; struct xsc_free_list *next; struct xsc_free_list *new_node; - unsigned long flags; *alloc = -1; - xsc_acquire_lock(&list->lock, &flags); list_for_each_entry_safe(free_node, next, &list->head.list, list) { int start = round_up(free_node->start, base_align); int avail_num = free_node->end - start + 1; if (required < avail_num) { if (start > free_node->start) { - new_node = xsc_malloc(sizeof(struct xsc_free_list)); - if (!new_node) { - xsc_release_lock(&list->lock, flags); + new_node = kmalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) return -ENOMEM; - } new_node->start = free_node->start; new_node->end = start - 1; __list_add(&new_node->list, free_node->list.prev, @@ -204,12 +201,11 @@ int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc free_node->end = start - 1; } else { list_del(&free_node->list); - xsc_free(free_node); + kfree(free_node); } break; } } - xsc_release_lock(&list->lock, flags); if (*alloc == -1) return -EINVAL; @@ -217,69 +213,121 @@ int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc return 0; } -int release_to_free_list(struct xsc_free_list_wl *list, uint32_t release, - uint32_t num_released) +void save_mtt_to_free_list(struct xsc_core_device *dev, u32 base, u32 num) { - struct xsc_free_list *free_node = NULL; - struct xsc_free_list *next, *prev; - struct xsc_free_list *new_node; + struct xsc_resources *xres = get_xsc_res(dev); + struct list_head *h = &xres->mtt_list.head.list; + struct xsc_free_list *pos, *new; unsigned long flags; - bool new_flag = false; - bool end_merge = false; - int ret = 0; - xsc_acquire_lock(&list->lock, &flags); - list_for_each_entry_safe(free_node, next, &list->head.list, list) { - if (release + num_released < free_node->start) { - new_flag = true; - } else if 
(release + num_released == free_node->start) { - /* backward merge */ - end_merge = true; - free_node->start = release; - } + spin_lock_irqsave(&xres->mtt_list.lock, flags); + list_for_each_entry(pos, h, list) { + if (base >= pos->start && base + num - 1 <= pos->end) + break; + } - if (new_flag || end_merge) { - /* forward merge, and backward merge if possible */ - if (free_node->list.prev == &list->head.list) - goto create_node; - - prev = list_entry(free_node->list.prev, struct xsc_free_list, list); - if (release == prev->end + 1) { - if (end_merge) { - prev->end = free_node->end; - list_del(&free_node->list); - xsc_free(free_node); - free_node = NULL; - } else { - prev->end = release + num_released - 1; - new_flag = false; - } + if (base == pos->start) { + if (base + num - 1 == pos->end) { + list_del(&pos->list); + kfree(pos); + } else { + pos->start = base + num; + } + } else if (base > pos->start) { + if (base + num - 1 < pos->end) { + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (new) { + new->start = base + num; + new->end = pos->end; + __list_add(&new->list, &pos->list, pos->list.next); } - - break; } + pos->end = base - 1; } + spin_unlock_irqrestore(&xres->mtt_list.lock, flags); +} - if (list_empty(&list->head.list)) { - new_flag = true; - free_node = &list->head; +static int release_to_free_list(struct xsc_free_list_wl *list, uint32_t release_base, + uint32_t num_released) +{ + struct list_head *head = &list->head.list; + struct xsc_free_list *pos; + struct xsc_free_list *n; + struct xsc_free_list *prev; + struct xsc_free_list *new; + struct list_head *prev_node, *next_node; + + /* find the position to insert, don't Do merge here */ + list_for_each_entry_safe(pos, n, head, list) { + if (release_base < pos->start) + break; } -create_node: - if (new_flag && free_node) { - new_node = xsc_malloc(sizeof(struct xsc_free_list)); - if (!new_node) { - ret = -ENOMEM; - goto ret; + /* merge */ + if (&pos->list == head) { + /* list is empty or release_base is great than last node */ + if (!list_empty(head)) { + prev = list_entry(head->prev, struct xsc_free_list, list); + /* merge to last node */ + if (prev->end + 1 == release_base) { + prev->end = release_base + num_released - 1; + return 0; + } + prev_node = head->prev; + next_node = head; + } else { + prev_node = head; + next_node = head; + } + + goto create_new_node; + } else { + /* release_base is little than first node of free list */ + if (pos->list.prev == head) { + /* merge to first node */ + if (release_base + num_released == pos->start) { + pos->start = release_base; + return 0; + } + + prev_node = head; + next_node = &pos->list; + goto create_new_node; + } else { /* release pos in the middle of free list */ + prev = list_prev_entry(pos, list); + + if (prev->end + 1 == release_base && + release_base + num_released == pos->start) { + prev->end = pos->end; + list_del(&pos->list); + kfree(pos); + + return 0; + } + if (prev->end + 1 == release_base) { + prev->end = release_base + num_released - 1; + return 0; + } + + if (release_base + num_released == pos->start) { + pos->start = release_base; + return 0; + } + + prev_node = &prev->list; + next_node = &pos->list; + goto create_new_node; } - new_node->start = release; - new_node->end = release + num_released - 1; - __list_add(&new_node->list, free_node->list.prev, - &free_node->list); } -ret: - xsc_release_lock(&list->lock, flags); - return ret; + +create_new_node: + new = kmalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + return -ENOMEM; + new->start = release_base; + new->end = 
release_base + num_released - 1; + __list_add(&new->list, prev_node, next_node); + return 0; } int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) @@ -305,7 +353,12 @@ int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base) { struct xsc_resources *xres = get_xsc_res(dev); - int ret = alloc_from_free_list(&xres->mtt_list, pages_num, mtt_base, 1); + int ret; + unsigned long flags; + + spin_lock_irqsave(&xres->mtt_list.lock, flags); + ret = alloc_from_free_list(&xres->mtt_list, pages_num, mtt_base, 1); + spin_unlock_irqrestore(&xres->mtt_list.lock, flags); xsc_core_dbg(dev, "alloc mtt for %d pages start from %d\n", pages_num, *mtt_base); @@ -316,7 +369,12 @@ int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base) int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base) { struct xsc_resources *xres = get_xsc_res(dev); - int ret = release_to_free_list(&xres->mtt_list, mtt_base, pages_num); + int ret; + unsigned long flags; + + spin_lock_irqsave(&xres->mtt_list.lock, flags); + ret = release_to_free_list(&xres->mtt_list, mtt_base, pages_num); + spin_unlock_irqrestore(&xres->mtt_list.lock, flags); xsc_core_dbg(dev, "mtt release %d pages start from %d\n", pages_num, mtt_base); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/andes_impl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/andes_impl.c new file mode 100644 index 0000000000000000000000000000000000000000..ce91c156b4b6c09d548b0c3c36f4a5da665fb1af --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/andes_impl.c @@ -0,0 +1,775 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 - 2024, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "andes_reg.h" +#include "common/xsc_reg.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "xsc_hal.h" + +#define REG_ADDR(bp, offset) ((bp) + (offset)) + +#define HIF_CPM_IDA_DATA_MEM_STRIDE 0x40 + +#define CPM_IAE_CMD_READ 0 +#define CPM_IAE_CMD_WRITE 1 + +#define CPM_IAE_ADDR_REG_STRIDE HIF_CPM_IDA_ADDR_REG_STRIDE + +#define CPM_IAE_DATA_MEM_STRIDE HIF_CPM_IDA_DATA_MEM_STRIDE + +#define CPM_IAE_DATA_MEM_MAX_LEN 16 + +static inline void acquire_ia32_lock(void *hal, void __iomem *bar, int *iae_idx) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + u32 lock_val; + u32 lock_vld; + + lock_val = readl(REG_ADDR(bar, _hal->regs->cpm_get_lock)); + lock_vld = lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT; + if (lock_vld) + *iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK; + else + *iae_idx = -1; +} + +static inline void release_ia32_lock(void *hal, void __iomem *bar, int lock_idx) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + writel(lock_idx, REG_ADDR(bar, _hal->regs->cpm_put_lock)); +} + +static inline void ia32_write_data(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u32 *data, int nr, int idx) +{ + int i; + int offset = hal->regs->cpm_data_mem + idx * CPM_IAE_DATA_MEM_STRIDE; + + for (i = 0; i < nr; i++) { + writel(*(data++), REG_ADDR(bar, offset)); + offset += sizeof(*data); + } +} + +static inline void ia32_read_data(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u32 *data, int nr, int idx) +{ + int i; + int offset = hal->regs->cpm_data_mem + idx * CPM_IAE_DATA_MEM_STRIDE; + u32 *ptr = data; + + for (i = 0; i < nr; i++) { + *ptr = readl(REG_ADDR(bar, offset)); + offset += sizeof(*data); + ptr = ptr + 1; + } +} + +static inline void ia32_write_reg_addr(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u32 addr, int idx) +{ + int offset = hal->regs->cpm_addr + idx * CPM_IAE_ADDR_REG_STRIDE; + u32 reg_addr_val = addr; + + writel(reg_addr_val, REG_ADDR(bar, offset)); +} + +static inline void initiate_ia32_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length, int r0w1) +{ + struct ia_cmd { + union { + struct { + u32 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH; + u32 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH; + u32 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH; + }; + u32 raw; + }; + } cmd; + + int addr = hal->regs->cpm_cmd; + + cmd.iae_r0w1 = r0w1; + cmd.iae_len = length - 1; + cmd.iae_idx = iae_idx; + writel(cmd.raw, REG_ADDR(bar, addr)); +} + +static inline void initiate_ia32_write_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length) +{ + initiate_ia32_cmd(hal, bar, iae_idx, length, CPM_IAE_CMD_WRITE); +} + +static inline void initiate_ia32_read_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length) +{ + initiate_ia32_cmd(hal, bar, iae_idx, length, CPM_IAE_CMD_READ); +} + +static inline void wait_for_ia32_complete(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx) +{ + while ((readl(REG_ADDR(bar, hal->regs->cpm_busy)) & (1 << iae_idx))) + ; +} + +static void xsc_read32(void *bar, u32 off, void *data, int len) +{ + u32 val = readl(REG_ADDR(bar, off)); + + memcpy(data, &val, len); +} + +static void xsc_write32(void *bar, u32 off, void *data) +{ + writel(*(u32 *)data, REG_ADDR(bar, off)); +} + +static void xsc_ia32_write_reg_mr(void *hal, void __iomem *bar, u32 addr, + void *data, int nr, int idx) +{ + struct xsc_hw_abstract_layer *_hal = (struct 
xsc_hw_abstract_layer *)hal; + + ia32_write_data(_hal, bar, data, nr, idx); + ia32_write_reg_addr(_hal, bar, addr, idx); + initiate_ia32_write_cmd(_hal, bar, idx, nr); +} + +static void xsc_ia32_read(void *hal, void __iomem *bar, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + int idx; + + do { + acquire_ia32_lock(_hal, bar, &idx); + } while (idx == -1); + ia32_write_reg_addr(_hal, bar, addr, idx); + initiate_ia32_read_cmd(_hal, bar, idx, nr); + wait_for_ia32_complete(_hal, bar, idx); + ia32_read_data(_hal, bar, data, nr, idx); + release_ia32_lock(_hal, bar, idx); +} + +static void xsc_ia32_write(void *hal, void __iomem *bar, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + int idx; + + do { + acquire_ia32_lock(_hal, bar, &idx); + } while (idx == -1); + ia32_write_data(_hal, bar, data, nr, idx); + ia32_write_reg_addr(_hal, bar, addr, idx); + initiate_ia32_write_cmd(_hal, bar, idx, nr); + release_ia32_lock(_hal, bar, idx); +} + +static void andes_ring_tx_doorbell(void *hal, void __iomem *bar, u32 sqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + union xsc_send_doorbell { + struct{ + u32 next_pid : 16; + u32 qp_id : 15; + }; + u32 raw; + } db; + + db.next_pid = next_pid; + db.qp_id = sqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + xsc_write32(bar, _hal->regs->tx_db, &db.raw); +} + +static void andes_ring_rx_doorbell(void *hal, void __iomem *bar, u32 rqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + union xsc_recv_doorbell { + struct{ + u32 next_pid : 13; + u32 qp_id : 15; + }; + u32 raw; + } db; + + db.next_pid = next_pid; + db.qp_id = rqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + xsc_write32(bar, _hal->regs->rx_db, &db.raw); +} + +static void andes_update_cq_db(void *hal, void __iomem *bar, u32 cqn, u32 next_cid, u8 solicited) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union andes_cq_doorbell { + struct{ + u32 cq_next_cid:16; + u32 cq_id:15; + u32 arm:1; + }; + u32 val; + } db; + + db.cq_next_cid = next_cid; + db.cq_id = cqn; + db.arm = solicited; + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. 
+ */ + wmb(); + xsc_write32(bar, _hal->regs->complete_db, &db.val); +} + +static void andes_set_cq_ci(void *hal, void __iomem *bar, u32 cqn, u32 next_cid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union andes_cq_doorbell { + struct{ + u32 cq_next_cid:16; + u32 cq_id:15; + u32 arm:1; + }; + u32 val; + } db; + + db.val = 0; + db.cq_next_cid = next_cid; + db.cq_id = cqn; + /* make sure val write to memory done */ + wmb(); + xsc_write32(bar, _hal->regs->complete_reg, &db.val); +} + +static void andes_set_eq_ci(void *hal, void __iomem *bar, u32 eqn, u32 next_cid, u8 arm) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union andes_eq_doorbell { + struct{ + u32 eq_next_cid : 11; + u32 eq_id : 11; + u32 arm : 1; + }; + u32 val; + } db; + + db.eq_next_cid = next_cid; + db.eq_id = eqn; + db.arm = !!arm; + + /* make sure val write to memory done */ + wmb(); + xsc_write32(bar, _hal->regs->event_db, &db.val); +} + +static u8 andes_get_mr_page_mode(u8 page_shift) +{ +enum { + XSC_PAGE_SHIFT_4K = 12, + XSC_PAGE_SHIFT_64K = 16, + XSC_PAGE_SHIFT_2M = 21, + XSC_PAGE_SHIFT_1G = 30, +}; + +enum { + XSC_PAGE_MODE_4K = 0, + XSC_PAGE_MODE_64K = 1, + XSC_PAGE_MODE_2M = 2, + XSC_PAGE_MODE_1G = 3, +}; + + return (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : + (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : + (page_shift == XSC_PAGE_SHIFT_2M ? XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); +} + +static inline u32 andes_mkey_to_idx(u32 mkey) +{ + return mkey >> 17; +} + +static inline u32 andes_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << 17; +} + +static void andes_set_mpt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_register_mr_request *req = mr_request; + union xsc_mpt_entry { + struct { + u32 va_l; + u32 va_h; + u32 mem_size; + u32 pdn:24; + u32 key:8; + u32 mtt_base:18; + u32 acc:4; + u32 page_mode:2; + u32 mem_map_en:1; + }; + u32 raw[5]; + } mpt; + u64 va = be64_to_cpu(req->va_base); + u32 mkey = be32_to_cpu(req->mkey); + u32 idx = andes_mkey_to_idx(mkey); + u32 reg_addr = _hal->regs->mpt_tbl_addr + idx * (_hal->regs->mpt_tbl_width >> 3); + + mpt.va_l = va & 0xffffffff; + mpt.va_h = va >> 32; + mpt.mem_size = be64_to_cpu(req->len); + mpt.pdn = be32_to_cpu(req->pdn); + mpt.key = mkey & 0xff; + mpt.mtt_base = mtt_base; + mpt.acc = req->acc; + mpt.page_mode = req->page_mode; + mpt.mem_map_en = req->map_en; + + xsc_ia32_write_reg_mr(_hal, bar, reg_addr, mpt.raw, ARRAY_SIZE(mpt.raw), iae_idx); +} + +static void andes_clear_mpt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_unregister_mr_mbox_in *req = mr_request; + union xsc_mpt_entry { + struct { + u32 va_l; + u32 va_h; + u32 mem_size; + u32 pdn:24; + u32 key:8; + u32 mtt_base:18; + u32 acc:4; + u32 page_mode:2; + u32 mem_map_en:1; + }; + u32 raw[5]; + } mpt; + + u32 idx = be32_to_cpu(req->mkey); + u32 reg_addr = _hal->regs->mpt_tbl_addr + idx * (_hal->regs->mpt_tbl_width >> 3); + + memset(&mpt, 0x00, sizeof(mpt)); + + xsc_ia32_write_reg_mr(_hal, bar, reg_addr, mpt.raw, ARRAY_SIZE(mpt.raw), iae_idx); +} + +#define PAGE_SHIFT_4K 12 +static void andes_set_mtt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_register_mr_request *req = mr_request; + int i; + u32 pa_num = be32_to_cpu(req->pa_num); + u64 pa; + u32 reg_addr; + + for (i = 0; i < pa_num; i++) { + pa = 
req->pas[i]; + pa = be64_to_cpu(pa); + pa = pa >> PAGE_SHIFT_4K; + reg_addr = _hal->regs->mtt_inst_base_addr + (mtt_base + i) * sizeof(u64); + xsc_ia32_write_reg_mr(_hal, bar, reg_addr, + (u32 *)&pa, sizeof(pa) / sizeof(u32), iae_idx); + } +} + +static void andes_set_read_done_msix_vector(void *hal, void __iomem *bar, u32 vector) +{ + struct xsc_hw_abstract_layer *_hal = hal; + u32 val = (1 << 12) | (vector & 0xfff); + + writel(val, REG_ADDR(bar, _hal->regs->tbl2irq_rd_done_msix_reg)); +} + +#define XSC_DMA_WR_SUCCESS 0x3 +static int andes_dma_write_tbl_once(void *hal, void __iomem *bar, u32 data_len, u64 dma_wr_addr, + u32 host_id, u32 func_id, u64 success[2], u32 size) +{ + struct xsc_hw_abstract_layer *_hal = hal; + u32 busy = 0; + u32 value = 0; + u32 done = 0; + u32 reg_addr; + + do { + busy = readl(REG_ADDR(bar, _hal->regs->tbl_dl_busy_reg)); + } while (busy != 0x0); + + writel(1, REG_ADDR(bar, _hal->regs->err_code_clr_reg)); + + value = ((data_len << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT) | + (host_id << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT) | func_id); + + writel(value, REG_ADDR(bar, _hal->regs->tbl_dl_req_reg)); + + value = (dma_wr_addr & HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK); + writel(value, REG_ADDR(bar, _hal->regs->tbl_dl_addr_l_reg)); + + value = ((dma_wr_addr >> 32) & HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK); + writel(value, REG_ADDR(bar, _hal->regs->tbl_dl_addr_h_reg)); + + writel(1, REG_ADDR(bar, _hal->regs->tbl_dl_start_reg)); + + do { + done = readl(REG_ADDR(bar, _hal->regs->dma_dl_done_reg)); + } while ((done & 0x1) != 0x1); + if (done != XSC_DMA_WR_SUCCESS) { + reg_addr = _hal->regs->dma_dl_success_reg; + xsc_ia32_read(_hal, bar, reg_addr, success, (size / sizeof(u32))); + return -1; + } + + return 0; +} + +static void andes_dma_read_tbl(void *hal, void __iomem *bar, + u32 host_id, u32 func_id, u64 data_addr, + u32 tbl_id, u32 burst_num, u32 tbl_start_addr) +{ + struct xsc_hw_abstract_layer *_hal = hal; + u32 busy; + u32 value; + + writel(1, REG_ADDR(bar, _hal->regs->tbl_msg_rdy_reg)); + + do { + busy = readl(REG_ADDR(bar, _hal->regs->dma_ul_busy_reg)); + } while (busy != 0x0); + + value = ((host_id << HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT) | func_id); + writel(value, REG_ADDR(bar, _hal->regs->tbl_ul_req_reg)); + + value = data_addr & HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK; + writel(value, REG_ADDR(bar, _hal->regs->tbl_ul_addr_l_reg)); + + value = (data_addr >> 32) & HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK; + writel(value, REG_ADDR(bar, _hal->regs->tbl_ul_addr_h_reg)); + + writel(1, REG_ADDR(bar, _hal->regs->tbl_ul_start_reg)); + + value = tbl_id & CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK; + writel(value, REG_ADDR(bar, _hal->regs->dma_rd_table_id_reg)); + + value = (burst_num << CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT) | tbl_start_addr; + writel(value, REG_ADDR(bar, _hal->regs->dma_rd_addr_reg)); + + writel(1, REG_ADDR(bar, _hal->regs->indrw_rd_start_reg)); +} + +static const u32 xsc_msg_opcode[][2][2] = { + [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND, + [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND_IMMDT, + [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV, + [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT, + 
[XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_READ, + [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_MAD][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND, + [XSC_MSG_OPCODE_MAD][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +struct andes_cqe { + union { + u8 msg_opcode; + struct { + u8 error_code:7; + u8 is_error:1; + }; + }; + __le32 qp_id:15; + u8 rsv1:1; + u8 se:1; + u8 has_pph:1; + u8 type:1; + u8 with_immdt:1; + u8 csum_err:4; + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le64 ts:48; + __le16 wqe_id; + __le16 rsv[3]; + __le16 rsv2:15; + u8 owner:1; +}; + +static bool andes_is_err_cqe(void *cqe) +{ + struct andes_cqe *_cqe = cqe; + + return _cqe->is_error; +} + +static u8 andes_get_cqe_error_code(void *cqe) +{ + struct andes_cqe *_cqe = cqe; + + return _cqe->error_code; +} + +static u8 andes_get_cqe_opcode(void *cqe) +{ + struct andes_cqe *_cqe = cqe; + u8 msg_opcode = _cqe->msg_opcode; + + if (_cqe->is_error) + return _cqe->type ? XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR; + if (msg_opcode > XSC_MSG_OPCODE_MAD) + return XSC_OPCODE_RDMA_CQE_ERROR; + return xsc_msg_opcode[msg_opcode][_cqe->type][_cqe->with_immdt]; +} + +static u32 andes_get_max_mtt_num(void *hal) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + return _hal->regs->mtt_inst_depth; +} + +static u32 andes_get_max_mpt_num(void *hal) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + return _hal->regs->mpt_tbl_depth; +} + +struct andes_data_seg { + u32 in_line:1; + u32 length:31; + u32 key; + u64 addr; +}; + +static void andes_set_data_seg(void *data_seg, u32 length, u32 key, u64 addr) +{ + struct andes_data_seg *seg = data_seg; + + seg->length = length; + seg->key = key; + seg->addr = addr; +} + +static bool andes_skb_need_linearize(int ds_num) +{ + return false; +} + +static struct xsc_hw_ops andes_arch_ops = { + .read = xsc_read32, + .write = xsc_write32, + .ia_read = xsc_ia32_read, + .ia_write = xsc_ia32_write, + .ring_tx_doorbell = andes_ring_tx_doorbell, + .ring_rx_doorbell = andes_ring_rx_doorbell, + .update_cq_db = andes_update_cq_db, + .set_cq_ci = andes_set_cq_ci, + .set_eq_ci = andes_set_eq_ci, + .get_mr_page_mode = andes_get_mr_page_mode, + .mkey_to_idx = andes_mkey_to_idx, + .idx_to_mkey = andes_idx_to_mkey, + .set_mpt = andes_set_mpt_tbl, + .clear_mpt = andes_clear_mpt_tbl, + .set_mtt = andes_set_mtt_tbl, + .set_read_done_msix_vector = andes_set_read_done_msix_vector, + .dma_write_tbl_once = andes_dma_write_tbl_once, + .dma_read_tbl = andes_dma_read_tbl, + .is_err_cqe = andes_is_err_cqe, + .get_cqe_error_code = andes_get_cqe_error_code, + .get_cqe_opcode = andes_get_cqe_opcode, + .get_max_mtt_num = andes_get_max_mtt_num, + .get_max_mpt_num = andes_get_max_mpt_num, + .set_data_seg = andes_set_data_seg, + .skb_need_linearize = andes_skb_need_linearize, +}; + +static struct xsc_hw_reg andes_pf_regs = { + .cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR - 0xa0000000, + .cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR - 0xa0000000, + .cpm_lock_avail = HIF_CPM_LOCK_AVAIL_REG_ADDR - 0xa0000000, + .cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR - 0xa0000000, + 
.cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR - 0xa0000000, + .cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR - 0xa0000000, + .cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR - 0xa0000000, + .req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR - 0xa0000000, + .req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR - 0xa0000000, + .rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR - 0xa0000000, + .rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR - 0xa0000000, + .req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR - 0xa0000000, + .req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR - 0xa0000000, + .rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR - 0xa0000000, + .rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR - 0xa0000000, + .msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR - 0xa0000000, + .element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR - 0xa0000000, + .q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR - 0xa0000000, + .interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR - 0xa0000000, + .tbl2irq_rd_done_msix_reg = HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR - 0xa0000000, + .dma_ul_busy_reg = CLSF_DMA_DMA_UL_BUSY_REG_ADDR - 0xa0000000, + .dma_dl_done_reg = CLSF_DMA_DMA_DL_DONE_REG_ADDR - 0xa0000000, + .dma_dl_success_reg = CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR - 0xa0000000, + .err_code_clr_reg = CLSF_DMA_ERR_CODE_CLR_REG_ADDR - 0xa0000000, + .dma_rd_table_id_reg = CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR - 0xa0000000, + .dma_rd_addr_reg = CLSF_DMA_DMA_RD_ADDR_REG_ADDR - 0xa0000000, + .indrw_rd_start_reg = CLSF_DMA_INDRW_RD_START_REG_ADDR - 0xa0000000, + .tbl_dl_busy_reg = HIF_TBL_TBL_DL_BUSY_REG_ADDR - 0xa0000000, + .tbl_dl_req_reg = HIF_TBL_TBL_DL_REQ_REG_ADDR - 0xa0000000, + .tbl_dl_addr_l_reg = HIF_TBL_TBL_DL_ADDR_L_REG_ADDR - 0xa0000000, + .tbl_dl_addr_h_reg = HIF_TBL_TBL_DL_ADDR_H_REG_ADDR - 0xa0000000, + .tbl_dl_start_reg = HIF_TBL_TBL_DL_START_REG_ADDR - 0xa0000000, + .tbl_ul_req_reg = HIF_TBL_TBL_UL_REQ_REG_ADDR - 0xa0000000, + .tbl_ul_addr_l_reg = HIF_TBL_TBL_UL_ADDR_L_REG_ADDR - 0xa0000000, + .tbl_ul_addr_h_reg = HIF_TBL_TBL_UL_ADDR_H_REG_ADDR - 0xa0000000, + .tbl_ul_start_reg = HIF_TBL_TBL_UL_START_REG_ADDR - 0xa0000000, + .tbl_msg_rdy_reg = HIF_TBL_MSG_RDY_REG_ADDR - 0xa0000000, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +static struct xsc_hw_reg andes_bar_compressed_pf_regs = { + .tx_db = TX_DB_FUNC_MEM_ADDR, + .rx_db = RX_DB_FUNC_MEM_ADDR, + .complete_db = DB_CQ_FUNC_MEM_ADDR, + .complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR, + .event_db = DB_EQ_FUNC_MEM_ADDR, + .cpm_get_lock = CPM_LOCK_GET_REG_ADDR, + .cpm_put_lock = CPM_LOCK_PUT_REG_ADDR, + .cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR, + .cpm_data_mem = CPM_IDA_DATA_MEM_ADDR_NEW, + .cpm_cmd = CPM_IDA_CMD_REG_ADDR, + .cpm_addr = CPM_IDA_ADDR_REG_ADDR_NEW, + .cpm_busy = CPM_IDA_BUSY_REG_ADDR, + .req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR, + .req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR, + .rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR, + .rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR, + .req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR, + .req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR, + .rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR, + .rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR, + .msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR, + .element_sz_addr = 
CMDQM_Q_ELEMENT_SZ_REG_ADDR, + .q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR, + .interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR, + .tbl2irq_rd_done_msix_reg = TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR, + .dma_ul_busy_reg = DMA_UL_BUSY_REG_ADDR, + .dma_dl_done_reg = DMA_DL_DONE_REG_ADDR, + .dma_dl_success_reg = DMA_DL_SUCCESS_REG_ADDR, + .err_code_clr_reg = ERR_CODE_CLR_REG_ADDR, + .dma_rd_table_id_reg = DMA_RD_TABLE_ID_REG_ADDR, + .dma_rd_addr_reg = DMA_RD_ADDR_REG_ADDR, + .indrw_rd_start_reg = INDRW_RD_START_REG_ADDR, + .tbl_dl_busy_reg = TBL_DL_BUSY_REG_ADDR, + .tbl_dl_req_reg = TBL_DL_REQ_REG_ADDR, + .tbl_dl_addr_l_reg = TBL_DL_ADDR_L_REG_ADDR, + .tbl_dl_addr_h_reg = TBL_DL_ADDR_H_REG_ADDR, + .tbl_dl_start_reg = TBL_DL_START_REG_ADDR, + .tbl_ul_req_reg = TBL_UL_REQ_REG_ADDR, + .tbl_ul_addr_l_reg = TBL_UL_ADDR_L_REG_ADDR, + .tbl_ul_addr_h_reg = TBL_UL_ADDR_H_REG_ADDR, + .tbl_ul_start_reg = TBL_UL_START_REG_ADDR, + .tbl_msg_rdy_reg = TBL_MSG_RDY_REG_ADDR, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +static struct xsc_hw_reg andes_vf_regs = { + .tx_db = TX_DB_FUNC_MEM_ADDR, + .rx_db = RX_DB_FUNC_MEM_ADDR, + .complete_db = DB_CQ_FUNC_MEM_ADDR, + .complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR, + .event_db = DB_EQ_FUNC_MEM_ADDR, + .cpm_get_lock = CPM_LOCK_GET_REG_ADDR, + .cpm_put_lock = CPM_LOCK_PUT_REG_ADDR, + .cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR, + .cpm_data_mem = CPM_IDA_DATA_MEM_ADDR, + .cpm_cmd = CPM_IDA_CMD_REG_ADDR, + .cpm_addr = CPM_IDA_ADDR_REG_ADDR, + .cpm_busy = CPM_IDA_BUSY_REG_ADDR, + .req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR, + .req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR, + .rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR, + .rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR, + .req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR, + .req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR, + .rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR, + .rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR, + .msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR, + .element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR, + .q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR, + .interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +struct xsc_hw_abstract_layer andes_pf_hal = { + .ops = &andes_arch_ops, + .regs = &andes_pf_regs, +}; + +struct xsc_hw_abstract_layer compressed_pf_hal = { + .ops = &andes_arch_ops, + .regs = &andes_bar_compressed_pf_regs, +}; + +struct xsc_hw_abstract_layer andes_vf_hal = { + .ops = &andes_arch_ops, + .regs = &andes_vf_regs, +}; + +struct xsc_hw_abstract_layer *get_andes_pf_hal(void) +{ + return &andes_pf_hal; +} + +struct xsc_hw_abstract_layer *get_andes_bar_compressed_pf_hal(void) +{ + return &compressed_pf_hal; +} + +struct xsc_hw_abstract_layer *get_andes_vf_hal(void) +{ + return &andes_vf_hal; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/andes_reg.h b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/andes_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..d143e0cb73f1a3bd83ebb47be6100a26ba8ab55e --- /dev/null 
+++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/andes_reg.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +/* generated time: + * Thu Feb 29 15:33:50 CST 2024 + */ + +#ifndef ANDES_REG_H +#define ANDES_REG_H + +//hif_irq_csr_defines.h +#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100070 + +//hif_cpm_csr_defines.h +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 + +//mmc_csr_defines.h +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 262144 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 + +//clsf_dma_csr_defines.h +#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010048 +#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa60100d0 +#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa60100c0 +#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa60100d4 +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010020 +#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 +#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010024 +#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010028 + +//hif_tbl_csr_defines.h +#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 +#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 +#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 +#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c +#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c +#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 +#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 +#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 +#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 + +//hif_cmdqm_csr_defines.h +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1026000 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1028000 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa102e000 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1030000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1022000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1024000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa102a000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa102c000 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1034000 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1032000 + +//PSV use +//hif_irq_csr_defines.h +#define 
HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa11000b4 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa1100114 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000f0 +#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa11000ec +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000f4 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000f8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000fc +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa1100100 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000e8 + +#endif /* XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_impl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_impl.c new file mode 100644 index 0000000000000000000000000000000000000000..99c810eeb97fd5b4324d72df743f5f1dc7b74307 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_impl.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 - 2024, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "diamond_reg.h" +#include "common/xsc_reg.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "xsc_hal.h" + +#define REG_ADDR(bp, offset) ((bp) + (offset)) + +#define HIF_CPM_IDA_DATA_MEM_STRIDE 0x40 + +#define CPM_IAE_CMD_READ 0 +#define CPM_IAE_CMD_WRITE 1 + +#define CPM_IAE_ADDR_REG_STRIDE HIF_CPM_IDA_ADDR_REG_STRIDE + +#define CPM_IAE_DATA_MEM_STRIDE HIF_CPM_IDA_DATA_MEM_STRIDE + +#define CPM_IAE_DATA_MEM_MAX_LEN 16 + +static inline void acquire_ia64_lock(void *hal, void __iomem *bar, int *iae_idx) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + u64 lock_val; + u64 lock_vld; + + lock_val = readq(REG_ADDR(bar, _hal->regs->cpm_get_lock)); + lock_vld = lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT; + if (lock_vld) + *iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK; + else + *iae_idx = -1; +} + +static inline void release_ia64_lock(void *hal, void __iomem *bar, int lock_idx) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + + writeq(lock_idx, REG_ADDR(bar, _hal->regs->cpm_put_lock)); +} + +static inline void ia64_write_data(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u64 *data, int nr, int idx) +{ + int i; + int offset = hal->regs->cpm_data_mem + idx * CPM_IAE_DATA_MEM_STRIDE; + + for (i = 0; i < nr; i++) { + writeq(*(data++), REG_ADDR(bar, offset)); + offset += sizeof(*data); + } +} + +static inline void ia64_read_data(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u64 *data, int nr, int idx) +{ + int i; + int offset = hal->regs->cpm_data_mem + idx * CPM_IAE_DATA_MEM_STRIDE; + u64 *ptr = data; + + for (i = 0; i < nr; i++) { + *ptr = readq(REG_ADDR(bar, offset)); + offset += sizeof(*data); + ptr = ptr + 1; + } +} + +static inline void ia64_write_reg_addr(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u32 addr, int idx) +{ + int offset = hal->regs->cpm_addr + idx * CPM_IAE_ADDR_REG_STRIDE; + u64 reg_addr_val = addr; + + writeq(reg_addr_val, REG_ADDR(bar, offset)); +} + +static inline void initiate_ia64_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length, int r0w1) +{ + struct ia_cmd { + union { + struct { + u64 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH; + u64 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH; + u64 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH; + }; + u64 raw_data; + }; + } cmd; + + int addr = hal->regs->cpm_cmd; + + cmd.iae_r0w1 = r0w1; + 
cmd.iae_len = length - 1; + cmd.iae_idx = iae_idx; + writeq(cmd.raw_data, REG_ADDR(bar, addr)); +} + +static inline void initiate_ia64_write_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length) +{ + initiate_ia64_cmd(hal, bar, iae_idx, length, CPM_IAE_CMD_WRITE); +} + +static inline void initiate_ia64_read_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length) +{ + initiate_ia64_cmd(hal, bar, iae_idx, length, CPM_IAE_CMD_READ); +} + +static inline void wait_for_ia64_complete(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx) +{ + while ((readq(REG_ADDR(bar, hal->regs->cpm_busy)) & (1 << iae_idx))) + ; +} + +static void xsc_read64(void *bar, u32 off, void *data, int len) +{ + u64 val = readq(REG_ADDR(bar, off)); + + memcpy(data, &val, len); +} + +static void xsc_write64(void *bar, u32 off, void *data) +{ + writeq(*(u64 *)data, REG_ADDR(bar, off)); +} + +static void xsc_ia64_write_reg_mr(void *hal, void __iomem *bar, + u32 addr, void *data, int nr, int idx) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + ia64_write_data(_hal, bar, data, nr, idx); + ia64_write_reg_addr(_hal, bar, addr, idx); + initiate_ia64_write_cmd(_hal, bar, idx, nr); +} + +static void xsc_ia64_read(void *hal, void __iomem *bar, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *_hal = hal; + int idx; + + do { + acquire_ia64_lock(_hal, bar, &idx); + } while (idx == -1); + ia64_write_reg_addr(_hal, bar, addr, idx); + initiate_ia64_read_cmd(_hal, bar, idx, nr); + wait_for_ia64_complete(_hal, bar, idx); + ia64_read_data(_hal, bar, data, nr, idx); + release_ia64_lock(_hal, bar, idx); +} + +static void xsc_ia64_write(void *hal, void __iomem *bar, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *_hal = hal; + int idx; + + do { + acquire_ia64_lock(_hal, bar, &idx); + } while (idx == -1); + ia64_write_data(_hal, bar, data, nr, idx); + ia64_write_reg_addr(_hal, bar, addr, idx); + initiate_ia64_write_cmd(_hal, bar, idx, nr); + release_ia64_lock(_hal, bar, idx); +} + +static void diamond_ring_tx_doorbell(void *hal, void __iomem *bar, u32 sqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union xsc2_send_doorbell { + struct{ + u64 next_pid : 17; + u64 qp_id : 14; + }; + u64 raw; + } db; + + db.next_pid = next_pid; + db.qp_id = sqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + xsc_write64(bar, _hal->regs->tx_db, &db.raw); +} + +static void diamond_ring_rx_doorbell(void *hal, void __iomem *bar, u32 rqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union xsc2_recv_doorbell { + struct{ + u64 next_pid : 14; + u64 qp_id : 14; + }; + u64 raw; + } db; + + db.next_pid = next_pid; + db.qp_id = rqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + xsc_write64(bar, _hal->regs->rx_db, &db.raw); +} + +union diamond_cq_doorbell { + struct{ + u64 cq_next_cid:23; + u64 cq_id:14; + u64 cq_sta:2; + }; + u64 raw; +}; + +static void diamond_update_cq_db(void *hal, void __iomem *bar, u32 cqn, u32 next_cid, u8 solicited) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union diamond_cq_doorbell db; + + db.cq_next_cid = next_cid; + db.cq_id = cqn; + db.cq_sta = solicited ? CQ_STAT_ARM_SOLICITED : CQ_STAT_ARM_NEXT; + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. 
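+	 * The arm state below selects CQ_STAT_ARM_SOLICITED for solicited-only
+	 * completions and CQ_STAT_ARM_NEXT to re-arm for the next completion.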
+ */ + wmb(); + xsc_write64(bar, _hal->regs->complete_db, &db.raw); +} + +static void diamond_set_cq_ci(void *hal, void __iomem *bar, u32 cqn, u32 next_cid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union diamond_cq_doorbell db; + + db.cq_next_cid = next_cid; + db.cq_id = cqn; + db.cq_sta = CQ_STAT_FIRED; + /* make sure val write to memory done */ + wmb(); + xsc_write64(bar, _hal->regs->complete_db, &db.raw); +} + +static void diamond_set_eq_ci(void *hal, void __iomem *bar, u32 eqn, u32 next_cid, u8 arm) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union diamond_eq_doorbell { + struct{ + u64 eq_next_cid : 12; + u64 eq_id : 8; + u64 eq_sta : 1; + }; + u64 raw; + } db; + + db.eq_next_cid = next_cid; + db.eq_id = eqn; + db.eq_sta = !!arm; + + /* make sure val write to memory done */ + wmb(); + xsc_write64(bar, _hal->regs->event_db, &db.raw); +} + +static u8 diamond_get_mr_page_mode(u8 page_shift) +{ + return page_shift; +} + +static inline u32 diamond_mkey_to_idx(u32 mkey) +{ + return mkey >> 8; +} + +static inline u32 diamond_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << 8; +} + +static void diamond_set_mpt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_register_mr_request *req = mr_request; + union xsc_mpt_entry { + struct { + u64 va; + u64 mem_size:38; + u64 pdn:24; + u64 key:8; + u64 mtt_base:20; + u64 acc:4; + u64 page_mode:5; + u64 is_gpu:1; + u64 mem_map_dis:1; + } __packed; + u64 raw[3]; + } mpt; + u64 va = be64_to_cpu(req->va_base); + u32 mkey = be32_to_cpu(req->mkey); + u32 idx = diamond_mkey_to_idx(mkey); + u32 reg_addr = _hal->regs->mpt_tbl_addr + idx * (_hal->regs->mpt_tbl_width >> 3); + + mpt.va = va; + mpt.mem_size = be64_to_cpu(req->len); + mpt.pdn = be32_to_cpu(req->pdn); + mpt.key = mkey & 0xff; + mpt.mtt_base = mtt_base; + mpt.acc = req->acc; + mpt.page_mode = req->page_mode; + mpt.is_gpu = req->is_gpu; + mpt.mem_map_dis = req->map_en; + + xsc_ia64_write_reg_mr(_hal, bar, reg_addr, mpt.raw, ARRAY_SIZE(mpt.raw), iae_idx); +} + +static void diamond_clear_mpt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_unregister_mr_mbox_in *req = mr_request; + union xsc_mpt_entry { + struct { + u64 va; + u64 mem_size:38; + u64 pdn:24; + u64 key:8; + u64 mtt_base:20; + u64 acc:4; + u64 page_mode:5; + u64 is_gpu:1; + u64 mem_map_dis:1; + }; + u64 raw[3]; + } mpt; + u32 idx = be32_to_cpu(req->mkey); + u32 reg_addr = _hal->regs->mpt_tbl_addr + idx * (_hal->regs->mpt_tbl_width >> 3); + + memset(&mpt, 0x00, sizeof(mpt)); + xsc_ia64_write_reg_mr(_hal, bar, reg_addr, mpt.raw, ARRAY_SIZE(mpt.raw), iae_idx); +} + +#define PAGE_SHIFT_4K 12 +static void diamond_set_mtt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_register_mr_request *req = mr_request; + int i; + u32 pa_num = be32_to_cpu(req->pa_num); + u64 pa; + u32 reg_addr; + u32 mtt_base_addr = _hal->regs->mtt_inst_base_addr; + u32 stride = _hal->regs->mtt_inst_stride; + u8 inst_mask = (1 << _hal->regs->mtt_inst_num_log) - 1; + u8 inst; + + for (i = 0; i < pa_num; i++) { + pa = req->pas[i]; + pa = be64_to_cpu(pa); + pa = pa >> PAGE_SHIFT_4K; + inst = (mtt_base + i) & inst_mask; + reg_addr = mtt_base_addr + (inst * stride) + + ((mtt_base + i) >> _hal->regs->mtt_inst_num_log) * sizeof(u64); + xsc_write64(bar, reg_addr, &pa); + } +} + +static void 
diamond_set_read_done_msix_vector(void *hal, void __iomem *bar, u32 vector) +{ +} + +static int diamond_dma_write_tbl_once(void *hal, void __iomem *bar, + u32 data_len, u64 dma_wr_addr, + u32 host_id, u32 func_id, + u64 success[2], u32 size) +{ + return -1; +} + +static void diamond_dma_read_tbl(void *hal, void __iomem *bar, + u32 host_id, u32 func_id, u64 data_addr, + u32 tbl_id, u32 burst_num, u32 tbl_start_addr) +{ +} + +static const u32 xsc_msg_opcode[][2][2] = { + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT, + [XSC_MSG_OPCODE_RAW][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND, + [XSC_MSG_OPCODE_RAW][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +struct diamond_cqe { + u8 error_code; + __le32 qp_id:15; + u8 raw_is_cut:1; + u8 se:1; + u8 has_pph:1; + u8 type:1; + u8 with_immdt:1; + u8 csum_err:4; + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le64 ts:48; + __le16 wqe_id; + u8 msg_opcode; + u8 rsv; + __le16 rsv1[2]; + __le16 rsv2:15; + u8 owner:1; +}; + +static bool diamond_is_err_cqe(void *cqe) +{ + struct diamond_cqe *_cqe = cqe; + + return !!_cqe->error_code; +} + +static u8 diamond_get_cqe_error_code(void *cqe) +{ + struct diamond_cqe *_cqe = cqe; + + return _cqe->error_code; +} + +static u8 diamond_get_cqe_opcode(void *cqe) +{ + struct diamond_cqe *_cqe = cqe; + u8 msg_opcode = _cqe->msg_opcode; + + if (_cqe->error_code) + return _cqe->type ? XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR; + if (msg_opcode != XSC_MSG_OPCODE_RAW && msg_opcode != XSC_MSG_OPCODE_RDMA_WRITE) + return XSC_OPCODE_RDMA_CQE_ERROR; + return xsc_msg_opcode[msg_opcode][_cqe->type][_cqe->with_immdt]; +} + +static u32 diamond_get_max_mtt_num(void *hal) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + return _hal->regs->mtt_inst_depth << _hal->regs->mtt_inst_num_log; +} + +static u32 diamond_get_max_mpt_num(void *hal) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + return _hal->regs->mpt_tbl_depth; +} + +struct diamond_data_seg { + u32 length; + u32 key; + u64 addr; +}; + +static void diamond_set_data_seg(void *data_seg, u32 length, u32 key, u64 addr) +{ + struct diamond_data_seg *seg = data_seg; + + seg->length = length; + seg->key = key; + seg->addr = addr; +} + +static bool diamond_skb_need_linearize(int ds_num) +{ + return ds_num > 4; +} + +static struct xsc_hw_ops diamond_arch_ops = { + .read = xsc_read64, + .write = xsc_write64, + .ia_read = xsc_ia64_read, + .ia_write = xsc_ia64_write, + .ring_tx_doorbell = diamond_ring_tx_doorbell, + .ring_rx_doorbell = diamond_ring_rx_doorbell, + .update_cq_db = diamond_update_cq_db, + .set_cq_ci = diamond_set_cq_ci, + .set_eq_ci = diamond_set_eq_ci, + .get_mr_page_mode = diamond_get_mr_page_mode, + .mkey_to_idx = diamond_mkey_to_idx, + .idx_to_mkey = diamond_idx_to_mkey, + .set_mpt = diamond_set_mpt_tbl, + .clear_mpt = diamond_clear_mpt_tbl, + .set_mtt = diamond_set_mtt_tbl, + .set_read_done_msix_vector = diamond_set_read_done_msix_vector, + .dma_write_tbl_once = diamond_dma_write_tbl_once, + .dma_read_tbl = diamond_dma_read_tbl, + .is_err_cqe = diamond_is_err_cqe, + .get_cqe_error_code = diamond_get_cqe_error_code, + .get_cqe_opcode = diamond_get_cqe_opcode, + .get_max_mtt_num = diamond_get_max_mtt_num, + 
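+	/* MTT/MPT capacities reported to the MR code: for diamond they come
+	 * from diamond_pf_regs/diamond_vf_regs below and may be refreshed from
+	 * the firmware capability query via xsc_set_mtt_info().
+	 */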
.get_max_mpt_num = diamond_get_max_mpt_num, + .set_data_seg = diamond_set_data_seg, + .skb_need_linearize = diamond_skb_need_linearize, +}; + +static struct xsc_hw_reg diamond_pf_regs = { + .cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR - 0xa0000000, + .cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR - 0xa0000000, + .cpm_lock_avail = HIF_CPM_LOCK_AVAIL_REG_ADDR - 0xa0000000, + .cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR - 0xa0000000, + .cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR - 0xa0000000, + .cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR - 0xa0000000, + .cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR - 0xa0000000, + .req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR - 0xa0000000, + .req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR - 0xa0000000, + .rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR - 0xa0000000, + .rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR - 0xa0000000, + .req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR - 0xa0000000, + .req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR - 0xa0000000, + .rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR - 0xa0000000, + .rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR - 0xa0000000, + .msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR - 0xa0000000, + .element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR - 0xa0000000, + .q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR - 0xa0000000, + .interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR - 0xa0000000, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +static struct xsc_hw_reg diamond_vf_regs = { + .tx_db = TX_DB_FUNC_MEM_ADDR, + .rx_db = RX_DB_FUNC_MEM_ADDR, + .complete_db = DB_CQ_FUNC_MEM_ADDR, + .complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR, + .event_db = DB_EQ_FUNC_MEM_ADDR, + .cpm_get_lock = CPM_LOCK_GET_REG_ADDR, + .cpm_put_lock = CPM_LOCK_PUT_REG_ADDR, + .cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR, + .cpm_data_mem = CPM_IDA_DATA_MEM_ADDR, + .cpm_cmd = CPM_IDA_CMD_REG_ADDR, + .cpm_addr = CPM_IDA_ADDR_REG_ADDR, + .cpm_busy = CPM_IDA_BUSY_REG_ADDR, + .req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR, + .req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR, + .rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR, + .rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR, + .req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR, + .req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR, + .rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR, + .rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR, + .msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR, + .element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR, + .q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR, + .interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +struct xsc_hw_abstract_layer diamond_pf_hal = { + .ops = &diamond_arch_ops, + .regs = &diamond_pf_regs, +}; + +struct xsc_hw_abstract_layer diamond_vf_hal = { + .ops = &diamond_arch_ops, + .regs = &diamond_vf_regs, +}; + +struct xsc_hw_abstract_layer *get_diamond_pf_hal(void) +{ + return &diamond_pf_hal; +} + +struct xsc_hw_abstract_layer *get_diamond_vf_hal(void) +{ + return &diamond_vf_hal; +} diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_next_impl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_next_impl.c new file mode 100644 index 0000000000000000000000000000000000000000..cd66879bf3a1c60961530f242a4eab05614b6559 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_next_impl.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0 +// +/* Copyright (C) 2024 - 2024, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "diamond_reg.h" +#include "common/xsc_reg.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "xsc_hal.h" + +#define REG_ADDR(bp, offset) ((bp) + (offset)) + +#define HIF_CPM_IDA_DATA_MEM_STRIDE 0x40 + +#define CPM_IAE_CMD_READ 0 +#define CPM_IAE_CMD_WRITE 1 + +#define CPM_IAE_ADDR_REG_STRIDE HIF_CPM_IDA_ADDR_REG_STRIDE + +#define CPM_IAE_DATA_MEM_STRIDE HIF_CPM_IDA_DATA_MEM_STRIDE + +#define CPM_IAE_DATA_MEM_MAX_LEN 16 + +static inline void acquire_ia64_lock(void *hal, void __iomem *bar, int *iae_idx) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + u64 lock_val; + u64 lock_vld; + + lock_val = readq(REG_ADDR(bar, _hal->regs->cpm_get_lock)); + lock_vld = lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT; + if (lock_vld) + *iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK; + else + *iae_idx = -1; +} + +static inline void release_ia64_lock(void *hal, void __iomem *bar, int lock_idx) +{ + struct xsc_hw_abstract_layer *_hal = (struct xsc_hw_abstract_layer *)hal; + + writeq(lock_idx, REG_ADDR(bar, _hal->regs->cpm_put_lock)); +} + +static inline void ia64_write_data(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u64 *data, int nr, int idx) +{ + int i; + int offset = hal->regs->cpm_data_mem + idx * CPM_IAE_DATA_MEM_STRIDE; + + for (i = 0; i < nr; i++) { + writeq(*(data++), REG_ADDR(bar, offset)); + offset += sizeof(*data); + } +} + +static inline void ia64_read_data(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u64 *data, int nr, int idx) +{ + int i; + int offset = hal->regs->cpm_data_mem + idx * CPM_IAE_DATA_MEM_STRIDE; + u64 *ptr = data; + + for (i = 0; i < nr; i++) { + *ptr = readq(REG_ADDR(bar, offset)); + offset += sizeof(*data); + ptr = ptr + 1; + } +} + +static inline void ia64_write_reg_addr(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + u32 addr, int idx) +{ + int offset = hal->regs->cpm_addr + idx * CPM_IAE_ADDR_REG_STRIDE; + u64 reg_addr_val = addr; + + writeq(reg_addr_val, REG_ADDR(bar, offset)); +} + +static inline void initiate_ia64_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length, int r0w1) +{ + struct ia_cmd { + union { + struct { + u64 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH; + u64 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH; + u64 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH; + }; + u64 raw_data; + }; + } cmd; + + int addr = hal->regs->cpm_cmd; + + cmd.iae_r0w1 = r0w1; + cmd.iae_len = length - 1; + cmd.iae_idx = iae_idx; + writeq(cmd.raw_data, REG_ADDR(bar, addr)); +} + +static inline void initiate_ia64_write_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length) +{ + initiate_ia64_cmd(hal, bar, iae_idx, length, CPM_IAE_CMD_WRITE); +} + +static inline void initiate_ia64_read_cmd(struct xsc_hw_abstract_layer *hal, void __iomem *bar, + int iae_idx, int length) +{ + initiate_ia64_cmd(hal, bar, iae_idx, length, CPM_IAE_CMD_READ); +} + +static inline void wait_for_ia64_complete(struct xsc_hw_abstract_layer *hal, void __iomem 
*bar, + int iae_idx) +{ + while ((readq(REG_ADDR(bar, hal->regs->cpm_busy)) & (1 << iae_idx))) + ; +} + +static void xsc_read64(void *bar, u32 off, void *data, int len) +{ + u64 val = readq(REG_ADDR(bar, off)); + + memcpy(data, &val, len); +} + +static void xsc_write64(void *bar, u32 off, void *data) +{ + writeq(*(u64 *)data, REG_ADDR(bar, off)); +} + +static void xsc_ia64_write_reg_mr(void *hal, void __iomem *bar, + u32 addr, void *data, int nr, int idx) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + ia64_write_data(_hal, bar, data, nr, idx); + ia64_write_reg_addr(_hal, bar, addr, idx); + initiate_ia64_write_cmd(_hal, bar, idx, nr); +} + +static void xsc_ia64_read(void *hal, void __iomem *bar, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *_hal = hal; + int idx; + + do { + acquire_ia64_lock(_hal, bar, &idx); + } while (idx == -1); + ia64_write_reg_addr(_hal, bar, addr, idx); + initiate_ia64_read_cmd(_hal, bar, idx, nr); + wait_for_ia64_complete(_hal, bar, idx); + ia64_read_data(_hal, bar, data, nr, idx); + release_ia64_lock(_hal, bar, idx); +} + +static void xsc_ia64_write(void *hal, void __iomem *bar, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *_hal = hal; + int idx; + + do { + acquire_ia64_lock(_hal, bar, &idx); + } while (idx == -1); + ia64_write_data(_hal, bar, data, nr, idx); + ia64_write_reg_addr(_hal, bar, addr, idx); + initiate_ia64_write_cmd(_hal, bar, idx, nr); + release_ia64_lock(_hal, bar, idx); +} + +static void diamond_next_ring_tx_doorbell(void *hal, void __iomem *bar, u32 sqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union xsc2_send_doorbell { + struct{ + u64 next_pid : 17; + u64 qp_id : 10; + }; + u64 raw; + } db; + + db.next_pid = next_pid; + db.qp_id = sqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + xsc_write64(bar, _hal->regs->tx_db, &db.raw); +} + +static void diamond_next_ring_rx_doorbell(void *hal, void __iomem *bar, u32 rqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union xsc2_recv_doorbell { + struct{ + u64 next_pid : 14; + u64 qp_id : 10; + }; + u64 raw; + } db; + + db.next_pid = next_pid; + db.qp_id = rqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + xsc_write64(bar, _hal->regs->rx_db, &db.raw); +} + +union diamond_next_cq_doorbell { + struct{ + u64 cq_next_cid:23; + u64 cq_id:10; + u64 cq_sta:2; + }; + u64 raw; +}; + +static void diamond_next_update_cq_db(void *hal, void __iomem *bar, + u32 cqn, u32 next_cid, u8 solicited) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union diamond_next_cq_doorbell db; + + db.cq_next_cid = next_cid; + db.cq_id = cqn; + db.cq_sta = solicited ? CQ_STAT_ARM_SOLICITED : CQ_STAT_ARM_NEXT; + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. 
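+	 * Note that, compared with diamond_impl.c, the diamond-next doorbell
+	 * layouts carry narrower queue identifiers (10-bit SQ/RQ/CQ numbers).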
+ */ + wmb(); + xsc_write64(bar, _hal->regs->complete_db, &db.raw); +} + +static void diamond_next_set_cq_ci(void *hal, void __iomem *bar, u32 cqn, u32 next_cid) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union diamond_next_cq_doorbell db; + + db.cq_next_cid = next_cid; + db.cq_id = cqn; + db.cq_sta = CQ_STAT_FIRED; + /* make sure val write to memory done */ + wmb(); + xsc_write64(bar, _hal->regs->complete_db, &db.raw); +} + +static void diamond_next_set_eq_ci(void *hal, void __iomem *bar, u32 eqn, u32 next_cid, u8 arm) +{ + struct xsc_hw_abstract_layer *_hal = hal; + union diamond_next_eq_doorbell { + struct{ + u64 eq_next_cid : 12; + u64 eq_id : 8; + u64 eq_sta : 1; + }; + u64 raw; + } db; + + db.eq_next_cid = next_cid; + db.eq_id = eqn; + db.eq_sta = !!arm; + + /* make sure val write to memory done */ + wmb(); + xsc_write64(bar, _hal->regs->event_db, &db.raw); +} + +static u8 diamond_next_get_mr_page_mode(u8 page_shift) +{ + return page_shift; +} + +static inline u32 diamond_next_mkey_to_idx(u32 mkey) +{ + return mkey >> 8; +} + +static inline u32 diamond_next_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << 8; +} + +static void diamond_next_set_mpt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_register_mr_request *req = mr_request; + union xsc_mpt_entry { + struct { + u64 va; + u64 mem_size:38; + u64 pdn:24; + u64 key:8; + u64 mtt_base:20; + u64 acc:4; + u64 page_mode:5; + u64 is_gpu:1; + u64 mem_map_dis:1; + } __packed; + u64 raw[3]; + } mpt; + u64 va = be64_to_cpu(req->va_base); + u32 mkey = be32_to_cpu(req->mkey); + u32 idx = diamond_next_mkey_to_idx(mkey); + u32 reg_addr = _hal->regs->mpt_tbl_addr + idx * (_hal->regs->mpt_tbl_width >> 3); + + mpt.va = va; + mpt.mem_size = be64_to_cpu(req->len); + mpt.pdn = be32_to_cpu(req->pdn); + mpt.key = mkey & 0xff; + mpt.mtt_base = mtt_base; + mpt.acc = req->acc; + mpt.page_mode = req->page_mode; + mpt.is_gpu = req->is_gpu; + mpt.mem_map_dis = req->map_en; + + xsc_ia64_write_reg_mr(_hal, bar, reg_addr, mpt.raw, ARRAY_SIZE(mpt.raw), iae_idx); +} + +static void diamond_next_clear_mpt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_unregister_mr_mbox_in *req = mr_request; + union xsc_mpt_entry { + struct { + u64 va; + u64 mem_size:38; + u64 pdn:24; + u64 key:8; + u64 mtt_base:20; + u64 acc:4; + u64 page_mode:5; + u64 is_gpu:1; + u64 mem_map_dis:1; + }; + u64 raw[3]; + } mpt; + u32 idx = be32_to_cpu(req->mkey); + u32 reg_addr = _hal->regs->mpt_tbl_addr + idx * (_hal->regs->mpt_tbl_width >> 3); + + memset(&mpt, 0x00, sizeof(mpt)); + xsc_ia64_write_reg_mr(_hal, bar, reg_addr, mpt.raw, ARRAY_SIZE(mpt.raw), iae_idx); +} + +#define PAGE_SHIFT_4K 12 +static void diamond_next_set_mtt_tbl(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *_hal = hal; + struct xsc_register_mr_request *req = mr_request; + int i; + u32 pa_num = be32_to_cpu(req->pa_num); + u64 pa; + u32 reg_addr; + + for (i = 0; i < pa_num; i++) { + pa = req->pas[i]; + pa = be64_to_cpu(pa); + pa = pa >> PAGE_SHIFT_4K; + reg_addr = _hal->regs->mtt_inst_base_addr + (mtt_base + i) * sizeof(u64); + xsc_write64(bar, reg_addr, &pa); + } +} + +static void diamond_next_set_read_done_msix_vector(void *hal, void __iomem *bar, u32 vector) +{ +} + +static int diamond_next_dma_write_tbl_once(void *hal, void __iomem *bar, + u32 data_len, u64 dma_wr_addr, 
+ u32 host_id, u32 func_id, u64 success[2], u32 size) +{ + return -1; +} + +static void diamond_next_dma_read_tbl(void *hal, void __iomem *bar, u32 host_id, u32 func_id, + u64 data_addr, u32 tbl_id, u32 burst_num, u32 tbl_start_addr) +{ +} + +static const u32 xsc_msg_opcode[][2][2] = { + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT, + [XSC_MSG_OPCODE_RAW][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND, + [XSC_MSG_OPCODE_RAW][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +struct diamond_next_cqe { + u8 error_code; + __le32 qp_id:15; + u8 raw_is_cut:1; + u8 se:1; + u8 has_pph:1; + u8 type:1; + u8 with_immdt:1; + u8 csum_err:4; + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le64 ts:48; + __le16 wqe_id; + u8 msg_opcode; + u8 rsv; + __le16 rsv1[2]; + __le16 rsv2:15; + u8 owner:1; +}; + +static bool diamond_next_is_err_cqe(void *cqe) +{ + struct diamond_next_cqe *_cqe = cqe; + + return !!_cqe->error_code; +} + +static u8 diamond_next_get_cqe_error_code(void *cqe) +{ + struct diamond_next_cqe *_cqe = cqe; + + return _cqe->error_code; +} + +static u8 diamond_next_get_cqe_opcode(void *cqe) +{ + struct diamond_next_cqe *_cqe = cqe; + u8 msg_opcode = _cqe->msg_opcode; + + if (_cqe->error_code) + return _cqe->type ? XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR; + if (msg_opcode != XSC_MSG_OPCODE_RAW && msg_opcode != XSC_MSG_OPCODE_RDMA_WRITE) + return XSC_OPCODE_RDMA_CQE_ERROR; + return xsc_msg_opcode[msg_opcode][_cqe->type][_cqe->with_immdt]; +} + +static u32 diamond_next_get_max_mtt_num(void *hal) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + return _hal->regs->mtt_inst_depth << _hal->regs->mtt_inst_num_log; +} + +static u32 diamond_next_get_max_mpt_num(void *hal) +{ + struct xsc_hw_abstract_layer *_hal = hal; + + return _hal->regs->mpt_tbl_depth; +} + +struct diamond_next_data_seg { + u32 length; + u32 key; + u64 addr; +}; + +static void diamond_next_set_data_seg(void *data_seg, u32 length, u32 key, u64 addr) +{ + struct diamond_next_data_seg *seg = data_seg; + + seg->length = length; + seg->key = key; + seg->addr = addr; +} + +static bool diamond_next_skb_need_linearize(int ds_num) +{ + return ds_num > 2; +} + +static struct xsc_hw_ops diamond_next_arch_ops = { + .read = xsc_read64, + .write = xsc_write64, + .ia_read = xsc_ia64_read, + .ia_write = xsc_ia64_write, + .ring_tx_doorbell = diamond_next_ring_tx_doorbell, + .ring_rx_doorbell = diamond_next_ring_rx_doorbell, + .update_cq_db = diamond_next_update_cq_db, + .set_cq_ci = diamond_next_set_cq_ci, + .set_eq_ci = diamond_next_set_eq_ci, + .get_mr_page_mode = diamond_next_get_mr_page_mode, + .mkey_to_idx = diamond_next_mkey_to_idx, + .idx_to_mkey = diamond_next_idx_to_mkey, + .set_mpt = diamond_next_set_mpt_tbl, + .clear_mpt = diamond_next_clear_mpt_tbl, + .set_mtt = diamond_next_set_mtt_tbl, + .set_read_done_msix_vector = diamond_next_set_read_done_msix_vector, + .dma_write_tbl_once = diamond_next_dma_write_tbl_once, + .dma_read_tbl = diamond_next_dma_read_tbl, + .is_err_cqe = diamond_next_is_err_cqe, + .get_cqe_error_code = diamond_next_get_cqe_error_code, + .get_cqe_opcode = diamond_next_get_cqe_opcode, + .get_max_mtt_num = diamond_next_get_max_mtt_num, + .get_max_mpt_num = 
diamond_next_get_max_mpt_num, + .set_data_seg = diamond_next_set_data_seg, + .skb_need_linearize = diamond_next_skb_need_linearize, +}; + +static struct xsc_hw_reg diamond_next_pf_regs = { + .cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR - 0xa0000000, + .cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR - 0xa0000000, + .cpm_lock_avail = HIF_CPM_LOCK_AVAIL_REG_ADDR - 0xa0000000, + .cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR - 0xa0000000, + .cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR - 0xa0000000, + .cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR - 0xa0000000, + .cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR - 0xa0000000, + .req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR - 0xa0000000, + .req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR - 0xa0000000, + .rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR - 0xa0000000, + .rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR - 0xa0000000, + .req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR - 0xa0000000, + .req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR - 0xa0000000, + .rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR - 0xa0000000, + .rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR - 0xa0000000, + .msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR - 0xa0000000, + .element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR - 0xa0000000, + .q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR - 0xa0000000, + .interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR - 0xa0000000, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +static struct xsc_hw_reg diamond_next_vf_regs = { + .tx_db = TX_DB_FUNC_MEM_ADDR, + .rx_db = RX_DB_FUNC_MEM_ADDR, + .complete_db = DB_CQ_FUNC_MEM_ADDR, + .complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR, + .event_db = DB_EQ_FUNC_MEM_ADDR, + .cpm_get_lock = CPM_LOCK_GET_REG_ADDR, + .cpm_put_lock = CPM_LOCK_PUT_REG_ADDR, + .cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR, + .cpm_data_mem = CPM_IDA_DATA_MEM_ADDR, + .cpm_cmd = CPM_IDA_CMD_REG_ADDR, + .cpm_addr = CPM_IDA_ADDR_REG_ADDR, + .cpm_busy = CPM_IDA_BUSY_REG_ADDR, + .req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR, + .req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR, + .rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR, + .rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR, + .req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR, + .req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR, + .rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR, + .rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR, + .msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR, + .element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR, + .q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR, + .interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR, + .mpt_tbl_addr = MMC_MPT_TBL_MEM_ADDR - 0xa0000000, + .mpt_tbl_depth = MMC_MPT_TBL_MEM_DEPTH, + .mpt_tbl_width = MMC_MPT_TBL_MEM_WIDTH, + .mtt_inst_base_addr = MMC_MTT_TBL_MEM_ADDR - 0xa0000000, + .mtt_inst_stride = 0, + .mtt_inst_num_log = 0, + .mtt_inst_depth = MMC_MTT_TBL_MEM_DEPTH, +}; + +struct xsc_hw_abstract_layer diamond_next_pf_hal = { + .ops = &diamond_next_arch_ops, + .regs = &diamond_next_pf_regs, +}; + +struct xsc_hw_abstract_layer diamond_next_vf_hal = { + .ops = &diamond_next_arch_ops, + .regs = &diamond_next_vf_regs, +}; + +struct xsc_hw_abstract_layer *get_diamond_next_pf_hal(void) +{ + return &diamond_next_pf_hal; +} + +struct xsc_hw_abstract_layer 
*get_diamond_next_vf_hal(void) +{ + return &diamond_next_vf_hal; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_reg.h b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..7b5cdee3ad17a1a032943a0a327e35fa1c21ceaa --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/diamond_reg.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +/* generated time: + * Tue Jan 21 16:02:05 CST 2025 + */ + +#ifndef XSC_HW_H +#define XSC_HW_H + +//hif_irq_csr_defines.h + +//hif_cpm_csr_defines.h +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000208 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000210 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa0000218 +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000200 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x8 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000000 + +//mmc_csr_defines.h +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 76800 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa2100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa2800000 +#define MMC_MTT_TBL_MEM_SIZE 8 +#define MMC_MTT_TBL_MEM_STRIDE 0x100000 + +//clsf_dma_csr_defines.h + +//hif_tbl_csr_defines.h + +//hif_cmdqm_csr_defines.h +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1101100 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1101180 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa1101300 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1101380 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1101000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1101080 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa1101200 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa1101280 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1101480 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1100100 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1100110 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1101400 + +//PSV use +//hif_irq_csr_defines.h +#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1004000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa1000148 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa1000208 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa10001c0 +#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa10001b8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa10001c8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa10001d0 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa10001d8 +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa10001e0 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa10001b0 + +#endif /* XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/xsc_hal.c b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/xsc_hal.c new file mode 100644 index 0000000000000000000000000000000000000000..1520d117c960e08c414871ec0324fbdbfc50f1e2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/xsc_hal.c @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 - 2024, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_core.h" +#include "xsc_hal.h" + +void xsc_init_hal(struct xsc_core_device *xdev, u32 device_id) +{ + struct xsc_hw_abstract_layer *hal; + + switch (device_id) { + case XSC_MC_PF_DEV_ID: + case XSC_MF_HOST_PF_DEV_ID: + case XSC_MF_SOC_PF_DEV_ID: + case XSC_MS_PF_DEV_ID: + case XSC_MV_HOST_PF_DEV_ID: + case XSC_MV_SOC_PF_DEV_ID: + if (is_pf_bar_compressed(xdev)) + hal = get_andes_bar_compressed_pf_hal(); + else + hal = get_andes_pf_hal(); + hal->hw_arch = HW_ARCH_ANDES; + break; + case XSC_MC_VF_DEV_ID: + case XSC_MF_HOST_VF_DEV_ID: + case XSC_MS_VF_DEV_ID: + case XSC_MV_HOST_VF_DEV_ID: + hal = get_andes_vf_hal(); + hal->hw_arch = HW_ARCH_ANDES; + break; + case XSC_MC_PF_DEV_ID_DIAMOND: + hal = get_diamond_pf_hal(); + hal->hw_arch = HW_ARCH_DIAMOND; + break; + case XSC_MC_PF_DEV_ID_DIAMOND_NEXT: + hal = get_diamond_next_pf_hal(); + hal->hw_arch = HW_ARCH_DIAMOND_NEXT; + break; + default: + hal = get_andes_pf_hal(); + hal->hw_arch = HW_ARCH_ANDES; + break; + } + xdev->hal = hal; +} + +void xsc_set_mtt_info(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->regs->mpt_tbl_addr = xdev->caps.mpt_tbl_addr - 0xa0000000; + hal->regs->mpt_tbl_depth = xdev->caps.mpt_tbl_depth; + hal->regs->mpt_tbl_width = xdev->caps.mpt_tbl_width; + hal->regs->mtt_inst_base_addr = xdev->caps.mtt_inst_base_addr - 0xa0000000; + hal->regs->mtt_inst_stride = xdev->caps.mtt_inst_stride; + hal->regs->mtt_inst_num_log = xdev->caps.mtt_inst_num_log; + hal->regs->mtt_inst_depth = xdev->caps.mtt_inst_depth; +} + +void xsc_set_pf_db_addr(struct xsc_core_device *xdev, + u64 tx_db, u64 rx_db, u64 cq_db, u64 cq_reg, u64 eq_db) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + if (is_pf_bar_compressed(xdev)) + return; + + if (xsc_core_is_pf(xdev)) { + hal->regs->tx_db = tx_db - 0xa0000000; + hal->regs->rx_db = rx_db - 0xa0000000; + hal->regs->complete_db = cq_db - 0xa0000000; + hal->regs->complete_reg = cq_reg - 0xa0000000; + hal->regs->event_db = eq_db - 0xa0000000; + } +} + +void xsc_get_db_addr(struct xsc_core_device *xdev, + u64 *tx_db, u64 *rx_db, u64 *cq_db, u64 *cq_reg, u64 *eq_db) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + if (!xsc_core_is_pf(xdev) || is_pf_bar_compressed(xdev)) { + if (tx_db) + *tx_db = hal->regs->tx_db; + if (rx_db) + *rx_db = hal->regs->rx_db; + if (cq_db) + *cq_db = hal->regs->complete_db; + if (cq_reg) + *cq_reg = hal->regs->complete_reg; + if (eq_db) + *eq_db = hal->regs->event_db; + } else { + if (tx_db) + *tx_db = hal->regs->tx_db + 0xa0000000; + if (rx_db) + *rx_db = hal->regs->rx_db + 0xa0000000; + if (cq_db) + *cq_db = hal->regs->complete_db + 0xa0000000; + if (cq_reg) + *cq_reg = hal->regs->complete_reg + 0xa0000000; + if (eq_db) + *eq_db = hal->regs->event_db + 0xa0000000; + } +} +EXPORT_SYMBOL_GPL(xsc_get_db_addr); + +void xsc_read_reg(struct xsc_core_device *xdev, u32 addr, void *data, int len) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->read(xdev->bar, addr, data, len); +} + +void xsc_write_reg(struct xsc_core_device *xdev, u32 addr, void *data) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, addr, data); +} + +void xsc_ia_read(struct xsc_core_device *xdev, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->ia_read(hal, xdev->bar, addr, data, nr); +} + +void xsc_ia_write(struct xsc_core_device *xdev, u32 addr, void *data, int nr) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + 
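+	/* xsc_ia_read()/xsc_ia_write() funnel through the per-arch CPM
+	 * indirect-access engine: the arch implementation grabs a hardware
+	 * lock slot, programs the data/address registers and kicks the command.
+	 */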
hal->ops->ia_write(hal, xdev->bar, addr, data, nr); +} + +void xsc_update_tx_db(struct xsc_core_device *xdev, u32 sqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->ring_tx_doorbell(hal, xdev->bar, sqn, next_pid); +} +EXPORT_SYMBOL_GPL(xsc_update_tx_db); + +void xsc_update_rx_db(struct xsc_core_device *xdev, u32 rqn, u32 next_pid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->ring_rx_doorbell(hal, xdev->bar, rqn, next_pid); +} +EXPORT_SYMBOL_GPL(xsc_update_rx_db); + +void xsc_arm_cq(struct xsc_core_device *xdev, u32 cqn, u32 next_cid, u8 solicited) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->update_cq_db(hal, xdev->bar, cqn, next_cid, solicited); +} +EXPORT_SYMBOL_GPL(xsc_arm_cq); + +void xsc_update_cq_ci(struct xsc_core_device *xdev, u32 cqn, u32 next_cid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->set_cq_ci(hal, xdev->bar, cqn, next_cid); +} +EXPORT_SYMBOL_GPL(xsc_update_cq_ci); + +void xsc_update_eq_ci(struct xsc_core_device *xdev, u32 eqn, u32 next_cid, u8 arm) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->set_eq_ci(hal, xdev->bar, eqn, next_cid, arm); +} +EXPORT_SYMBOL_GPL(xsc_update_eq_ci); + +void xsc_update_cmdq_req_pid(struct xsc_core_device *xdev, u32 req_pid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->req_pid_addr, &req_pid); +} + +void xsc_update_cmdq_req_cid(struct xsc_core_device *xdev, u32 req_cid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->req_cid_addr, &req_cid); +} + +void xsc_update_cmdq_rsp_pid(struct xsc_core_device *xdev, u32 rsp_pid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->rsp_pid_addr, &rsp_pid); +} + +void xsc_update_cmdq_rsp_cid(struct xsc_core_device *xdev, u32 rsp_cid) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->rsp_cid_addr, &rsp_cid); +} + +u32 xsc_get_cmdq_req_pid(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 val; + + hal->ops->read(xdev->bar, hal->regs->req_pid_addr, &val, sizeof(val)); + return val; +} + +u32 xsc_get_cmdq_req_cid(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 val; + + hal->ops->read(xdev->bar, hal->regs->req_cid_addr, &val, sizeof(val)); + return val; +} + +u32 xsc_get_cmdq_rsp_pid(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 val; + + hal->ops->read(xdev->bar, hal->regs->rsp_pid_addr, &val, sizeof(val)); + return val; +} + +u32 xsc_get_cmdq_rsp_cid(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 val; + + hal->ops->read(xdev->bar, hal->regs->rsp_cid_addr, &val, sizeof(val)); + return val; +} + +u32 xsc_get_cmdq_log_stride(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 val; + + hal->ops->read(xdev->bar, hal->regs->element_sz_addr, &val, sizeof(val)); + return val; +} + +void xsc_set_cmdq_depth(struct xsc_core_device *xdev, u32 depth) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->q_depth_addr, &depth); +} + +void xsc_set_cmdq_req_buf_addr(struct xsc_core_device *xdev, u32 haddr, u32 laddr) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->req_buf_h_addr, &haddr); + hal->ops->write(xdev->bar, hal->regs->req_buf_l_addr, &laddr); +} + 
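+/* Illustrative sketch only, not part of the original patch: a caller holding
+ * the command-queue request buffer as a dma_addr_t would split it into the
+ * 32-bit halves these helpers expect, for example with the standard
+ * upper_32_bits()/lower_32_bits() macros. The helper name is hypothetical.
+ */
+static inline void xsc_example_set_cmdq_req_buf(struct xsc_core_device *xdev,
+						dma_addr_t buf_dma)
+{
+	xsc_set_cmdq_req_buf_addr(xdev, upper_32_bits(buf_dma), lower_32_bits(buf_dma));
+}
+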
+void xsc_set_cmdq_rsp_buf_addr(struct xsc_core_device *xdev, u32 haddr, u32 laddr) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->rsp_buf_h_addr, &haddr); + hal->ops->write(xdev->bar, hal->regs->rsp_buf_l_addr, &laddr); +} + +void xsc_set_cmdq_msix_vector(struct xsc_core_device *xdev, u32 vector) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->write(xdev->bar, hal->regs->msix_vec_addr, &vector); +} + +void xsc_check_cmdq_status(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 status; + + hal->ops->read(xdev->bar, hal->regs->interrupt_stat_addr, &status, sizeof(status)); + if (status) { + status = 0xf; + hal->ops->write(xdev->bar, hal->regs->interrupt_stat_addr, &status); + } +} + +int xsc_handle_cmdq_interrupt(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + union interrupt_stat { + struct { + u32 hw_read_req_err:1; + u32 hw_write_req_err:1; + u32 req_pid_err:1; + u32 rsp_cid_err:1; + }; + u32 raw; + } stat; + + int err = 0; + int retry = 0; + + hal->ops->read(xdev->bar, hal->regs->interrupt_stat_addr, &stat.raw, sizeof(stat.raw)); + while (stat.raw != 0) { + err++; + if (stat.hw_read_req_err) { + retry = 1; + stat.hw_read_req_err = 0; + xsc_core_err(xdev, "hw report read req from host failed!\n"); + } else if (stat.hw_write_req_err) { + retry = 1; + stat.hw_write_req_err = 0; + xsc_core_err(xdev, "hw report write req to fw failed!\n"); + } else if (stat.req_pid_err) { + stat.req_pid_err = 0; + xsc_core_err(xdev, "hw report unexpected req pid!\n"); + } else if (stat.rsp_cid_err) { + stat.rsp_cid_err = 0; + xsc_core_err(xdev, "hw report unexpected rsp cid!\n"); + } else { + stat.raw = 0; + xsc_core_err(xdev, "ignore unknown interrupt!\n"); + } + } + + if (retry) { + if (xdev->cmd.retry_cnt >= XSC_CMD_MAX_RETRY_CNT) { + xsc_core_warn(xdev, "err handler have retried for %d times, stop retry\n", + xdev->cmd.retry_cnt); + goto clear_err_int; + } + hal->ops->write(xdev->bar, hal->regs->req_pid_addr, &xdev->cmd.cmd_pid); + xdev->cmd.retry_cnt++; + } + +clear_err_int: + if (err) { + stat.raw = 0xf; + hal->ops->write(xdev->bar, hal->regs->interrupt_stat_addr, &stat.raw); + } + + return err; +} + +u8 xsc_get_mr_page_mode(struct xsc_core_device *xdev, u8 page_shift) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->get_mr_page_mode(page_shift); +} +EXPORT_SYMBOL_GPL(xsc_get_mr_page_mode); + +u32 xsc_mkey_to_idx(struct xsc_core_device *xdev, u32 mkey) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->mkey_to_idx(mkey); +} + +u32 xsc_idx_to_mkey(struct xsc_core_device *xdev, u32 mkey_idx) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->idx_to_mkey(mkey_idx); +} + +void xsc_set_mpt(struct xsc_core_device *xdev, int iae_idx, u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->set_mpt(hal, xdev->bar, iae_idx, mtt_base, mr_request); +} + +void xsc_clear_mpt(struct xsc_core_device *xdev, int iae_idx, u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->clear_mpt(hal, xdev->bar, iae_idx, mtt_base, mr_request); +} + +void xsc_set_mtt(struct xsc_core_device *xdev, int iae_idx, u32 mtt_base, void *mr_request) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->set_mtt(hal, xdev->bar, iae_idx, mtt_base, mr_request); +} + +void xsc_set_read_done_msix_vector(struct xsc_core_device 
*xdev, u32 vector) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->set_read_done_msix_vector(hal, xdev->bar, vector); +} + +int xsc_dma_write_tbl_once(struct xsc_core_device *xdev, u32 data_len, u64 dma_wr_addr, + u32 host_id, u32 func_id, u64 success[2], u32 size) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->dma_write_tbl_once(hal, xdev->bar, data_len, dma_wr_addr, + host_id, func_id, success, size); +} + +void xsc_dma_read_tbl(struct xsc_core_device *xdev, u32 host_id, u32 func_id, u64 data_addr, + u32 tbl_id, u32 burst_num, u32 tbl_start_addr) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->dma_read_tbl(hal, xdev->bar, host_id, func_id, data_addr, + tbl_id, burst_num, tbl_start_addr); +} + +bool xsc_skb_need_linearize(struct xsc_core_device *xdev, int ds_num) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->skb_need_linearize(ds_num); +} +EXPORT_SYMBOL_GPL(xsc_skb_need_linearize); + +bool xsc_is_err_cqe(struct xsc_core_device *xdev, void *cqe) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->is_err_cqe(cqe); +} +EXPORT_SYMBOL_GPL(xsc_is_err_cqe); + +u8 xsc_get_cqe_error_code(struct xsc_core_device *xdev, void *cqe) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->get_cqe_error_code(cqe); +} +EXPORT_SYMBOL_GPL(xsc_get_cqe_error_code); + +u8 xsc_get_cqe_opcode(struct xsc_core_device *xdev, void *cqe) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->get_cqe_opcode(cqe); +} +EXPORT_SYMBOL_GPL(xsc_get_cqe_opcode); + +u16 xsc_get_eth_channel_num(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return xsc_is_diamond_like_arch(hal->hw_arch) ? + 1 : xdev->dev_res->eq_table.num_comp_vectors; +} +EXPORT_SYMBOL_GPL(xsc_get_eth_channel_num); + +u32 xsc_get_max_mtt_num(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->get_max_mtt_num(hal); +} + +u32 xsc_get_max_mpt_num(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return hal->ops->get_max_mpt_num(hal); +} + +u32 xsc_get_rdma_stat_mask(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 mask; + + if (xsc_core_is_pf(xdev)) + mask = xsc_is_diamond_like_arch(hal->hw_arch) ? 0x1FFFFCFF : 0xFFFFFF; + else + mask = 0xfff; + + return mask; +} +EXPORT_SYMBOL_GPL(xsc_get_rdma_stat_mask); + +u32 xsc_get_eth_stat_mask(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + u32 mask; + + if (xsc_core_is_pf(xdev)) + mask = xsc_is_diamond_like_arch(hal->hw_arch) ? 0x30ff : 0x3fff; + else + mask = 0xf; + + return mask; +} +EXPORT_SYMBOL_GPL(xsc_get_eth_stat_mask); + +void xsc_set_data_seg(struct xsc_core_device *xdev, void *data_seg, u64 addr, u32 key, u32 length) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + hal->ops->set_data_seg(data_seg, length, key, addr); +} +EXPORT_SYMBOL_GPL(xsc_set_data_seg); + +u8 xsc_get_mad_msg_opcode(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return xsc_is_diamond_like_arch(hal->hw_arch) ? XSC_MSG_OPCODE_RAW : XSC_MSG_OPCODE_MAD; +} +EXPORT_SYMBOL_GPL(xsc_get_mad_msg_opcode); + +u32 xsc_get_max_qp_depth(struct xsc_core_device *xdev) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + return xsc_is_diamond_like_arch(hal->hw_arch) ? 
xdev->caps.max_wqes : (32 * 1024); +} +EXPORT_SYMBOL_GPL(xsc_get_max_qp_depth); + +bool xsc_check_max_qp_depth(struct xsc_core_device *xdev, u32 *wqe_cnt, u32 max_qp_depth) +{ + struct xsc_hw_abstract_layer *hal = xdev->hal; + + if (*wqe_cnt <= max_qp_depth) + return false; + + if (xsc_is_diamond_like_arch(hal->hw_arch)) + return true; + + *wqe_cnt = max_qp_depth; + return false; +} +EXPORT_SYMBOL_GPL(xsc_check_max_qp_depth); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hal/xsc_hal.h b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/xsc_hal.h new file mode 100644 index 0000000000000000000000000000000000000000..06b72245218f798785dbfd6f02878e566c2cb028 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hal/xsc_hal.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2024 - 2024, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_HAL_H +#define XSC_HAL_H + +enum hw_arch { + HW_ARCH_ANDES, + HW_ARCH_DIAMOND, + HW_ARCH_DIAMOND_NEXT, +}; + +struct xsc_hw_abstract_layer; + +struct xsc_hw_ops { + void (*read)(void __iomem *bar, u32 addr, void *data, int len); + void (*write)(void __iomem *bar, u32 addr, void *data); + void (*ia_read)(void *hal, void __iomem *bar, u32 addr, void *data, int nr); + void (*ia_write)(void *hal, void __iomem *bar, u32 addr, void *data, int nr); + void (*ring_tx_doorbell)(void *hal, void __iomem *bar, u32 sqn, u32 next_pid); + void (*ring_rx_doorbell)(void *hal, void __iomem *bar, u32 rqn, u32 next_pid); + void (*update_cq_db)(void *hal, void __iomem *bar, u32 cqn, u32 next_cid, u8 solicited); + void (*set_cq_ci)(void *hal, void __iomem *bar, u32 cqn, u32 next_cid); + void (*set_eq_ci)(void *hal, void __iomem *bar, u32 cqn, u32 next_cid, u8 arm); + u8 (*get_mr_page_mode)(u8 page_shift); + u32 (*mkey_to_idx)(u32 mkey); + u32 (*idx_to_mkey)(u32 mkey_idx); + void (*set_mpt)(void *hal, void __iomem *bar, int iae_idx, u32 mtt_base, void *mr_request); + void (*clear_mpt)(void *hal, void __iomem *bar, int iae_idx, + u32 mtt_base, void *mr_request); + void (*set_mtt)(void *hal, void __iomem *bar, int iae_idx, u32 mtt_base, void *mr_request); + void (*set_read_done_msix_vector)(void *hal, void __iomem *bar, u32 vector); + int (*dma_write_tbl_once)(void *hal, void __iomem *bar, u32 data_len, u64 dma_wr_addr, + u32 host_id, u32 func_id, u64 success[2], u32 size); + void (*dma_read_tbl)(void *hal, void __iomem *bar, u32 host_id, u32 func_id, u64 data_addr, + u32 tbl_id, u32 burst_num, u32 tbl_start_addr); + bool (*is_err_cqe)(void *cqe); + u8 (*get_cqe_error_code)(void *cqe); + u8 (*get_cqe_opcode)(void *cqe); + u32 (*get_max_mtt_num)(void *hal); + u32 (*get_max_mpt_num)(void *hal); + void (*set_data_seg)(void *data_seg, u32 length, u32 key, u64 addr); + bool (*skb_need_linearize)(int ds_num); +}; + +struct xsc_hw_reg { + u32 tx_db; + u32 rx_db; + u32 complete_db; + u32 complete_reg; + u32 event_db; + u32 cpm_get_lock; + u32 cpm_put_lock; + u32 cpm_lock_avail; + u32 cpm_data_mem; + u32 cpm_cmd; + u32 cpm_addr; + u32 cpm_busy; + u32 req_pid_addr; + u32 req_cid_addr; + u32 rsp_pid_addr; + u32 rsp_cid_addr; + u32 req_buf_h_addr; + u32 req_buf_l_addr; + u32 rsp_buf_h_addr; + u32 rsp_buf_l_addr; + u32 msix_vec_addr; + u32 element_sz_addr; + u32 q_depth_addr; + u32 interrupt_stat_addr; + u32 tbl2irq_rd_done_msix_reg; + u32 dma_ul_busy_reg; + u32 dma_dl_done_reg; + u32 dma_dl_success_reg; + u32 err_code_clr_reg; + u32 dma_rd_table_id_reg; + u32 dma_rd_addr_reg; + u32 indrw_rd_start_reg; + u32 tbl_dl_busy_reg; + u32 
tbl_dl_req_reg; + u32 tbl_dl_addr_l_reg; + u32 tbl_dl_addr_h_reg; + u32 tbl_dl_start_reg; + u32 tbl_ul_req_reg; + u32 tbl_ul_addr_l_reg; + u32 tbl_ul_addr_h_reg; + u32 tbl_ul_start_reg; + u32 tbl_msg_rdy_reg; + u32 mpt_tbl_addr; + u32 mpt_tbl_depth; + u32 mpt_tbl_width; + u32 mtt_inst_base_addr; + u32 mtt_inst_stride; + u32 mtt_inst_num_log; + u32 mtt_inst_depth; +}; + +struct xsc_hw_abstract_layer { + u32 hw_arch; + struct xsc_hw_reg *regs; + struct xsc_hw_ops *ops; +}; + +struct xsc_hw_abstract_layer *get_andes_pf_hal(void); +struct xsc_hw_abstract_layer *get_andes_bar_compressed_pf_hal(void); +struct xsc_hw_abstract_layer *get_andes_vf_hal(void); +struct xsc_hw_abstract_layer *get_diamond_pf_hal(void); +struct xsc_hw_abstract_layer *get_diamond_vf_hal(void); +struct xsc_hw_abstract_layer *get_diamond_next_pf_hal(void); +struct xsc_hw_abstract_layer *get_diamond_next_vf_hal(void); + +enum { + CQ_STAT_FIRED, + CQ_STAT_KEEP, + CQ_STAT_ARM_NEXT, + CQ_STAT_ARM_SOLICITED, +}; + +static inline bool xsc_is_diamond_like_arch(u32 hw_arch) +{ + return hw_arch == HW_ARCH_DIAMOND || hw_arch == HW_ARCH_DIAMOND_NEXT; +} + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c index da4761565f1aab6ca6ae12f11eb81cbcd2ade204..445b3bbfb6fc9d47ca385bedc485145723ec5b05 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c @@ -7,7 +7,7 @@ LIST_HEAD(intf_list); LIST_HEAD(xsc_dev_list); -DEFINE_MUTEX(xsc_intf_mutex); // protect intf_list and xsc_dev_list +DEFINE_MUTEX(xsc_intf_mutex); /* protect intf_list and xsc_dev_list */ static void xsc_add_device(struct xsc_interface *intf, struct xsc_priv *priv) { @@ -159,18 +159,6 @@ void xsc_attach_device(struct xsc_core_device *dev) } EXPORT_SYMBOL(xsc_attach_device); -void xsc_attach_device_by_protocol(struct xsc_core_device *dev, int protocol) -{ - struct xsc_priv *priv = &dev->priv; - struct xsc_interface *intf; - - mutex_lock(&xsc_intf_mutex); - list_for_each_entry(intf, &intf_list, list) - if (intf->protocol == protocol) - xsc_attach_interface(intf, priv); - mutex_unlock(&xsc_intf_mutex); -} - void xsc_detach_device(struct xsc_core_device *dev) { struct xsc_priv *priv = &dev->priv; @@ -266,3 +254,56 @@ int xsc_dev_list_trylock(void) return mutex_trylock(&xsc_intf_mutex); } EXPORT_SYMBOL(xsc_dev_list_trylock); + +static int (*_xsc_get_mdev_info_func)(void *data); + +void xsc_register_get_mdev_info_func(int (*get_mdev_info)(void *data)) +{ + _xsc_get_mdev_info_func = get_mdev_info; +} +EXPORT_SYMBOL(xsc_register_get_mdev_info_func); + +void xsc_get_devinfo(u8 *data, u32 len) +{ + struct xsc_cmd_get_ioctl_info_mbox_out *out = + (struct xsc_cmd_get_ioctl_info_mbox_out *)data; + struct xsc_ioctl_get_devinfo *info; + struct xsc_devinfo *devinfo; + struct xsc_priv *priv; + struct xsc_core_device *xdev; + int used = 0; + + out->hdr.status = 0; + used += sizeof(struct xsc_outbox_hdr) + sizeof(u64); + info = (struct xsc_ioctl_get_devinfo *)(data + used); + info->dev_num = 0; + used += sizeof(u32); + devinfo = (struct xsc_devinfo *)info->data; + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) { + if (used + sizeof(*devinfo) > len) + break; + + xdev = container_of(priv, struct xsc_core_device, priv); + if (!xsc_core_is_pf(xdev)) + continue; + devinfo->domain = cpu_to_be32(pci_domain_nr(xdev->pdev->bus)); + devinfo->bus = cpu_to_be32(xdev->pdev->bus->number); + devinfo->devfn = cpu_to_be32(xdev->pdev->devfn); + if 
(xdev->get_ifname) + xdev->get_ifname(xdev, devinfo->ifname, MAX_IFNAME_LEN); + if (xdev->get_ibdev_name) + xdev->get_ibdev_name(xdev, devinfo->ibdev_name, MAX_IFNAME_LEN); + if (xdev->get_ip_addr) { + xdev->get_ip_addr(xdev, &devinfo->ip_addr); + devinfo->ip_addr = cpu_to_be32(devinfo->ip_addr); + } + devinfo->vendor_id = cpu_to_be32(xdev->pdev->vendor); + devinfo += 1; + info->dev_num++; + } + mutex_unlock(&xsc_intf_mutex); + + info->dev_num += _xsc_get_mdev_info_func((void *)devinfo); + info->dev_num = cpu_to_be32(info->dev_num); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c index 3c91a72fc6d4bb6cca0c1f26a7f607dfd732fba6..3c3869588e36e538ee07ee2501e7927594f81a9e 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c @@ -11,7 +11,6 @@ #include "common/xsc_port_ctrl.h" #include "devlink.h" #include "eswitch.h" -#include "fw/xsc_counters.h" #include "xsc_pci_ctrl.h" unsigned int xsc_debug_mask; @@ -22,7 +21,7 @@ MODULE_PARM_DESC(debug_mask, unsigned int xsc_log_level = XSC_LOG_LEVEL_WARN; module_param_named(log_level, xsc_log_level, uint, 0644); MODULE_PARM_DESC(log_level, - "lowest log level to print: 0=debug, 1=info, 2=warning, 3=error. Default=1"); + "lowest log level to print: 0=debug, 1=info, 2=warning, 3=error. Default=2"); EXPORT_SYMBOL(xsc_log_level); static bool probe_vf = 1; @@ -32,13 +31,18 @@ MODULE_PARM_DESC(probe_vf, "probe VFs or not, 0 = not probe, 1 = probe. Default static bool xsc_hw_reset; #define DRIVER_NAME "xsc_pci" -#define DRIVER_VERSION "0.1.0" -#define ETH_DRIVER_NAME "xsc_eth" +#define ETH_DRIVER_NAME "xsc_eth" + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Yunsilicon network adapters pci driver"); +MODULE_VERSION(DRIVER_VERSION); static const struct pci_device_id xsc_pci_id_table[] = { { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID) }, { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_VF_DEV_ID), .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID_DIAMOND) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID_DIAMOND_NEXT) }, { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID) }, { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_VF_DEV_ID), .driver_data = XSC_PCI_DEV_IS_VF }, @@ -62,7 +66,9 @@ static const struct xsc_device_product_info xsc_product_list[] = { XSC_SUB_DEV_ID_MC_100, "metaConnect-100")}, {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, XSC_SUB_DEV_ID_MC_200, "metaConnect-200")}, - {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID_DIAMOND, + XSC_SUB_DEV_ID_MC_400S, "metaConnect-400S")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID_DIAMOND_NEXT, XSC_SUB_DEV_ID_MC_400S, "metaConnect-400S")}, {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, XSC_SUB_DEV_ID_MF_50, "metaFusion-50")}, @@ -80,6 +86,8 @@ static const struct xsc_device_product_info xsc_product_list[] = { XSC_SUB_DEV_ID_MS_400M, "metaScale-400M")}, {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, XSC_SUB_DEV_ID_MS_200_OCP, "metaScale-200-OCP")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_100S_OCP, "metaScale-100S-OCP")}, {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID, XSC_SUB_DEV_ID_MV_100, "metaVisor-100")}, {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID, @@ -112,15 +120,6 @@ static bool need_write_reg_directly(void *in) int 
xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, void *out, int out_size) { - struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)in; - - hdr->ver = 0; - if (hdr->ver != 0) { - xsc_core_warn(dev, "recv an unexpected cmd ver = %d, opcode = %d\n", - be16_to_cpu(hdr->ver), be16_to_cpu(hdr->opcode)); - WARN_ON(hdr->ver != 0); - } - if (need_write_reg_directly(in)) return xsc_cmd_write_reg_directly(dev, in, in_size, out, out_size, dev->glb_func_id); @@ -172,7 +171,7 @@ static void xsc_pci_disable_device(struct xsc_core_device *dev) mutex_unlock(&dev->pci_status_mutex); } -int xsc_priv_init(struct xsc_core_device *dev) +static int xsc_priv_init(struct xsc_core_device *dev) { struct xsc_priv *priv = &dev->priv; @@ -186,7 +185,7 @@ int xsc_priv_init(struct xsc_core_device *dev) return 0; } -int xsc_dev_res_init(struct xsc_core_device *dev) +static int xsc_dev_res_init(struct xsc_core_device *dev) { struct xsc_dev_resource *dev_res = NULL; @@ -196,7 +195,7 @@ int xsc_dev_res_init(struct xsc_core_device *dev) dev->dev_res = dev_res; /* init access lock */ - spin_lock_init(&dev->reg_access_lock.lock); + spin_lock_init(&dev->reg_access_lock); mutex_init(&dev_res->alloc_mutex); mutex_init(&dev_res->pgdir_mutex); INIT_LIST_HEAD(&dev_res->pgdir_list); @@ -205,38 +204,12 @@ int xsc_dev_res_init(struct xsc_core_device *dev) return 0; } -void xsc_dev_res_cleanup(struct xsc_core_device *dev) +static void xsc_dev_res_cleanup(struct xsc_core_device *dev) { kfree(dev->dev_res); dev->dev_res = NULL; } -void xsc_init_reg_addr(struct xsc_core_device *dev) -{ - if (xsc_core_is_pf(dev)) { - dev->regs.cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR; - dev->regs.cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR; - dev->regs.cpm_lock_avail = HIF_CPM_LOCK_AVAIL_REG_ADDR; - dev->regs.cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR; - dev->regs.cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR; - dev->regs.cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR; - dev->regs.cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR; - } else { - dev->regs.tx_db = TX_DB_FUNC_MEM_ADDR; - dev->regs.rx_db = RX_DB_FUNC_MEM_ADDR; - dev->regs.complete_db = DB_CQ_FUNC_MEM_ADDR; - dev->regs.complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR; - dev->regs.event_db = DB_EQ_FUNC_MEM_ADDR; - dev->regs.cpm_get_lock = CPM_LOCK_GET_REG_ADDR; - dev->regs.cpm_put_lock = CPM_LOCK_PUT_REG_ADDR; - dev->regs.cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR; - dev->regs.cpm_data_mem = CPM_IDA_DATA_MEM_ADDR; - dev->regs.cpm_cmd = CPM_IDA_CMD_REG_ADDR; - dev->regs.cpm_addr = CPM_IDA_ADDR_REG_ADDR; - dev->regs.cpm_busy = CPM_IDA_BUSY_REG_ADDR; - } -} - int xsc_dev_init(struct xsc_core_device *dev) { int err = 0; @@ -333,7 +306,7 @@ static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id dev->bar_num = bar_num; dev->bar = bar_base; - xsc_init_reg_addr(dev); + xsc_init_hal(dev, id->device); return 0; @@ -363,7 +336,6 @@ static int xsc_check_cmdq_version(struct xsc_core_device *dev) { struct xsc_cmd_query_cmdq_ver_mbox_out *out; struct xsc_cmd_query_cmdq_ver_mbox_in in; - int err; out = kzalloc(sizeof(*out), GFP_KERNEL); @@ -398,7 +370,7 @@ static int xsc_check_cmdq_version(struct xsc_core_device *dev) return err; } -int xsc_reset_function_resource(struct xsc_core_device *dev) +static int xsc_reset_function_resource(struct xsc_core_device *dev) { struct xsc_function_reset_mbox_in in; struct xsc_function_reset_mbox_out out; @@ -414,24 +386,6 @@ int xsc_reset_function_resource(struct xsc_core_device *dev) return 0; } -static int xsc_fpga_not_supported(struct xsc_core_device *dev) -{ -#define FPGA_VERSION_H 
0x100 -#define ASIC_VERSION_H 0x20230423 - u32 ver_h; - - if (!xsc_core_is_pf(dev)) - return 0; - - ver_h = REG_RD32(dev, HIF_CPM_CHIP_VERSION_H_REG_ADDR); - if (ver_h != FPGA_VERSION_H && ver_h != ASIC_VERSION_H) { - xsc_core_err(dev, "fpga version 0x%x not supported\n", ver_h); - return 1; - } - - return 0; -} - int xsc_chip_type(struct xsc_core_device *dev) { switch (dev->pdev->device) { @@ -455,28 +409,157 @@ int xsc_chip_type(struct xsc_core_device *dev) } EXPORT_SYMBOL(xsc_chip_type); +#if defined(__sw_64__) +static void xsc_enable_relaxed_order(struct xsc_core_device *dev) +{ + struct xsc_cmd_enable_relaxed_order_in in; + struct xsc_cmd_enable_relaxed_order_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_RELAXED_ORDER); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto err_out; + + if (out.hdr.status) { + err = xsc_cmd_status_to_err(&out.hdr); + goto err_out; + } + + return; +err_out: + xsc_core_warn(dev, "Failed to enable relaxed order %d\n", err); +} +#endif + static int xsc_cmd_activate_hw_config(struct xsc_core_device *dev) { struct xsc_cmd_activate_hw_config_mbox_in in; struct xsc_cmd_activate_hw_config_mbox_out out; int err = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACTIVATE_HW_CONFIG); err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; if (out.hdr.status) return xsc_cmd_status_to_err(&out.hdr); - dev->board_info->hw_config_activated = 1; return 0; } -static int xsc_activate_hw_config(struct xsc_core_device *dev) +static int xsc_cmd_query_guid(struct xsc_core_device *dev) +{ + struct xsc_cmd_query_guid_mbox_in in; + struct xsc_cmd_query_guid_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_GUID); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + dev->board_info->guid = out.guid; + return 0; +} + +static int xsc_cmd_announce_driver_instance(struct xsc_core_device *dev, u8 status) +{ + struct xsc_cmd_announce_driver_instance_mbox_in in; + struct xsc_cmd_announce_driver_instance_mbox_out out; + struct xsc_core_device *rep_dev; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ANNOUNCE_DRIVER_INSTANCE); + in.status = status; + if (status == DRIVER_INSTANCE_UPDATE_REP_FUNC) { + rep_dev = list_first_entry_or_null(&dev->board_info->func_list, + struct xsc_core_device, func_node); + in.rep_func_id = cpu_to_be16(rep_dev->glb_func_id); + } + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) { + if (out.hdr.status == XSC_CMD_STATUS_NOT_SUPPORTED) { + dev->board_info->resource_access_mode = dev->reg_mr_via_cmdq; + return 0; + } + return xsc_cmd_status_to_err(&out.hdr); + } + + if (status == DRIVER_INSTANCE_LAUNCH) + dev->board_info->resource_access_mode = out.resource_access_mode; + return 0; +} + +static int xsc_board_level_init(struct xsc_core_device *dev) { - if (dev->board_info->hw_config_activated) + int err = 0; + + if (dev->board_info->ref_cnt) { + dev->board_info->ref_cnt++; + list_add_tail(&dev->func_node, &dev->board_info->func_list); return 0; + } + + err = xsc_cmd_announce_driver_instance(dev, DRIVER_INSTANCE_LAUNCH); + if (err) { + xsc_core_err(dev, "failed to announce driver instance launch\n"); + goto out; + } + err = 
xsc_cmd_query_guid(dev); + if (err) { + xsc_core_err(dev, "failed to query guid, err=%d\n", err); + goto out; + } - return xsc_cmd_activate_hw_config(dev); + err = xsc_cmd_activate_hw_config(dev); + if (err) { + xsc_core_err(dev, "failed to activate hw config, err=%d\n", err); + goto out; + } + +#if defined(__sw_64__) + xsc_enable_relaxed_order(dev); +#endif + if (dev->board_info->resource_access_mode == EXCLUSIVE_MODE) { + err = xsc_create_res(dev); + if (err) { + xsc_core_err(dev, "Failed to create resource, err=%d\n", err); + goto out; + } + } + dev->board_info->rep_func_id = dev->glb_func_id; + dev->board_info->ref_cnt++; + list_add_tail(&dev->func_node, &dev->board_info->func_list); + +out: + return err; +} + +static void xsc_board_level_uninit(struct xsc_core_device *dev) +{ + dev->board_info->ref_cnt--; + list_del(&dev->func_node); + if (dev->board_info->ref_cnt) { + if (dev->glb_func_id == dev->board_info->rep_func_id) + xsc_cmd_announce_driver_instance(dev, DRIVER_INSTANCE_UPDATE_REP_FUNC); + return; + } + + xsc_cmd_announce_driver_instance(dev, DRIVER_INSTANCE_PHASE_OUT); + + if (dev->board_info->resource_access_mode == EXCLUSIVE_MODE) + xsc_destroy_res(dev); } static int xsc_init_once(struct xsc_core_device *dev) @@ -501,26 +584,12 @@ static int xsc_init_once(struct xsc_core_device *dev) goto err_cmdq_ver_chk; } - err = xsc_query_guid(dev); - if (err) { - xsc_core_err(dev, "failed to query guid, err=%d\n", err); - goto err_cmdq_ver_chk; - } - - err = xsc_activate_hw_config(dev); - if (err) { - xsc_core_err(dev, "failed to activate hw config, err=%d\n", err); - goto err_cmdq_ver_chk; - } - err = xsc_reset_function_resource(dev); if (err) { xsc_core_err(dev, "Failed to reset function resource\n"); goto err_cmdq_ver_chk; } - funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, &dev->pcie_no, - &dev->pf_id, &dev->vf_id); xsc_init_cq_table(dev); xsc_init_qp_table(dev); xsc_eq_init(dev); @@ -535,9 +604,14 @@ static int xsc_init_once(struct xsc_core_device *dev) xsc_core_err(dev, "Failed to init eswitch %d\n", err); goto err_eswitch_init; } + err = xsc_board_level_init(dev); + if (err) + goto err_board_init; return 0; +err_board_init: + xsc_eswitch_cleanup(dev); err_eswitch_init: xsc_sriov_cleanup(dev); err_sriov_init: @@ -593,7 +667,7 @@ static int xsc_unload(struct xsc_core_device *dev) return 0; } -int xsc_load_one(struct xsc_core_device *dev, bool boot) +static int xsc_load_one(struct xsc_core_device *dev, bool boot) { int err = 0; @@ -623,14 +697,6 @@ int xsc_load_one(struct xsc_core_device *dev, bool boot) goto err_load; } - if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) { - err = xsc_create_res(dev); - if (err) { - xsc_core_err(dev, "Failed to create resource, err=%d\n", err); - goto err_create_res; - } - } - if (boot) { err = xsc_devlink_register(priv_to_devlink(dev), dev->device); if (err) @@ -669,10 +735,6 @@ int xsc_load_one(struct xsc_core_device *dev, bool boot) if (boot) xsc_devlink_unregister(priv_to_devlink(dev)); err_devlink_reg: - if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) - xsc_destroy_res(dev); - -err_create_res: xsc_unload(dev); err_load: @@ -684,7 +746,7 @@ int xsc_load_one(struct xsc_core_device *dev, bool boot) return err; } -int xsc_unload_one(struct xsc_core_device *dev, bool cleanup) +static int xsc_unload_one(struct xsc_core_device *dev, bool cleanup) { xsc_port_ctrl_remove(dev); xsc_devlink_unregister(priv_to_devlink(dev)); @@ -706,9 +768,7 @@ int xsc_unload_one(struct xsc_core_device *dev, bool 
cleanup) if (xsc_core_is_pf(dev)) xsc_lag_remove_xdev(dev); - if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) - xsc_destroy_res(dev); - + xsc_board_level_uninit(dev); xsc_unload(dev); if (cleanup) @@ -727,7 +787,6 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, struct xsc_priv *priv; int err; struct devlink *devlink; - devlink = xsc_devlink_alloc(); if (!devlink) { dev_err(&pci_dev->dev, "devlink alloc failed\n"); @@ -764,11 +823,6 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, goto err_dev_init; } - if (xsc_fpga_not_supported(xdev)) { - err = -EOPNOTSUPP; - goto err_version_check; - } - err = xsc_load_one(xdev, true); if (err) { xsc_core_err(xdev, "xsc_load_one failed %d\n", err); @@ -780,7 +834,6 @@ static int xsc_pci_probe(struct pci_dev *pci_dev, return 0; err_load: -err_version_check: xsc_dev_cleanup(xdev); err_dev_init: xsc_pci_fini(xdev); @@ -812,7 +865,7 @@ static struct pci_driver xsc_pci_driver = { .sriov_configure = xsc_core_sriov_configure, }; -int xsc_pci_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +static int xsc_pci_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) { pr_info("xsc pci driver recv %lu event\n", action); if (xsc_get_exit_flag()) @@ -889,5 +942,3 @@ static void __exit xsc_fini(void) module_init(xsc_init); module_exit(xsc_fini); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c index a834a09d23da6727f9851fed71adbaa63586e020..5ad70697d869ff8e4b4df0741a0e74a40bcf7733 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c @@ -15,15 +15,18 @@ int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) int err; u8 key; + memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); spin_lock(&dev->dev_res->mkey_lock); key = 0x80 + dev->dev_res->mkey_key++; spin_unlock(&dev->dev_res->mkey_lock); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MKEY); - if (dev->reg_mr_via_cmdq) + read_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); else err = xsc_create_mkey(dev, &in, &out); + read_unlock(&dev->board_info->mr_sync_lock); if (err) { xsc_core_err(dev, "cmd exec faile %d\n", err); @@ -35,7 +38,7 @@ int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) return xsc_cmd_status_to_err(&out.hdr); } - mr->key = xsc_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; + mr->key = xsc_idx_to_mkey(dev, be32_to_cpu(out.mkey) & 0xffffff) | key; xsc_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); return err; @@ -53,10 +56,12 @@ int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); in.mkey = cpu_to_be32(mr->key); - if (dev->reg_mr_via_cmdq) + read_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); else err = xsc_destroy_mkey(dev, &in, &out); + read_unlock(&dev->board_info->mr_sync_lock); if (err) return err; @@ -68,13 +73,15 @@ int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) } EXPORT_SYMBOL(xsc_core_destroy_mkey); -int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, - u32 *mtt_base) +static int 
xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, + struct xsc_register_mr_mbox_in *in_cmd, + u32 *mtt_base) { struct xsc_set_mpt_mbox_in *in; struct xsc_set_mpt_mbox_out out; struct xsc_register_mr_request *req = &in_cmd->req; int err; + u64 mem_size; in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) { @@ -83,7 +90,9 @@ int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbo } in->mpt_item.pdn = req->pdn; in->mpt_item.pa_num = req->pa_num; - in->mpt_item.len = req->len; + mem_size = be64_to_cpu(req->len); + in->mpt_item.len = (u32)mem_size; + in->mpt_item.len = cpu_to_be32(in->mpt_item.len); in->mpt_item.mkey = req->mkey; in->mpt_item.acc = req->acc; in->mpt_item.page_mode = req->page_mode; @@ -103,8 +112,9 @@ int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbo return 0; } -int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, - u32 mtt_base) +static int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, + struct xsc_register_mr_mbox_in *in_cmd, + u32 mtt_base) { #define PA_NUM_PER_CMD 1024 struct xsc_set_mtt_mbox_in *seg_in; @@ -150,12 +160,14 @@ int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbo return 0; } -int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd) +static int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, + struct xsc_register_mr_mbox_in *in_cmd) { struct xsc_unregister_mr_mbox_in in; struct xsc_unregister_mr_mbox_out out; int err; + memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); in.mkey = in_cmd->req.mkey; @@ -167,7 +179,7 @@ int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_m return 0; } -int xsc_reg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in) +static int xsc_reg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in) { u32 mtt_base; int err; @@ -200,10 +212,12 @@ int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_REG_MR); - if (dev->reg_mr_via_cmdq) + read_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) err = xsc_reg_mr_via_cmdq(dev, in); else err = xsc_reg_mr(dev, in, &out); + read_unlock(&dev->board_info->mr_sync_lock); if (err) { xsc_core_err(dev, "cmd exec failed %d\n", err); @@ -224,13 +238,17 @@ int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr) struct xsc_unregister_mr_mbox_out out; int err; + memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); - in.mkey = cpu_to_be32(xsc_mkey_to_idx(mr->key)); - if (dev->reg_mr_via_cmdq) + /*covert mkey to mpt_idx*/ + in.mkey = cpu_to_be32(xsc_mkey_to_idx(dev, mr->key)); + read_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); else err = xsc_dereg_mr(dev, &in, &out); + read_unlock(&dev->board_info->mr_sync_lock); if (err) { xsc_core_err(dev, "cmd exec failed %d\n", err); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c index 0a3579c6fcf38cbf9e68931e345fa569ac25ed29..1d2d5a8be0b0f43d591e41a18c7c33629efd66b0 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c @@ -11,14 
+11,19 @@ #include "common/driver.h" #include "common/xsc_hsi.h" #include "common/xsc_core.h" +#ifdef CONFIG_RFS_ACCEL #include +#endif #include "fw/xsc_flow.h" #include "fw/xsc_fw.h" +#include "common/tunnel_cmd.h" enum xsc_eq_type { XSC_EQ_TYPE_COMP, XSC_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING XSC_EQ_TYPE_PF, +#endif }; struct xsc_irq { @@ -30,7 +35,9 @@ struct xsc_irq { struct xsc_irq_table { struct xsc_irq *irq; int nvec; +#ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; +#endif }; struct xsc_msix_resource *g_msix_xres; @@ -47,7 +54,6 @@ static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) char *name = "xsc_dma_read_done"; struct xsc_dev_resource *dev_res = xdev->dev_res; int irqn; - u32 value = 0; int vecid = 0; snprintf(dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", @@ -57,8 +63,7 @@ static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, (void *)xdev); vecid = (xdev->msix_vec_base + XSC_DMA_READ_DONE_VEC); - value = ((1 << 12) | (vecid & 0xfff)); - REG_WR32(xdev, HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR, value); + xsc_set_read_done_msix_vector(xdev, vecid); return err; } @@ -346,11 +351,11 @@ static irqreturn_t xsc_cmd_handler(int irq, void *arg) return IRQ_HANDLED; } -int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) +static int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) { struct xsc_dev_resource *dev_res = dev->dev_res; - writel(dev->msix_vec_base + vecidx, REG_ADDR(dev, dev->cmd.reg.msix_vec_addr)); + xsc_set_cmdq_msix_vector(dev, dev->msix_vec_base + vecidx); snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", "xsc_cmd", pci_name(dev->pdev)); @@ -359,29 +364,96 @@ int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) dev_res->irq_info[vecidx].name, dev); } -void xsc_free_irq_for_cmdq(struct xsc_core_device *dev) +static void xsc_free_irq_for_cmdq(struct xsc_core_device *dev) { xsc_free_irq(dev, XSC_VEC_CMD); } +static void xsc_change_to_share_mode(struct xsc_core_device *dev) +{ + write_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == EXCLUSIVE_MODE) { + xsc_sync_mr_to_fw(dev); + dev->board_info->resource_access_mode = SHARE_MODE; + xsc_destroy_res(dev); + } + write_unlock(&dev->board_info->mr_sync_lock); +} + +static void xsc_change_to_exclusive_mode(struct xsc_core_device *dev) +{ + write_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) { + xsc_create_res(dev); + xsc_sync_mr_from_fw(dev); + dev->board_info->resource_access_mode = EXCLUSIVE_MODE; + } + write_unlock(&dev->board_info->mr_sync_lock); +} + +static void xsc_event_work(struct work_struct *work) +{ + int err; + struct xsc_event_query_type_mbox_in in; + struct xsc_event_query_type_mbox_out out; + struct xsc_core_device *dev = container_of(work, struct xsc_core_device, event_work); + u8 event; + + /*query cmd_type cmd*/ + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to query event type, err=%d, stats=%d\n", + err, out.hdr.status); + return; + } + + event = out.ctx.resp_cmd_type; + while (event) { + if (event & XSC_CMD_EVENT_RESP_CHANGE_LINK) { + if (dev->link_event_handler) + dev->link_event_handler(dev); + xsc_core_dbg(dev, "event cmdtype=%04x\n", out.ctx.resp_cmd_type); + event &= 
~XSC_CMD_EVENT_RESP_CHANGE_LINK; + } else if (event & XSC_CMD_EVENT_RESP_TEMP_WARN) { + xsc_core_warn(dev, "[Minor]nic chip temperature high warning\n"); + event &= ~XSC_CMD_EVENT_RESP_TEMP_WARN; + } else if (event & XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION) { + xsc_core_warn(dev, "[Critical]nic chip was over-temperature\n"); + event &= ~XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION; + } else if (event & XSC_CMD_EVENT_RECV_TUNNEL_CMD_REQ) { + xsc_tunnel_cmd_recv_req(dev); + event &= ~XSC_CMD_EVENT_RECV_TUNNEL_CMD_REQ; + } else if (event & XSC_CMD_EVENT_RECV_TUNNEL_CMD_RSP) { + xsc_tunnel_cmd_recv_resp(dev); + event &= ~XSC_CMD_EVENT_RECV_TUNNEL_CMD_RSP; + } else if (event & XSC_CMD_EVENT_CHANGE_TO_SHARE) { + xsc_change_to_share_mode(dev); + event &= ~XSC_CMD_EVENT_CHANGE_TO_SHARE; + } else if (event & XSC_CMD_EVENT_CHANGE_TO_EXCLUSIVE) { + xsc_change_to_exclusive_mode(dev); + event &= ~XSC_CMD_EVENT_CHANGE_TO_EXCLUSIVE; + } else { + xsc_core_info(dev, "unknown event cmdtype=%04x\n", out.ctx.resp_cmd_type); + event = 0; + } + } +} + static irqreturn_t xsc_event_handler(int irq, void *arg) { struct xsc_core_device *dev = (struct xsc_core_device *)arg; xsc_core_dbg(dev, "cmd event hint irq: %d\n", irq); - if (!dev->eth_priv) - return IRQ_NONE; - - if (!dev->event_handler) - return IRQ_NONE; - - dev->event_handler(dev->eth_priv); + schedule_work(&dev->event_work); return IRQ_HANDLED; } -int xsc_request_irq_for_event(struct xsc_core_device *dev) +static int xsc_request_irq_for_event(struct xsc_core_device *dev) { struct xsc_dev_resource *dev_res = dev->dev_res; @@ -391,12 +463,12 @@ int xsc_request_irq_for_event(struct xsc_core_device *dev) dev_res->irq_info[XSC_VEC_CMD_EVENT].name, dev); } -void xsc_free_irq_for_event(struct xsc_core_device *dev) +static void xsc_free_irq_for_event(struct xsc_core_device *dev) { xsc_free_irq(dev, XSC_VEC_CMD_EVENT); } -int xsc_cmd_enable_msix(struct xsc_core_device *xdev) +static int xsc_cmd_enable_msix(struct xsc_core_device *xdev) { struct xsc_msix_table_info_mbox_in in; struct xsc_msix_table_info_mbox_out out; @@ -451,6 +523,7 @@ int xsc_irq_eq_create(struct xsc_core_device *dev) xsc_core_err(dev, "failed to request irq for event, err=%d\n", err); goto err_request_event_irq; } + INIT_WORK(&dev->event_work, xsc_event_work); if (dev->caps.msix_enable && xsc_core_is_pf(dev)) { err = xsc_dma_read_msix_init(dev); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c index 0e5d365c0b23ba5a1baf37e85aebcb82093e124a..4cc4b605369aa1be4d551ee44db1f685d6f9cd6a 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c @@ -208,12 +208,113 @@ void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type) return; } + qp->err_occurred = 1; qp->event(qp, event_type); if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); } +int xsc_alloc_qpn(struct xsc_core_device *xdev, u16 *qpn_base, u16 qp_cnt, u8 qp_type) +{ + struct xsc_alloc_qpn_mbox_in in; + struct xsc_alloc_qpn_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_QPN); + in.qp_cnt = cpu_to_be16(qp_cnt); + in.qp_type = qp_type; + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + *qpn_base = be16_to_cpu(out.qpn_base); + return 0; +} +EXPORT_SYMBOL(xsc_alloc_qpn); + +int xsc_dealloc_qpn(struct xsc_core_device *xdev, u16 
qpn_base, u16 qp_cnt, u8 qp_type) +{ + struct xsc_dealloc_qpn_mbox_in in; + struct xsc_dealloc_qpn_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_QPN); + in.qp_cnt = cpu_to_be16(qp_cnt); + in.qpn_base = cpu_to_be16(qpn_base); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + return 0; +} +EXPORT_SYMBOL(xsc_dealloc_qpn); + +int xsc_unset_qp_info(struct xsc_core_device *xdev, u16 qpn) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_QP_UNSET_QP_INFO); + in.qpn = cpu_to_be16(qpn); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + return 0; +} +EXPORT_SYMBOL(xsc_unset_qp_info); + +int xsc_set_qp_info(struct xsc_core_device *xdev, struct xsc_create_qp_request *qp_info, + size_t pas_buf_size) +{ + struct xsc_set_qp_info_in *in; + struct xsc_set_qp_info_out out; + size_t in_size; + int err; + + in_size = sizeof(*in) + pas_buf_size; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_QP_INFO); + memcpy(&in->qp_info, qp_info, sizeof(*qp_info) + pas_buf_size); + + err = xsc_cmd_exec(xdev, in, in_size, &out, sizeof(out)); + if (err) + goto out; + + if (out.hdr.status) { + err = xsc_cmd_status_to_err(&out.hdr); + goto out; + } + kvfree(in); + return 0; +out: + kvfree(in); + return err; +} +EXPORT_SYMBOL(xsc_set_qp_info); + int xsc_core_create_qp(struct xsc_core_device *xdev, struct xsc_core_qp *qp, struct xsc_create_qp_mbox_in *in, @@ -316,7 +417,7 @@ int xsc_modify_qp(struct xsc_core_device *xdev, in->hdr.opcode = cpu_to_be16(status); in->qpn = cpu_to_be32(qpn); - in->no_need_wait = 1; + in->ctx.no_need_wait = 1; ret = xsc_cmd_exec(xdev, in, sizeof(*in), out, sizeof(*out)); if ((status == XSC_CMD_OP_2RST_QP || status == XSC_CMD_OP_2ERR_QP) && diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c index 59122a490eb851dbf9136572563c241cde0e392d..d281d7f8ff8d4e9f31389a0692c05198eb65f2c9 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c @@ -12,11 +12,22 @@ #include #include #include +#include #include "common/driver.h" #define QPTS_ELEMENT_MAX_NUM 0x4000 //16384 = 16k +#ifndef EPOLLIN +#define EPOLLIN 0x00000001 +#endif +#ifndef EPOLLHUP +#define EPOLLHUP 0x00000010 +#endif +#ifndef EPOLLRDNORM +#define EPOLLRDNORM 0x00000040 +#endif + static struct proc_dir_entry *g_entry; static DECLARE_WAIT_QUEUE_HEAD(g_ring_buff_wait); static struct xsc_qpt_update_msg *g_ring_buff; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c index 7471367ce83fe66a4021dadd62d0ac7c0a66b88e..2b2beb3be6f2295cb24e6fbe0a827e1821fc9761 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c @@ -9,6 +9,7 @@ #include "common/xsc_cmd.h" #include "common/qp.h" #include "common/driver.h" +#include "common/xsc_lag.h" static int xsc_alloc_obj(struct xsc_res_obj *obj, struct xsc_bdf_file *file, void (*release_func)(void *), unsigned long key, @@ -43,12 +44,21 @@ static 
inline void xsc_free_obj(struct xsc_bdf_file *file, unsigned long key, kfree((*obj)->data); } +static inline struct xsc_res_obj *xsc_get_obj(struct xsc_bdf_file *file, + unsigned long key) +{ + struct xsc_res_obj *obj = radix_tree_lookup(&file->obj_tree, key); + + return obj; +} + static void xsc_send_cmd_dealloc_pd(struct xsc_core_device *xdev, unsigned int pdn) { struct xsc_dealloc_pd_mbox_in in; struct xsc_dealloc_pd_mbox_out out; int ret; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); in.pdn = cpu_to_be32(pdn); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); @@ -66,7 +76,7 @@ static void xsc_free_pd_obj(void *obj) xsc_send_cmd_dealloc_pd(file->xdev, pd_obj->pdn); key = xsc_idx_to_key(RES_OBJ_PD, pd_obj->pdn); xsc_free_obj(file, key, &_obj); - xsc_core_warn(pd_obj->obj.file->xdev, "free pd obj: %d\n", pd_obj->pdn); + xsc_core_info(pd_obj->obj.file->xdev, "free pd obj: %d\n", pd_obj->pdn); kfree(pd_obj); } @@ -115,9 +125,10 @@ static void xsc_send_cmd_destroy_mkey(struct xsc_core_device *xdev, unsigned int struct xsc_destroy_mkey_mbox_out out; int ret; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); in.mkey = cpu_to_be32(mkey); - if (xdev->reg_mr_via_cmdq) + if (xdev->board_info->resource_access_mode == SHARE_MODE) ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); else ret = xsc_destroy_mkey(xdev, &in, &out); @@ -132,9 +143,10 @@ static void xsc_send_cmd_dereg_mr(struct xsc_core_device *xdev, unsigned int mke struct xsc_unregister_mr_mbox_out out; int ret; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); - in.mkey = cpu_to_be32(mkey); - if (xdev->reg_mr_via_cmdq) + in.mkey = cpu_to_be32(xsc_mkey_to_idx(xdev, mkey)); + if (xdev->board_info->resource_access_mode == SHARE_MODE) ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); else ret = xsc_dereg_mr(xdev, &in, &out); @@ -154,10 +166,20 @@ static void xsc_free_mr_obj(void *obj) xsc_send_cmd_dereg_mr(file->xdev, mr_obj->mkey); xsc_free_obj(file, key, &_obj); - xsc_core_warn(file->xdev, "free mr obj: %d\n", mr_obj->mkey); + xsc_core_info(file->xdev, "free mr obj: %d\n", mr_obj->mkey); kfree(mr_obj); } +static void xsc_handle_user_mode(struct xsc_core_device *xdev, u8 mode) +{ + struct net_device *ndev = xdev->netdev; + + if (netif_is_bond_slave(ndev)) + xsc_lag_set_user_mode(xdev, mode); + else + xsc_set_user_mode(xdev, mode); +} + int xsc_alloc_mr_obj(struct xsc_bdf_file *file, unsigned int mkey, char *data, unsigned int datalen) { @@ -202,6 +224,7 @@ static void xsc_send_cmd_destroy_cq(struct xsc_core_device *xdev, unsigned int c struct xsc_destroy_cq_mbox_out out; int ret; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); in.cqn = cpu_to_be32(cqn); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); @@ -218,7 +241,7 @@ static void xsc_free_cq_obj(void *obj) xsc_send_cmd_destroy_cq(file->xdev, cq_obj->cqn); xsc_free_obj(file, key, &_obj); - xsc_core_warn(file->xdev, "free cq obj: %d\n", cq_obj->cqn); + xsc_core_info(file->xdev, "free cq obj: %d\n", cq_obj->cqn); kfree(cq_obj); } @@ -266,6 +289,8 @@ void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn) struct xsc_modify_qp_mbox_out out; int ret; + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); ret = xsc_modify_qp(xdev, &in, &out, qpn, XSC_CMD_OP_2RST_QP); if (ret) xsc_core_err(xdev, "failed to reset qp %u\n", qpn); @@ -277,6 +302,7 @@ static void xsc_send_cmd_destroy_qp(struct 
xsc_core_device *xdev, unsigned int q struct xsc_destroy_qp_mbox_out out; int ret; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); in.qpn = cpu_to_be32(qpn); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); @@ -296,7 +322,7 @@ static void xsc_free_qp_obj(void *obj) key = xsc_idx_to_key(RES_OBJ_QP, qp_obj->qpn); xsc_free_obj(file, key, &_obj); - xsc_core_warn(file->xdev, "free qp obj: %d\n", qp_obj->qpn); + xsc_core_info(file->xdev, "free qp obj: %d\n", qp_obj->qpn); kfree(qp_obj); } @@ -391,7 +417,7 @@ static void xsc_free_pct_obj(void *obj) xsc_send_cmd_del_pct(file->xdev, pct_obj->pct_idx); xsc_free_obj(file, key, &_obj); - xsc_core_warn(file->xdev, "free pct obj, priority:%d\n", pct_obj->pct_idx); + xsc_core_info(file->xdev, "free pct obj, priority:%d\n", pct_obj->pct_idx); kfree(pct_obj); } @@ -433,13 +459,70 @@ void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority) } EXPORT_SYMBOL_GPL(xsc_destroy_pct_obj); +int xsc_alloc_user_mode_obj(struct xsc_bdf_file *file, void (*release_func)(void *), + unsigned int mode, char *data, unsigned int len) +{ + unsigned long key = xsc_idx_to_key(RES_OBJ_USER_MODE, mode); + struct xsc_user_mode_obj *user_mode_obj; + int ret; + + user_mode_obj = kzalloc(sizeof(*user_mode_obj), GFP_KERNEL); + if (!user_mode_obj) + return -ENOMEM; + + ret = xsc_alloc_obj(&user_mode_obj->obj, file, release_func, + key, data, len); + + if (!ret) { + if (mode == XSC_IOCTL_OPCODE_PF_USER_MODE) + xsc_handle_user_mode(file->xdev, true); + } else { + kfree(user_mode_obj); + } + + xsc_core_dbg(file->xdev, "alloc user mode %d obj, ret=%d\n", mode, ret); + return ret; +} +EXPORT_SYMBOL_GPL(xsc_alloc_user_mode_obj); + +void xsc_free_user_mode_obj(struct xsc_bdf_file *file, unsigned int mode) +{ + unsigned long key = xsc_idx_to_key(RES_OBJ_USER_MODE, mode); + struct xsc_user_mode_obj *user_mode_obj; + struct xsc_res_obj *obj; + + xsc_free_obj(file, key, &obj); + user_mode_obj = container_of(obj, struct xsc_user_mode_obj, obj); + kfree(user_mode_obj); + + if (mode == XSC_IOCTL_OPCODE_PF_USER_MODE) + xsc_handle_user_mode(file->xdev, false); + + xsc_core_dbg(file->xdev, "destroy user mode %d obj\n", mode); +} +EXPORT_SYMBOL_GPL(xsc_free_user_mode_obj); + +void xsc_release_user_mode(struct xsc_bdf_file *file, unsigned int mode) +{ + unsigned long key = xsc_idx_to_key(RES_OBJ_USER_MODE, mode); + struct xsc_res_obj *obj; + + spin_lock(&file->obj_lock); + obj = xsc_get_obj(file, key); + obj->release_method(obj); + spin_unlock(&file->obj_lock); + + xsc_core_dbg(file->xdev, "release user mode %d obj\n", mode); +} +EXPORT_SYMBOL_GPL(xsc_release_user_mode); + void xsc_close_bdf_file(struct xsc_bdf_file *file) { struct radix_tree_iter iter; void **slot; struct xsc_res_obj *obj; - xsc_core_warn(file->xdev, "release bdf file:%lx\n", file->key); + xsc_core_info(file->xdev, "release bdf file:%lx\n", file->key); spin_lock(&file->obj_lock); radix_tree_for_each_slot(slot, &file->obj_tree, &iter, 0) { obj = (struct xsc_res_obj *)(*slot); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c index 1e1897f576fc0ffa6c134ffbe2372c067fd24e07..8fcb9d9384cbdfe6ad46d1b9170baaea780d2551 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c @@ -29,14 +29,14 @@ static int xsc_device_enable_sriov(struct xsc_core_device *dev, int num_vfs) err = xsc_eswitch_enable(dev->priv.eswitch, XSC_ESWITCH_LEGACY, num_vfs); if (err) { - 
xsc_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); + xsc_core_err(dev, "failed to enable eswitch SRIOV (%d)\n", err); return err; } enable_vfs: err = xsc_create_vfs_sysfs(dev, num_vfs); if (err) { - xsc_core_warn(dev, "failed to create SRIOV sysfs (%d)\n", err); + xsc_core_err(dev, "failed to create SRIOV sysfs (%d)\n", err); if (XSC_ESWITCH_MANAGER(dev)) xsc_eswitch_disable(dev->priv.eswitch, true); return err; @@ -56,8 +56,8 @@ static void xsc_device_disable_sriov(struct xsc_core_device *dev, err = xsc_cmd_disable_hca(dev, (u16)num_vfs); if (err) { - xsc_core_warn(dev, "failed to disable hca, num_vfs=%d, err=%d\n", - num_vfs, err); + xsc_core_err(dev, "failed to disable hca, num_vfs=%d, err=%d\n", + num_vfs, err); return; } @@ -80,9 +80,9 @@ static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) int err; if (num_vfs > dev->caps.max_vfs) { - xsc_core_warn(dev, - "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", - num_vfs, dev->caps.max_vfs); + xsc_core_err(dev, + "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", + num_vfs, dev->caps.max_vfs); return -EINVAL; } @@ -90,8 +90,8 @@ static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) if (num_vfs == pci_num_vf(dev->pdev)) return 0; - xsc_core_warn(dev, "VFs already enabled. Disable before enabling %d VFs\n", - num_vfs); + xsc_core_err(dev, "VFs already enabled. Disable before enabling %d VFs\n", + num_vfs); return -EBUSY; } @@ -101,13 +101,13 @@ static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) err = xsc_device_enable_sriov(dev, num_vfs); if (err) { - xsc_core_warn(dev, "xsc_device_enable_sriov failed, err=%d\n", err); + xsc_core_err(dev, "xsc_device_enable_sriov failed, err=%d\n", err); goto device_enable_sriov_err; } err = pci_enable_sriov(pdev, num_vfs); if (err) { - xsc_core_warn(dev, "pci_enable_sriov failed, err=%d\n", err); + xsc_core_err(dev, "pci_enable_sriov failed, err=%d\n", err); goto pci_enable_sriov_err; } @@ -166,6 +166,8 @@ int xsc_sriov_attach(struct xsc_core_device *dev) return 0; pf_xdev = pci_get_drvdata(pdev->physfn); + if (!pf_xdev) + return -1; sriov = &pf_xdev->priv.sriov; sriov->vfs[dev->vf_id].vf = dev->vf_id; @@ -204,11 +206,8 @@ static int xsc_sriov_pci_cfg_info(struct xsc_core_device *dev, struct pci_dev *pdev = dev->pdev; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) { - xsc_core_err(dev, "%s: failed to find SRIOV capability in device\n", - __func__); + if (!pos) return -ENODEV; - } iov->pos = pos; pci_read_config_dword(pdev, pos + PCI_SRIOV_CAP, &iov->cap); @@ -239,15 +238,15 @@ int xsc_sriov_init(struct xsc_core_device *dev) err = xsc_sriov_pci_cfg_info(dev, iov); if (err) { - xsc_core_warn(dev, "%s: pci not support sriov, err=%d\n", + xsc_core_info(dev, "%s: pci not support sriov, ret=%d\n", __func__, err); return 0; } total_vfs = pci_sriov_get_totalvfs(pdev); if (unlikely(iov->total_vfs == 0)) { - xsc_core_warn(dev, "%s: pci not support sriov, total_vfs=%d, cur_vfs=%d\n", - __func__, iov->total_vfs, sriov->num_vfs); + xsc_core_err(dev, "%s: pci not support sriov, total_vfs=%d, cur_vfs=%d\n", + __func__, iov->total_vfs, sriov->num_vfs); return 0; } sriov->max_vfs = xsc_get_max_vfs(dev); @@ -261,13 +260,9 @@ int xsc_sriov_init(struct xsc_core_device *dev) if (!sriov->vfs_ctx) return -ENOMEM; - xsc_core_info(dev, "total_vfs=%d, cur_vfs=%d, vf_bdf_base=0x%02x\n", - total_vfs, sriov->num_vfs, sriov->vf_bdf_base); - xsc_core_info(dev, "vf_offset=%d, stride=%d, vf_device_id=0x%x\n", - iov->offset, iov->stride, iov->vf_device); err = 
xsc_sriov_sysfs_init(dev); if (err) { - xsc_core_warn(dev, "failed to init SRIOV sysfs, err=%d\n", err); + xsc_core_err(dev, "failed to init SRIOV sysfs, err=%d\n", err); kfree(sriov->vfs_ctx); return err; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h deleted file mode 100644 index 96c4aff37feeb79f00b4d56d6bd87c587703a634..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. - * All rights reserved. - */ - -#define CMDQ_DEFINE_H - -#define CMDQ_PA_REG_ADDR 0xFC00000 -#define CMDQ_PA_REG_WIDTH 64 - -#define CMDQ_LOG_SIZE_REG_ADDR 0xFC00008 -#define CMDQ_LOG_SIZE_WIDTH 4 - -#define CMDQ_DB_REG_ADDR 0xFC0000C -#define CMDQ_DB_REG_WIDTH 32 diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/tunnel_cmd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/tunnel_cmd.c new file mode 100644 index 0000000000000000000000000000000000000000..8584db68aa300bb068e092d9ce861fed7ae0bafb --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/tunnel_cmd.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2024, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "common/tunnel_cmd.h" +#include +#include +#include +#include +#include + +static DEFINE_MUTEX(tunnel_cmd_lock); + +void xsc_tunnel_cmd_recv_resp(struct xsc_core_device *xdev) +{ + xsc_core_info(xdev, "recv tunnel cmd response, wake up tunnel cmd exec.\n"); + complete(&xdev->recv_tunnel_resp_event); +} + +int xsc_tunnel_cmd_exec(struct xsc_core_device *xdev, void *in, int inlen, void *out, int outlen, + struct xsc_ioctl_tunnel_hdr *hdr) +{ + struct xsc_send_tunnel_cmd_req_mbox_in *tunnel_req_in; + struct xsc_send_tunnel_cmd_req_mbox_out tunnel_req_out; + struct xsc_recv_tunnel_cmd_resp_mbox_in tunnel_resp_in; + struct xsc_recv_tunnel_cmd_resp_mbox_out *tunnel_resp_out; + int tunnel_req_inlen; + int tunnel_resp_outlen; + int ret = 0; + unsigned long timeout = msecs_to_jiffies(1000); + + mutex_lock(&tunnel_cmd_lock); + tunnel_req_inlen = inlen + sizeof(*tunnel_req_in); + tunnel_resp_outlen = outlen + sizeof(*tunnel_resp_out); + + tunnel_req_in = kzalloc(tunnel_req_inlen, GFP_KERNEL); + if (!tunnel_req_in) { + ret = -ENOMEM; + goto err_alloc_req; + } + + tunnel_resp_out = kzalloc(tunnel_resp_outlen, GFP_KERNEL); + if (!tunnel_resp_out) { + ret = -ENOMEM; + goto err_alloc_resp; + } + + tunnel_req_in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SEND_TUNNEL_CMD_REQ); + tunnel_req_in->target.domain = cpu_to_be32(hdr->domain); + tunnel_req_in->target.bus = cpu_to_be32(hdr->bus); + tunnel_req_in->target.devfn = cpu_to_be32(hdr->devfn); + tunnel_req_in->target.data_length = cpu_to_be32(inlen); + memcpy(tunnel_req_in->data, in, inlen); + ret = xsc_cmd_exec(xdev, tunnel_req_in, tunnel_req_inlen, + &tunnel_req_out, sizeof(tunnel_req_out)); + if (ret) { + xsc_core_err(xdev, "send tunnel cmd request failed, ret %d\n", ret); + goto err_send_req; + } + if (tunnel_req_out.hdr.status) { + xsc_core_err(xdev, "send tunnel cmd requset failed, req out status %d\n", + tunnel_req_out.hdr.status); + ret = xsc_cmd_status_to_err(&tunnel_req_out.hdr); + goto err_send_req; + } + + init_completion(&xdev->recv_tunnel_resp_event); + 
ret = wait_for_completion_timeout(&xdev->recv_tunnel_resp_event, timeout); + if (!ret) { + ret = -ETIMEDOUT; + goto err_send_req; + } + + memset(&tunnel_resp_in, 0, sizeof(tunnel_resp_in)); + tunnel_resp_in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_RECV_TUNNEL_CMD_RESP); + ret = xsc_cmd_exec(xdev, &tunnel_resp_in, sizeof(tunnel_resp_in), + tunnel_resp_out, tunnel_resp_outlen); + if (ret) { + xsc_core_err(xdev, "recv tunnel cmd response failed, ret %d\n", ret); + goto err_recv_resp; + } + if (tunnel_resp_out->hdr.status) { + xsc_core_err(xdev, "recv tunnel cmd response failed, rsp out status %d\n", + tunnel_resp_out->hdr.status); + ret = xsc_cmd_status_to_err(&tunnel_resp_out->hdr); + goto err_recv_resp; + } + memcpy(out, tunnel_resp_out->data, outlen); +err_recv_resp: +err_send_req: + kfree(tunnel_resp_out); +err_alloc_resp: + kfree(tunnel_req_in); +err_alloc_req: + mutex_unlock(&tunnel_cmd_lock); + return ret; +} +EXPORT_SYMBOL_GPL(xsc_tunnel_cmd_exec); + +static void xsc_read_hw_counter(char *file_fn, char *buf, size_t count) +{ + struct file *filp; + loff_t pos = 0; + + filp = filp_open(file_fn, O_RDONLY, 0); + if (IS_ERR(filp)) + return; + kernel_read(filp, buf, count, &pos); + filp_close(filp, NULL); +} + +#define ITEM_VALUE_LEN 16 +static void xsc_ioctl_get_hw_counters(struct xsc_core_device *xdev, void *indata, void *outdata) +{ + struct xsc_cmd_ioctl_get_hw_counters_mbox_in *in = indata; + struct xsc_cmd_ioctl_get_hw_counters_mbox_out *out = outdata; + char dev[8] = {0}; + char path[128] = {0}; + int offset = 0; + int end = be32_to_cpu(in->length); + + memcpy(dev, in->data, 8); + offset += 8; + + while (offset < end) { + int item_key_len = *(u32 *)(&in->data[offset]); + + offset += sizeof(int); + sprintf(path, "/sys/class/infiniband/%s/ports/1/hw_counters/%s", + dev, (char *)(&in->data[offset])); + offset += item_key_len + 1; + xsc_read_hw_counter(path, &in->data[offset], ITEM_VALUE_LEN); + offset += ITEM_VALUE_LEN; + } + memcpy(out->data, in->data, end); + out->hdr.status = 0; +} + +int xsc_tunnel_cmd_recv_req(struct xsc_core_device *xdev) +{ + struct xsc_recv_tunnel_cmd_req_mbox_in req_in; + struct xsc_recv_tunnel_cmd_req_mbox_out *req_out; + struct xsc_send_tunnel_cmd_resp_mbox_in *resp_in; + struct xsc_send_tunnel_cmd_resp_mbox_out resp_out; + struct xsc_inbox_hdr *hdr; + int ret = 0; + u16 opcode; + u32 domain; + u32 bus; + u32 devfn; + struct xsc_core_device *target_xdev; + int inlen; + int outlen; + u16 ioctl_opcode; + struct xsc_cmd_get_ioctl_info_mbox_in *in; + struct xsc_cmd_get_ioctl_info_mbox_out *out; + struct xsc_qos_mbox_in *qos_in; + struct xsc_hw_stats_mbox_in *stat_in; + struct xsc_prio_stats_mbox_in *prio_in; + struct xsc_pfc_prio_stats_mbox_in *pfc_in; + + xsc_core_info(xdev, "recv tunnel cmd req, process and send response.\n"); + + req_out = kzalloc(xdev->caps.max_cmd_out_len, GFP_KERNEL); + if (!req_out) { + ret = -ENOMEM; + goto err_alloc_req; + } + resp_in = kzalloc(xdev->caps.max_cmd_out_len, GFP_KERNEL); + if (!resp_in) { + ret = -ENOMEM; + goto err_alloc_resp; + } + + memset(&req_in, 0, sizeof(req_in)); + req_in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_RECV_TUNNEL_CMD_REQ); + ret = xsc_cmd_exec(xdev, &req_in, sizeof(req_in), req_out, xdev->caps.max_cmd_out_len); + if (ret) { + xsc_core_err(xdev, "recv tunnel cmd request failed, ret %d\n", ret); + goto err_recv_req; + } + if (req_out->hdr.status) { + xsc_core_err(xdev, "recv tunnel cmd request failed, req out status %d\n", + req_out->hdr.status); + ret = xsc_cmd_status_to_err(&req_out->hdr); + goto err_recv_req; + } + + domain = 
be32_to_cpu(req_out->target.domain); + bus = be32_to_cpu(req_out->target.bus); + devfn = be32_to_cpu(req_out->target.devfn); + if (!domain && !bus && !devfn) { + target_xdev = xdev; + } else { + target_xdev = xsc_pci_get_xdev_by_bus_and_slot(domain, bus, devfn); + if (!target_xdev) + goto err_recv_req; + } + + hdr = (struct xsc_inbox_hdr *)req_out->data; + opcode = be16_to_cpu(hdr->opcode); + switch (opcode) { + case XSC_CMD_OP_GET_IOCTL_INFO: + in = (struct xsc_cmd_get_ioctl_info_mbox_in *)req_out->data; + ioctl_opcode = be16_to_cpu(in->ioctl_opcode); + switch (ioctl_opcode) { + case XSC_IOCTL_GET_DEVINFO: + xsc_get_devinfo(resp_in->data, + xdev->caps.max_cmd_in_len - sizeof(struct xsc_inbox_hdr)); + break; + case XSC_IOCTL_GET_FORCE_PCP: + case XSC_IOCTL_GET_FORCE_DSCP: + case XSC_IOCTL_SET_FORCE_PCP: + case XSC_IOCTL_SET_FORCE_DSCP: + case XSC_IOCTL_GET_CMA_PCP: + case XSC_IOCTL_GET_CMA_DSCP: + case XSC_IOCTL_SET_CMA_PCP: + case XSC_IOCTL_SET_CMA_DSCP: + inlen = be16_to_cpu(in->length); + target_xdev->get_rdma_ctrl_info(target_xdev, ioctl_opcode, in->data, inlen); + out = (struct xsc_cmd_get_ioctl_info_mbox_out *)resp_in->data; + memcpy(out->data, in->data, inlen); + out->hdr.status = 0; + break; + default: + ret = -EOPNOTSUPP; + goto err_process_cmd; + } + goto send_resp; + case XSC_CMD_OP_IOCTL_NETLINK: + target_xdev->handle_netlink_cmd(target_xdev, req_out->data, resp_in->data); + goto send_resp; + case XSC_CMD_OP_IOCTL_GET_HW_COUNTERS: + xsc_ioctl_get_hw_counters(target_xdev, req_out->data, resp_in->data); + goto send_resp; + case XSC_CMD_OP_QUERY_HW_STATS_RDMA: + stat_in = (struct xsc_hw_stats_mbox_in *)req_out->data; + stat_in->mac_port = target_xdev->mac_port; + break; + case XSC_CMD_OP_QUERY_PRIO_STATS: + prio_in = (struct xsc_prio_stats_mbox_in *)req_out->data; + prio_in->pport = target_xdev->mac_port; + break; + case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: + pfc_in = (struct xsc_pfc_prio_stats_mbox_in *)req_out->data; + pfc_in->pport = target_xdev->mac_port; + break; + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + case XSC_CMD_OP_IOCTL_SET_PCP_PMT: + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + case XSC_CMD_OP_IOCTL_SET_PFC: + case XSC_CMD_OP_IOCTL_SET_PFC_NEW: + case XSC_CMD_OP_IOCTL_GET_PFC: + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + case XSC_CMD_OP_IOCTL_SET_SP: + case XSC_CMD_OP_IOCTL_GET_SP: + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN: + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN: + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD: + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD: + qos_in = (struct xsc_qos_mbox_in *)req_out->data; + qos_in->req_prfx.mac_port = target_xdev->mac_port; + break; + default: + break; + } + + inlen = be32_to_cpu(req_out->target.data_length); + outlen = xdev->caps.max_cmd_out_len - sizeof(struct xsc_inbox_hdr); + ret = xsc_cmd_exec(target_xdev, req_out->data, inlen, resp_in->data, outlen); + if (ret) { + xsc_core_err(xdev, "exec cmd on host failed, opcode %d, ret %d\n", opcode, ret); + goto err_process_cmd; + } + +send_resp: + resp_in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SEND_TUNNEL_CMD_RESP); + ret = 
xsc_cmd_exec(xdev, resp_in, xdev->caps.max_cmd_out_len, &resp_out, sizeof(resp_out)); + if (ret) + goto err_send_resp; + if (resp_out.hdr.status) { + ret = xsc_cmd_status_to_err(&resp_out.hdr); + goto err_send_resp; + } + +err_process_cmd: +err_send_resp: +err_recv_req: + kfree(resp_in); +err_alloc_resp: + kfree(req_out); +err_alloc_req: + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c index acbe7e83a9e20f251a034a6ae03097fd7cf8ac56..3bdcc2e5bd864b7b720efd0e56b93080a4f95168 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c @@ -74,6 +74,26 @@ static int xsc_query_nic_vport_context(struct xsc_core_device *dev, u16 vport, return __xsc_query_nic_vport_context(dev, vport, out, outlen, 0); } +static void xsc_nic_isolate_and_drop_modify(struct xsc_core_device *dev, + struct xsc_modify_nic_vport_context_in *in) +{ + u16 caps = 0; + u16 caps_mask = 0; + + if (xsc_get_pf_isolate_config(dev, true)) { + caps = BIT(XSC_TBM_CAP_PF_ISOLATE_CONFIG); + caps_mask = BIT(XSC_TBM_CAP_PF_ISOLATE_CONFIG); + } + + if (xsc_get_mac_drop_config(dev, true)) { + caps |= BIT(XSC_TBM_CAP_MAC_DROP_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_MAC_DROP_CONFIG); + } + + in->caps |= cpu_to_be16(caps); + in->caps_mask |= cpu_to_be16(caps_mask); +} + int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, int inlen) { @@ -93,38 +113,6 @@ int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, return err; } -int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, - u16 vport, u8 *min_inline) -{ - struct xsc_query_nic_vport_context_out out; - int err; - - memset(&out, 0, sizeof(out)); - err = xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); - if (!err) - *min_inline = out.nic_vport_ctx.min_wqe_inline_mode; - return err; -} -EXPORT_SYMBOL_GPL(xsc_query_nic_vport_min_inline); - -void xsc_query_min_inline(struct xsc_core_device *dev, - u8 *min_inline_mode) -{ - switch (dev->caps.wqe_inline_mode) { - case XSC_CAP_INLINE_MODE_VPORT_CONTEXT: - if (!xsc_query_nic_vport_min_inline(dev, 0, min_inline_mode)) - break; - fallthrough; - case XSC_CAP_INLINE_MODE_L2: - *min_inline_mode = XSC_INLINE_MODE_L2; - break; - case XSC_CAP_INLINE_MODE_NOT_REQUIRED: - *min_inline_mode = XSC_INLINE_MODE_NONE; - break; - } -} -EXPORT_SYMBOL_GPL(xsc_query_min_inline); - int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, u16 vport, u8 min_inline) { @@ -179,12 +167,15 @@ static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, struct xsc_modify_nic_vport_context_out out; struct xsc_adapter *adapter = netdev_priv(dev->netdev); struct xsc_vport *evport = NULL; - int err, in_sz, i; + int err, in_sz; + int i = 0; u8 *mac_addr; u16 caps = 0; u16 caps_mask = 0; u16 lag_id = xsc_get_lag_id(dev); + memset(&out, 0, sizeof(out)); + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; in = kzalloc(in_sz, GFP_KERNEL); @@ -213,6 +204,8 @@ static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, in->caps = cpu_to_be16(caps); in->caps_mask = cpu_to_be16(caps_mask); + xsc_nic_isolate_and_drop_modify(dev, in); + ether_addr_copy(mac_addr, addr); in->field_select.addresses_list = 1; @@ -419,6 +412,82 @@ int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, } EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mac_list); +int xsc_nic_vport_add_uc_mac(struct xsc_core_device *xdev, + u8 *mac_addr, u16 *pct_prio) +{ + struct 
xsc_modify_nic_vport_uc_mac_in in; + struct xsc_modify_nic_vport_uc_mac_out out; + int err; + + memset(&in, 0, sizeof(in)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_UC_MAC); + in.add_mac = true; + ether_addr_copy(in.mac_addr, mac_addr); + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + + if (err || (out.hdr.status && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { + xsc_core_err(xdev, "Failed to add uc mac err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + *pct_prio = be16_to_cpu(out.out_pct_prio); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_nic_vport_add_uc_mac); + +int xsc_nic_vport_del_uc_mac(struct xsc_core_device *xdev, u16 pct_prio) +{ + struct xsc_modify_nic_vport_uc_mac_in in; + struct xsc_modify_nic_vport_uc_mac_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_UC_MAC); + in.add_mac = false; + in.in_pct_prio = cpu_to_be16(pct_prio); + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + + if (err || (out.hdr.status && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { + xsc_core_err(xdev, "Failed to del uc mac err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_nic_vport_del_uc_mac); + +int xsc_nic_vport_modify_mc_mac(struct xsc_core_device *xdev, u8 *mac, u8 action) +{ + struct xsc_modify_nic_vport_mc_mac_in in; + struct xsc_modify_nic_vport_mc_mac_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_MC_MAC); + ether_addr_copy(in.mac, mac); + in.action = action; + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + + if (err || (out.hdr.status && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { + xsc_core_err(xdev, "Failed to mod mc mac err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_nic_vport_modify_mc_mac); + int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, unsigned long *vlans) { @@ -482,6 +551,8 @@ int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; in->nic_vport_ctx.vlan = cpu_to_be16(vid); + xsc_nic_isolate_and_drop_modify(dev, in); + memset(&out, 0, sizeof(out)); err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); kfree(in); @@ -780,7 +851,7 @@ int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, struct xsc_query_nic_vport_context_out *out; int err; - out = kzalloc(sizeof(out), GFP_KERNEL); + out = kzalloc(sizeof(*out), GFP_KERNEL); if (!out) return -ENOMEM; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h index f640f631169a4625492e5aad868f1ea751d52f71..8811ef1bf0f772472c583dad59349c9ce84c90b1 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h @@ -166,4 +166,5 @@ static inline void xsc_cqwq_pop(struct xsc_cqwq *wq) { wq->cc++; } + #endif /* __XSC_WQ_H__ */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c index 4d12ce7f0459c5a4dab282d8ef8c9b119ff5a4ae..eeac16ebfe3e6767af307517e0aa778229d27b1f 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c @@ -12,6 +12,7 @@ #include "common/xsc_ioctl.h" #include 
"common/xsc_cmd.h" #include "net/xsc_eth.h" +#include "eswitch.h" #include #include @@ -26,8 +27,8 @@ struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev) } EXPORT_SYMBOL(xsc_board_lag_get); -void xsc_board_lag_set(struct xsc_core_device *xdev, - void *board_lag) +static void xsc_board_lag_set(struct xsc_core_device *xdev, + void *board_lag) { struct xsc_board_lag *board_lag_new = board_lag; @@ -35,7 +36,7 @@ void xsc_board_lag_set(struct xsc_core_device *xdev, board_lag_array[xdev->board_info->board_id] = board_lag_new; } -void xsc_board_lag_reset(u32 board_id) +static void xsc_board_lag_reset(u32 board_id) { board_lag_array[board_id] = NULL; } @@ -72,7 +73,7 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) } } -enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond) +static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond) { switch (bond->params.xmit_policy) { case BOND_XMIT_POLICY_LAYER2: @@ -119,7 +120,7 @@ static inline struct xsc_lag *__xsc_get_lag(struct xsc_core_device *xdev) return &board_lag->xsc_lag[xdev->bond_id]; } -int xsc_cmd_create_lag(struct xsc_lag_event *entry) +static int xsc_cmd_create_lag(struct xsc_lag_event *entry) { struct xsc_create_lag_mbox_in in = {}; struct xsc_create_lag_mbox_out out = {}; @@ -127,12 +128,13 @@ int xsc_cmd_create_lag(struct xsc_lag_event *entry) struct net_device *netdev = xdev->netdev; int ret = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_CREATE); in.req.lag_id = cpu_to_be16(entry->lag_id); in.req.lag_type = entry->lag_type; in.req.lag_sel_mode = entry->lag_sel_mode; - in.req.mac_idx = xdev->pf_id; + in.req.pf_idx = xdev->pf_id; in.req.bond_mode = entry->bond_mode; in.req.slave_status = entry->slave_status; @@ -142,7 +144,7 @@ int xsc_cmd_create_lag(struct xsc_lag_event *entry) entry->lag_id, entry->lag_type, entry->lag_sel_mode, entry->bond_mode); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { xsc_core_err(xdev, "failed to create LAG, err =%d out.status= %u\n", ret, out.hdr.status); return -ENOEXEC; @@ -151,7 +153,7 @@ int xsc_cmd_create_lag(struct xsc_lag_event *entry) return 0; } -int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) +static int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) { struct xsc_add_lag_member_mbox_in in = {}; struct xsc_add_lag_member_mbox_out out = {}; @@ -159,15 +161,19 @@ int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) struct net_device *netdev = xdev->netdev; int ret = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_ADD_MEMBER); in.req.lag_id = cpu_to_be16(entry->lag_id); in.req.lag_type = entry->lag_type; in.req.lag_sel_mode = entry->lag_sel_mode; - in.req.mac_idx = xdev->pf_id; + in.req.pf_idx = xdev->pf_id; in.req.bond_mode = entry->bond_mode; in.req.slave_status = entry->slave_status; - in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id; + in.req.roce_pf_idx = entry->roce_lag_xdev->pf_id; + + in.hdr.ver = LAG_CMD_V1; + in.req.roce_pf_func_data = entry->roce_pf_func_data; memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN); @@ -175,7 +181,7 @@ int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) entry->lag_id, entry->lag_type, entry->bond_mode); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { 
xsc_core_err(xdev, "failed to add LAG member, err =%d out.status= %u\n", ret, out.hdr.status); return -ENOEXEC; @@ -184,30 +190,36 @@ int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) return 0; } -int xsc_cmd_remove_lag_member(struct xsc_lag_event *entry) +static int xsc_cmd_remove_lag_member(struct xsc_lag_event *entry) { struct xsc_remove_lag_member_mbox_in in = {}; struct xsc_remove_lag_member_mbox_out out = {}; struct xsc_core_device *xdev = entry->xdev; int ret = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_REMOVE_MEMBER); in.req.lag_id = cpu_to_be16(entry->lag_id); in.req.lag_type = entry->lag_type; - in.req.mac_idx = xdev->pf_id; + in.req.pf_idx = xdev->pf_id; in.req.bond_mode = entry->bond_mode; - if (entry->lag_type & XSC_LAG_FLAG_ROCE && entry->is_roce_lag_xdev) { + if (entry->lag_type & XSC_LAG_FLAG_ROCE) { in.req.is_roce_lag_xdev = entry->is_roce_lag_xdev; - in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id; + in.req.roce_pf_idx = entry->roce_lag_xdev->pf_id; in.req.not_roce_lag_xdev_mask = entry->not_roce_lag_xdev_mask; } + in.hdr.ver = LAG_CMD_V1; + in.req.roce_pf_func_data = entry->roce_pf_func_data; + memcpy(in.req.func_data, + entry->func_data, sizeof(entry->func_data)); + xsc_core_info(xdev, "remove LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n", entry->lag_id, entry->lag_type, entry->bond_mode); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { xsc_core_err(xdev, "failed to add LAG member, err =%d out.status= %u\n", ret, out.hdr.status); return -ENOEXEC; @@ -216,19 +228,20 @@ int xsc_cmd_remove_lag_member(struct xsc_lag_event *entry) return 0; } -int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry) +static int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry) { struct xsc_update_lag_member_status_mbox_in in = {}; struct xsc_update_lag_member_status_mbox_out out = {}; struct xsc_core_device *xdev = entry->xdev; int ret = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS); in.req.lag_type = entry->lag_type; in.req.bond_mode = entry->bond_mode; in.req.lag_id = cpu_to_be16(entry->lag_id); - in.req.mac_idx = xdev->pf_id; + in.req.pf_idx = xdev->pf_id; in.req.slave_status = entry->slave_status; xsc_core_info(xdev, "update LAG member status: lag_id = %d, bond_mode = %d, lag_type = %d, slave_status = %d, mac_idx = %d\n", @@ -236,7 +249,7 @@ int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry) entry->slave_status, xdev->pf_id); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { xsc_core_err(xdev, "failed to update LAG member status, err=%d out.status=%u\n", ret, out.hdr.status); return -ENOEXEC; @@ -245,13 +258,14 @@ int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry) return ret; } -int xsc_cmd_update_lag_hash_type(struct xsc_lag_event *entry) +static int xsc_cmd_update_lag_hash_type(struct xsc_lag_event *entry) { struct xsc_update_lag_hash_type_mbox_in in = {}; struct xsc_update_lag_hash_type_mbox_out out = {}; struct xsc_core_device *xdev = entry->xdev; int ret = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_HASH_TYPE); in.req.lag_id = cpu_to_be16(entry->lag_id); @@ -261,7 +275,7 @@ int xsc_cmd_update_lag_hash_type(struct xsc_lag_event 
*entry) entry->lag_id, in.req.lag_sel_mode); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { xsc_core_err(xdev, "failed to update LAG hash type, err=%d out.status=%u\n", ret, out.hdr.status); return -ENOEXEC; @@ -270,24 +284,25 @@ int xsc_cmd_update_lag_hash_type(struct xsc_lag_event *entry) return ret; } -int xsc_cmd_destroy_lag(struct xsc_lag_event *entry) +static int xsc_cmd_destroy_lag(struct xsc_lag_event *entry) { struct xsc_destroy_lag_mbox_in in = {}; struct xsc_destroy_lag_mbox_out out = {}; struct xsc_core_device *xdev = entry->xdev; int ret = 0; + memset(&in, 0, sizeof(in)); in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_DESTROY); in.req.lag_id = cpu_to_be16(entry->lag_id); in.req.lag_type = entry->lag_type; - in.req.mac_idx = xdev->pf_id; + in.req.pf_idx = xdev->pf_id; in.req.bond_mode = entry->bond_mode; xsc_core_info(xdev, "destroy LAG: lag_id = %d\n", entry->lag_id); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - if (ret || out.hdr.status) { + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { xsc_core_err(xdev, "failed to destroy LAG, err =%d out.status= %u\n", ret, out.hdr.status); return -ENOEXEC; @@ -313,15 +328,34 @@ static int xsc_lag_set_qos(struct xsc_core_device *xdev, u16 lag_id, u8 member_i in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_SET_QOS); ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); - return ret; + if (ret || (out.hdr.status != 0 && out.hdr.status != XSC_CMD_STATUS_NOT_SUPPORTED)) { + xsc_core_err(xdev, "failed to set lag qos, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static inline int xsc_lag_abnormal_operate_check(struct xsc_core_device *xdev, + u8 lag_type) +{ + if (lag_type != XSC_LAG_FLAG_SRIOV && xsc_get_user_mode(xdev)) { + xsc_core_err(xdev, "Failed to operate non sriov LAG while ovs is on\n"); + return -EOPNOTSUPP; + } + return 0; } -void xsc_create_lag(struct xsc_lag_event *entry) +static void xsc_create_lag(struct xsc_lag_event *entry) { int ret = 0; bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; struct xsc_core_device *xdev = entry->xdev; + if (xsc_lag_abnormal_operate_check(xdev, entry->lag_type)) + return; + if (roce_lag) xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); @@ -347,12 +381,15 @@ void xsc_create_lag(struct xsc_lag_event *entry) xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); } -void xsc_add_lag_member(struct xsc_lag_event *entry) +static void xsc_add_lag_member(struct xsc_lag_event *entry) { int ret = 0; bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; struct xsc_core_device *xdev = entry->xdev; + if (xsc_lag_abnormal_operate_check(xdev, entry->lag_type)) + return; + if (roce_lag) xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); @@ -375,13 +412,16 @@ void xsc_add_lag_member(struct xsc_lag_event *entry) xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); } -void xsc_remove_lag_member(struct xsc_lag_event *entry) +static void xsc_remove_lag_member(struct xsc_lag_event *entry) { int ret = 0; bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; struct xsc_core_device *xdev = entry->xdev; struct xsc_core_device *roce_lag_xdev = entry->roce_lag_xdev; + if (xsc_lag_abnormal_operate_check(xdev, entry->lag_type)) + return; + if (roce_lag && entry->is_roce_lag_xdev) xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); @@ -410,7 +450,7 @@ void 
xsc_remove_lag_member(struct xsc_lag_event *entry) xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); } -void xsc_update_lag_member_status(struct xsc_lag_event *entry) +static void xsc_update_lag_member_status(struct xsc_lag_event *entry) { int ret = 0; struct xsc_core_device *xdev = entry->xdev; @@ -430,7 +470,7 @@ void xsc_update_lag_member_status(struct xsc_lag_event *entry) } } -void xsc_update_lag_hash_type(struct xsc_lag_event *entry) +static void xsc_update_lag_hash_type(struct xsc_lag_event *entry) { int ret = 0; struct xsc_core_device *xdev = entry->xdev; @@ -440,7 +480,7 @@ void xsc_update_lag_hash_type(struct xsc_lag_event *entry) xsc_core_err(xdev, "failed to update LAG member status, err =%d\n", ret); } -void xsc_destroy_lag(struct xsc_lag_event *entry) +static void xsc_destroy_lag(struct xsc_lag_event *entry) { int ret = 0; bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; @@ -451,7 +491,7 @@ void xsc_destroy_lag(struct xsc_lag_event *entry) ret = xsc_cmd_destroy_lag(entry); if (ret) { - xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + xsc_core_err(xdev, "failed to destroy LAG, err =%d\n", ret); goto out; } @@ -529,12 +569,6 @@ static inline bool xsc_is_roce_lag_allowed(struct xsc_lag *lag) xsc_core_info(xdev, "create ROCE LAG while sriov is open\n"); break; } - - roce_lag_support &= radix_tree_empty(&xdev->priv_device.bdf_tree); - if (!roce_lag_support) { - xsc_core_info(xdev, "create ROCE LAG while the ib device is open\n"); - break; - } } return roce_lag_support; @@ -554,6 +588,14 @@ static bool xsc_is_sriov_lag_allowed(struct xsc_lag *lag) return sriov_lag_support; } +static bool xsc_is_dpu_soc_lag(struct xsc_lag *lag) +{ + struct xsc_core_device *xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + + return xsc_is_soc_pf(xdev); +} + static u8 xsc_get_lag_type(struct xsc_lag *lag) { u8 lag_type; @@ -564,7 +606,7 @@ static u8 xsc_get_lag_type(struct xsc_lag *lag) lag_mode_support = (lag->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || lag->tx_type == NETDEV_LAG_TX_TYPE_HASH); roce_lag = lag_mode_support && xsc_is_roce_lag_allowed(lag); - sriov_lag = lag_mode_support && xsc_is_sriov_lag_allowed(lag); + sriov_lag = lag_mode_support && (xsc_is_sriov_lag_allowed(lag) || xsc_is_dpu_soc_lag(lag)); lag_type = sriov_lag ? XSC_LAG_FLAG_SRIOV : (roce_lag ? 
XSC_LAG_FLAG_ROCE : XSC_LAG_FLAG_KERNEL); @@ -601,8 +643,8 @@ static inline enum lag_slave_status lag_slave_status_get(struct net_device *ndev return slave_status; } -void pack_lag_create(struct xsc_lag *lag, - struct xsc_core_device *xdev, bool no_wq) +static void pack_lag_create(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) { struct net_device *ndev = xdev->netdev; struct xsc_lag_event *entry; @@ -634,8 +676,16 @@ void pack_lag_create(struct xsc_lag *lag, xsc_create_lag(entry); } -void pack_lag_add_member(struct xsc_lag *lag, - struct xsc_core_device *xdev, bool no_wq) +static inline void xsc_slave_func_data_set(struct xsc_core_device *xdev, + struct slave_func_data *func_data) +{ + func_data->pcie_no = xdev->pcie_no; + func_data->pf_id = xdev->pf_id; + func_data->valid = 1; +} + +static void pack_lag_add_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) { struct xsc_lag_event *entry; struct net_device *ndev = xdev->netdev; @@ -653,7 +703,6 @@ void pack_lag_add_member(struct xsc_lag *lag, if (entry->lag_type != lag->lag_type) { xsc_core_err(xdev, "do not permit add slave to different type lag, xdev_lag_type = %d, lag_type = %d\n", entry->lag_type, lag->lag_type); - kfree(entry); return; } @@ -668,7 +717,7 @@ void pack_lag_add_member(struct xsc_lag *lag, roce_lag_xdev = list_first_entry(&lag->slave_list, struct xsc_core_device, slave_node); entry->roce_lag_xdev = roce_lag_xdev; - entry->not_roce_lag_xdev_mask = lag->not_roce_lag_xdev_mask; + xsc_slave_func_data_set(roce_lag_xdev, &entry->roce_pf_func_data); xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", entry->lag_sel_mode, entry->slave_status, entry->lag_type); @@ -679,8 +728,8 @@ void pack_lag_add_member(struct xsc_lag *lag, xsc_add_lag_member(entry); } -void pack_lag_remove_member(struct xsc_lag *lag, - struct xsc_core_device *xdev, bool no_wq) +static void pack_lag_remove_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) { struct xsc_lag_event *entry; struct xsc_core_device *roce_lag_xdev = NULL; @@ -705,28 +754,30 @@ void pack_lag_remove_member(struct xsc_lag *lag, if (entry->lag_type & XSC_LAG_FLAG_ROCE) { roce_lag_xdev = list_first_entry(&lag->slave_list, struct xsc_core_device, slave_node); + entry->roce_lag_xdev = roce_lag_xdev; if (roce_lag_xdev == xdev) { entry->is_roce_lag_xdev = 1; - list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { cnt++; if (cnt == 1) continue; - if (cnt == 2) { - roce_lag_xdev = xdev_tmp; + entry->roce_lag_xdev = xdev_tmp; + xsc_slave_func_data_set(xdev_tmp, + &entry->roce_pf_func_data); continue; } - + xsc_slave_func_data_set(xdev_tmp, &entry->func_data[cnt - 3]); not_roce_lag_xdev_mask |= BIT(xdev_tmp->pf_id); } - entry->roce_lag_xdev = roce_lag_xdev; entry->not_roce_lag_xdev_mask = not_roce_lag_xdev_mask; } } - xsc_core_info(xdev, "lag_type = %d, is_roce_lag_xdev = %d, not_roce_lag_xdev_mask = %d\n", - entry->lag_type, entry->is_roce_lag_xdev, entry->not_roce_lag_xdev_mask); + xsc_core_info(xdev, "lag_type = %d, is_roce_lag_xdev = %d, roce_pf_func_data = %d\n", - entry->lag_type, entry->is_roce_lag_xdev, + *(u8 *)&entry->roce_pf_func_data); if (!no_wq) pack_add_and_wake_wq(board_lag, entry); @@ -734,8 +785,9 @@ void pack_lag_remove_member(struct xsc_lag *lag, xsc_remove_lag_member(entry); } -void pack_lag_update_member_status(struct xsc_lag *lag, - struct net_device *ndev, enum lag_slave_status slave_status) +static void 
pack_lag_update_member_status(struct xsc_lag *lag, + struct net_device *ndev, + enum lag_slave_status slave_status) { struct xsc_lag_event *entry; struct xsc_adapter *adapter = netdev_priv(ndev); @@ -762,8 +814,8 @@ void pack_lag_update_member_status(struct xsc_lag *lag, pack_add_and_wake_wq(board_lag, entry); } -void pack_lag_update_hash_type(struct xsc_lag *lag, - u8 bond_id, enum netdev_lag_hash hash_type) +static void pack_lag_update_hash_type(struct xsc_lag *lag, + u8 bond_id, enum netdev_lag_hash hash_type) { struct xsc_lag_event *entry; struct xsc_core_device *xdev = NULL; @@ -792,7 +844,7 @@ void pack_lag_update_hash_type(struct xsc_lag *lag, pack_add_and_wake_wq(board_lag, entry); } -void pack_lag_destroy(struct xsc_lag *lag, struct xsc_core_device *xdev, bool no_wq) +static void pack_lag_destroy(struct xsc_lag *lag, struct xsc_core_device *xdev, bool no_wq) { struct xsc_lag_event *entry; struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); @@ -836,7 +888,9 @@ static u8 xsc_get_valid_bond_id(struct xsc_board_lag *board_lag) } static void xsc_lag_setup(struct xsc_board_lag *board_lag, - struct net_device *upper, struct xsc_core_device *xdev, bool no_wq) + struct net_device *upper, + struct xsc_core_device *xdev, + bool no_wq) { struct bonding *bond = netdev_priv(upper); struct xsc_lag *lag = NULL; @@ -872,7 +926,8 @@ static bool xsc_is_ndev_xsc_pf(struct net_device *slave_ndev) struct pci_dev *pdev = to_pci_dev(dev->parent); return (pdev->device == XSC_MS_PF_DEV_ID || - pdev->device == XSC_MV_SOC_PF_DEV_ID); + pdev->device == XSC_MV_SOC_PF_DEV_ID || + pdev->device == XSC_MC_PF_DEV_ID_DIAMOND); } static u8 xsc_get_bond_board_xsc_cnt(struct net_device *upper, @@ -956,6 +1011,26 @@ static void xsc_lag_update_member(struct xsc_lag *lag, xsc_lag_member_remove(lag, xdev, false); } +static bool ndev_is_member_of_lag(struct net_device *bond_ndev, + struct net_device *ndev, + struct xsc_lag *lag) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_core_device *xdev_tmp; + + if (lag->bond_dev != bond_ndev || + lag->board_id != xdev->board_info->board_id) + return false; + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + if (xdev_tmp->mac_port == xdev->mac_port) + return false; + } + + return true; +} + static u8 xsc_get_upper_bond_id(struct net_device *bond_ndev, struct net_device *ndev, struct xsc_board_lag *board_lag, bool hash_change) @@ -963,18 +1038,13 @@ static u8 xsc_get_upper_bond_id(struct net_device *bond_ndev, u8 i; struct xsc_lag *lag; u8 bond_valid_mask = board_lag->bond_valid_mask; - struct xsc_adapter *adapter; - struct xsc_core_device *xdev; u8 bond_id = BOND_ID_INVALID; for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { if (bond_valid_mask & BIT(i)) { lag = &board_lag->xsc_lag[i]; if (!hash_change) { - adapter = netdev_priv(ndev); - xdev = adapter->xdev; - if (lag->bond_dev == bond_ndev && - lag->board_id == xdev->board_info->board_id) { + if (ndev_is_member_of_lag(bond_ndev, ndev, lag)) { bond_id = i; break; } @@ -1028,8 +1098,12 @@ static void xsc_handle_changeupper_event(struct xsc_board_lag *board_lag, adapter = netdev_priv(ndev); xdev = adapter->xdev; - bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); - xdev->bond_id = bond_id; + if (xdev->bond_id == BOND_ID_INVALID) { + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + } else { + bond_id = xdev->bond_id; + } xsc_core_dbg(xdev, "bond_id = %d\n", bond_id); @@ -1054,6 +1128,8 @@ static 
void xsc_handle_changelowerstate_event(struct xsc_board_lag *board_lag, struct xsc_lag *lag; u8 bond_id; enum lag_slave_status slave_status = XSC_LAG_SLAVE_INACTIVE; + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; if (!netif_is_lag_port(ndev) || !info->lower_state_info) return; @@ -1076,7 +1152,9 @@ static void xsc_handle_changelowerstate_event(struct xsc_board_lag *board_lag, return; } - bond_id = xsc_get_upper_bond_id(bond_dev, ndev, board_lag, false); + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + bond_id = xdev->bond_id; if (bond_id == BOND_ID_INVALID) { mutex_unlock(&board_lag->lock); return; @@ -1085,6 +1163,8 @@ static void xsc_handle_changelowerstate_event(struct xsc_board_lag *board_lag, lag = &board_lag->xsc_lag[bond_id]; pack_lag_update_member_status(lag, ndev, slave_status); mutex_unlock(&board_lag->lock); + + return; } static void xsc_handle_changehash_event(struct xsc_board_lag *board_lag, @@ -1117,6 +1197,8 @@ static void xsc_handle_changehash_event(struct xsc_board_lag *board_lag, pack_lag_update_hash_type(lag, bond_id, hash_type); } mutex_unlock(&board_lag->lock); + + return; } static int xsc_lag_netdev_event(struct notifier_block *this, @@ -1125,6 +1207,9 @@ static int xsc_lag_netdev_event(struct notifier_block *this, struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct xsc_board_lag *board_lag; + if (!net_eq(dev_net(ndev), &init_net)) + return NOTIFY_DONE; + if (event != NETDEV_CHANGE && event != NETDEV_CHANGEUPPER && event != NETDEV_CHANGELOWERSTATE) return NOTIFY_DONE; @@ -1217,6 +1302,7 @@ void xsc_lag_add_xdev(struct xsc_core_device *xdev) if (err) xsc_core_dbg(xdev, "add xdev err=%d\n", err); + } EXPORT_SYMBOL(xsc_lag_add_xdev); @@ -1390,7 +1476,7 @@ u16 xsc_get_lag_id(struct xsc_core_device *xdev) xsc_board_lag_lock(xdev); lag = __xsc_get_lag(xdev); - if (lag && __xsc_lag_is_active(lag) && !__xsc_lag_is_kernel(lag)) + if (lag && __xsc_lag_is_active(lag)) lag_id = lag->lag_id; xsc_board_lag_unlock(xdev); @@ -1398,6 +1484,21 @@ u16 xsc_get_lag_id(struct xsc_core_device *xdev) } EXPORT_SYMBOL(xsc_get_lag_id); +bool xsc_lag_is_kernel(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + bool is_kernel = false; + + xsc_board_lag_lock(xdev); + lag = __xsc_get_lag(xdev); + if (lag && __xsc_lag_is_active(lag) && __xsc_lag_is_kernel(lag)) + is_kernel = true; + xsc_board_lag_unlock(xdev); + + return is_kernel; +} +EXPORT_SYMBOL(xsc_lag_is_kernel); + struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev) { struct xsc_core_device *roce_lag_xdev; @@ -1416,3 +1517,22 @@ struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev) return roce_lag_xdev; } EXPORT_SYMBOL(xsc_get_roce_lag_xdev); + +u16 xsc_lag_set_user_mode(struct xsc_core_device *xdev, u8 mode) +{ + struct xsc_lag *lag; + struct xsc_core_device *tmp_xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (lag && __xsc_lag_is_active(lag)) { + list_for_each_entry(tmp_xdev, &lag->slave_list, slave_node) { + xsc_set_user_mode(tmp_xdev, mode); + } + } + mutex_unlock(&board_lag->lock); + + return 0; +} +EXPORT_SYMBOL(xsc_lag_set_user_mode); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c index 64aea4ccaecd1a8dfc342fe76a66e4e02c2b8538..c2463b8e374be9cb7d7c29b741b15414dde37aa4 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c +++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c @@ -12,12 +12,14 @@ #include "common/xsc_hsi.h" #include "common/xsc_lag.h" #include "common/xsc_port_ctrl.h" +#include "common/qp.h" #include #include #include #include #include "xsc_pci_ctrl.h" #include "common/res_obj.h" +#include "common/tunnel_cmd.h" #define FEATURE_ONCHIP_FT_MASK BIT(4) #define FEATURE_DMA_RW_TBL_MASK BIT(8) @@ -87,8 +89,9 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, struct xsc_eswitch *esw = xdev->priv.eswitch; struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; struct xsc_ioctl_get_phy_info_res *resp; - u16 lag_id = xsc_get_lag_id(xdev); + u16 lag_id = xsc_lag_is_kernel(xdev) ? LAG_ID_INVALID : xsc_get_lag_id(xdev); struct xsc_core_device *rl_xdev; + u16 did = xdev->pdev->device; switch (tl->opmod) { case XSC_IOCTL_OP_GET_LOCAL: @@ -118,8 +121,18 @@ static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, resp->pcie_no, resp->func_id, resp->pcie_host, resp->mac_phy_port, resp->lag_id, resp->funcid_to_logic_port_off); - resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; - resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + if (did == XSC_MV_SOC_PF_DEV_ID) { + if (xdev->caps.pcie_host == 1) { + resp->pf0_vf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + } else { + resp->pf0_vf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + } + } else { + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + } resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; @@ -215,13 +228,11 @@ static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev, resp = (struct xsc_alloc_ucontext_resp *)(tl + 1); - resp->max_cq = 1 << rl_xdev->caps.log_max_cq; - resp->max_qp = 1 << rl_xdev->caps.log_max_qp; + resp->max_cq = rl_xdev->caps.max_cq; + resp->max_qp = rl_xdev->caps.max_qp; resp->max_rwq_indirection_table_size = rl_xdev->caps.max_rwq_indirection_table_size; - resp->qpm_tx_db = rl_xdev->regs.tx_db; - resp->qpm_rx_db = rl_xdev->regs.rx_db; - resp->cqm_next_cid_reg = rl_xdev->regs.complete_reg; - resp->cqm_armdb = rl_xdev->regs.complete_db; + xsc_get_db_addr(rl_xdev, &resp->qpm_tx_db, &resp->qpm_rx_db, &resp->cqm_next_cid_reg, + &resp->cqm_armdb, NULL); resp->send_ds_num = rl_xdev->caps.send_ds_num; resp->recv_ds_num = rl_xdev->caps.recv_ds_num; resp->send_ds_shift = rl_xdev->caps.send_wqe_shift; @@ -246,7 +257,7 @@ static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev, return ret; } -int noop_pre(struct kprobe *p, struct pt_regs *regs) { return 0; } +static int noop_pre(struct kprobe *p, struct pt_regs *regs) { return 0; } static struct kprobe kp = { .symbol_name = "kallsyms_lookup_name", @@ -255,7 +266,7 @@ static struct kprobe kp = { unsigned long (*kallsyms_lookup_name_func)(const char *name) = NULL; //调用kprobe找到kallsyms_lookup_name的地址位置 -int find_kallsyms_lookup_name(void) +static int find_kallsyms_lookup_name(void) { int ret = -1; @@ -306,8 +317,56 @@ u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev) return m->global_available; } -int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, - int out_size) +static int xsc_pci_ctrl_get_devinfo(struct xsc_core_device *xdev, void *in, int in_size, + void *out, 
int out_size) +{ + struct xsc_cmd_get_ioctl_info_mbox_in _in; + struct xsc_cmd_get_ioctl_info_mbox_out *_out; + int outlen; + int err; + int i; + struct xsc_ioctl_tunnel_hdr tunnel_hdr = {0}; + struct xsc_ioctl_attr *hdr = (struct xsc_ioctl_attr *)in; + struct xsc_devinfo *devinfo = NULL; + struct xsc_ioctl_get_devinfo *info = NULL; + + outlen = sizeof(*_out) + out_size; + _out = kzalloc(outlen, GFP_KERNEL); + if (!_out) + return -ENOMEM; + + memset(&_in, 0, sizeof(_in)); + _in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_GET_IOCTL_INFO); + _in.ioctl_opcode = cpu_to_be16(hdr->opcode); + err = xsc_tunnel_cmd_exec(xdev, &_in, sizeof(_in), _out, outlen, &tunnel_hdr); + if (err) + goto out; + if (_out->hdr.status) { + err = xsc_cmd_status_to_err(&_out->hdr); + goto out; + } + + info = (struct xsc_ioctl_get_devinfo *)_out->data; + info->dev_num = be32_to_cpu(info->dev_num); + devinfo = info->data; + for (i = 0; i < info->dev_num; i++) { + devinfo->domain = be32_to_cpu(devinfo->domain); + devinfo->bus = be32_to_cpu(devinfo->bus); + devinfo->devfn = be32_to_cpu(devinfo->devfn); + devinfo->ip_addr = be32_to_cpu(devinfo->ip_addr); + devinfo->vendor_id = be32_to_cpu(devinfo->vendor_id); + devinfo += 1; + } + + memcpy(out, _out->data, out_size); +out: + kfree(_out); + return err; +} + +static int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, + void *in, int in_size, + void *out, int out_size) { int opcode, ret = 0; struct xsc_ioctl_attr *hdr; @@ -326,6 +385,9 @@ int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, xsc_core_dbg(xdev, "case XSC_IOCTL_GET_CONTEXT:\n"); ret = xsc_pci_ctrl_get_contextinfo(xdev, in, out); break; + case XSC_IOCTL_GET_DEVINFO: + ret = xsc_pci_ctrl_get_devinfo(xdev, in, in_size, out, out_size); + break; default: ret = -EINVAL; break; @@ -411,6 +473,7 @@ static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, case XSC_IOCTL_SET_QP_STATUS: case XSC_IOCTL_GET_CONTEXT: case XSC_IOCTL_GET_VECTOR_MATRIX: + case XSC_IOCTL_GET_DEVINFO: break; default: return TRY_NEXT_CB; @@ -444,6 +507,7 @@ static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, next: if (copy_to_user((void *)user_hdr, in, in_size)) err = -EFAULT; + kvfree(in); return err; } @@ -717,6 +781,7 @@ static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, void *in xsc_destroy_cq_obj(file, idx); break; case XSC_CMD_OP_CREATE_CQ: + case XSC_CMD_OP_CREATE_CQ_EX: idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); xsc_alloc_cq_obj(file, idx, in, inlen); break; @@ -769,6 +834,56 @@ static long xsc_pci_ctrl_cmdq(struct xsc_bdf_file *file, return err; } +static int xsc_ioctl_raw_create_multiqp(struct xsc_core_device *xdev, void *in, int in_size, + void *out, int out_size) +{ + struct xsc_create_multiqp_mbox_in *req = in; + struct xsc_create_multiqp_mbox_out *resp = out; + u16 qp_cnt = be16_to_cpu(req->qp_num); + u8 qp_type = req->qp_type; + u16 qpn_base = 0; + struct xsc_create_qp_request *qp_info = NULL; + size_t pas_buf_size; + u8 *ptr; + int i, j; + int ret; + + ret = xsc_alloc_qpn(xdev, &qpn_base, qp_cnt, qp_type); + if (ret == -EOPNOTSUPP) { + xsc_core_info(xdev, "alloc qpn not available\n"); + goto alloc_qpn_not_supp; + } else if (ret) { + xsc_core_err(xdev, "alloc qpn failed\n"); + goto alloc_qpn_err; + } + + ptr = req->data; + for (i = 0; i < qp_cnt; i++) { + qp_info = (struct xsc_create_qp_request *)ptr; + qp_info->input_qpn = cpu_to_be16(qpn_base + i); + pas_buf_size = be16_to_cpu(qp_info->pa_num) * sizeof(__be64); + if (xsc_set_qp_info(xdev, qp_info, 
pas_buf_size)) { + xsc_core_err(xdev, "failed to set qp info for qp%d\n", qpn_base + i); + for (j = 0; j < i; j++) + xsc_unset_qp_info(xdev, qpn_base + j); + ret = -EFAULT; + goto set_qp_err; + } + ptr += sizeof(*qp_info) + pas_buf_size; + } + resp->hdr.status = 0; + resp->qpn_base = cpu_to_be32((u32)qpn_base); + return 0; +set_qp_err: + xsc_dealloc_qpn(xdev, qpn_base, qp_cnt, qp_type); +alloc_qpn_err: + resp->hdr.status = XSC_CMD_STATUS_NO_QPN_RES; + return ret; +alloc_qpn_not_supp: + ret = xsc_cmd_exec(xdev, in, in_size, out, out_size); + return ret; +} + static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, struct xsc_ioctl_hdr __user *user_hdr) { @@ -786,7 +901,7 @@ static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); if (err) { - xsc_core_err(dev, "fail to copy from user user_hdr\n"); + xsc_core_err(dev, "fail to copy from user_hdr\n"); return -EFAULT; } @@ -800,11 +915,10 @@ static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, if (!in) return -ENOMEM; - out_len = min_t(u16, hdr.attr.length, MAX_MBOX_OUT_LEN); + out_len = min(hdr.attr.length, dev->caps.max_cmd_out_len); out = kvzalloc(out_len, GFP_KERNEL); if (!out) { kfree(in); - xsc_core_err(dev, "fail to alloc hdr length for mbox out\n"); return -ENOMEM; } @@ -821,37 +935,48 @@ static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, spin_lock(&dev->dev_res->mkey_lock); key = 0x80 + dev->dev_res->mkey_key++; spin_unlock(&dev->dev_res->mkey_lock); - if (dev->reg_mr_via_cmdq) + read_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); else err = xsc_create_mkey(dev, in, out); + read_unlock(&dev->board_info->mr_sync_lock); resp = (struct xsc_create_mkey_mbox_out *)out; - resp->mkey = xsc_idx_to_mkey(be32_to_cpu(resp->mkey) & 0xffffff) | key; + resp->mkey = xsc_idx_to_mkey(dev, be32_to_cpu(resp->mkey) & 0xffffff) | key; resp->mkey = cpu_to_be32(resp->mkey); break; case XSC_CMD_OP_DESTROY_MKEY: - if (!dev->reg_mr_via_cmdq) + read_lock(&dev->board_info->mr_sync_lock); + if (!(dev->board_info->resource_access_mode == SHARE_MODE)) err = xsc_destroy_mkey(dev, in, out); + read_unlock(&dev->board_info->mr_sync_lock); break; case XSC_CMD_OP_REG_MR: - if (!dev->reg_mr_via_cmdq) + read_lock(&dev->board_info->mr_sync_lock); + if (!(dev->board_info->resource_access_mode == SHARE_MODE)) err = xsc_reg_mr(dev, in, out); + read_unlock(&dev->board_info->mr_sync_lock); break; case XSC_CMD_OP_DEREG_MR: req = (struct xsc_unregister_mr_mbox_in *)in; req->mkey = be32_to_cpu(req->mkey); - req->mkey = cpu_to_be32(xsc_mkey_to_idx(req->mkey)); - if (dev->reg_mr_via_cmdq) + req->mkey = cpu_to_be32(xsc_mkey_to_idx(dev, req->mkey)); + read_lock(&dev->board_info->mr_sync_lock); + if (dev->board_info->resource_access_mode == SHARE_MODE) err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); else err = xsc_dereg_mr(dev, in, out); + read_unlock(&dev->board_info->mr_sync_lock); break; case XSC_CMD_OP_DESTROY_QP: qpn = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); xsc_send_cmd_2rst_qp(dev, qpn); err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len); break; + case XSC_CMD_OP_CREATE_MULTI_QP: + xsc_ioctl_raw_create_multiqp(dev, in, hdr.attr.length, out, out_len); + break; default: err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len); break; diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h 
b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h index 6d4d837256f82882f9f32f3202c151da64e5e8d9..c57caed380b7f014af53607f66f1f71004ace9c2 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h @@ -12,7 +12,7 @@ //for x86 #ifndef NR_VECTORS -#define NR_VECTORS 256 +#define NR_VECTORS 256 #endif #define IRQ_MATRIX_BITS NR_VECTORS #define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) @@ -47,4 +47,5 @@ u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev); int xsc_pci_ctrl_init(void); void xsc_pci_ctrl_fini(void); -#endif /* XSC_PCI_CTRL_H */ + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c index 0d183bf49d710e39fb7da101a65838b12809ce75..bde6c750683467eea697d7cd02f4ca8ae35427ff 100644 --- a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c @@ -13,6 +13,7 @@ #include "common/xsc_core.h" #include "common/driver.h" #include "common/xsc_port_ctrl.h" +#include "common/xsc_prgrmmbl_cc_ctrl.h" #include "common/res_obj.h" #define XSC_PORT_CTRL_MAX 1024 @@ -99,13 +100,19 @@ static int _port_ctrl_release(struct inode *inode, struct file *filp) static bool is_db_ofst(struct xsc_core_device *xdev, unsigned long offset) { - if (offset == (xdev->regs.tx_db & PAGE_MASK)) + u64 tx_db = 0; + u64 rx_db = 0; + u64 cq_db = 0; + u64 cq_reg = 0; + + xsc_get_db_addr(xdev, &tx_db, &rx_db, &cq_db, &cq_reg, NULL); + if (offset == (tx_db & PAGE_MASK)) return true; - else if (offset == (xdev->regs.rx_db & PAGE_MASK)) + else if (offset == (rx_db & PAGE_MASK)) return true; - else if (offset == (xdev->regs.complete_db & PAGE_MASK)) + else if (offset == (cq_db & PAGE_MASK)) return true; - else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) + else if (offset == (cq_reg & PAGE_MASK)) return true; return false; } @@ -125,6 +132,10 @@ static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) struct xsc_core_device *xdev; struct xsc_core_device *rl_xdev; u32 bdf; + u64 tx_db = 0; + u64 rx_db = 0; + u64 cq_db = 0; + u64 cq_reg = 0; file = filp->private_data; xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); @@ -143,14 +154,15 @@ static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) if (!rl_xdev) return -1; + xsc_get_db_addr(rl_xdev, &tx_db, &rx_db, &cq_db, &cq_reg, NULL); if (db_type == XSC_MMAP_MSG_SQDB) { - addr = rl_xdev->regs.tx_db; + addr = tx_db; } else if (db_type == XSC_MMAP_MSG_RQDB) { - addr = rl_xdev->regs.rx_db; + addr = rx_db; } else if (db_type == XSC_MMAP_MSG_CQDB) { - addr = rl_xdev->regs.complete_db; + addr = cq_db; } else if (db_type == XSC_MMAP_MSG_ARM_CQDB) { - addr = rl_xdev->regs.complete_reg; + addr = cq_reg; } else { pr_err("[%s:%d] mmap err\n", __func__, __LINE__); return -1; @@ -164,17 +176,12 @@ static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) } xsc_core_dbg(xdev, "tx_db=%llx,rx_db=%llx,cq_db=%llx,cq_reg=%llx\n", - rl_xdev->regs.tx_db, rl_xdev->regs.rx_db, - rl_xdev->regs.complete_db, rl_xdev->regs.complete_reg); + tx_db, rx_db, cq_db, cq_reg); reg_base = (pci_resource_start(rl_xdev->pdev, rl_xdev->bar_num) + (addr & PAGE_MASK)); - if (addr) { - if (xdev->chip_ver_h == 0x100) - reg_base = xsc_core_is_pf(rl_xdev) ? reg_base - 0xA0000000 : reg_base; - else - reg_base = reg_base - 0xA0000000; - } + if (addr) + reg_base = xsc_core_is_pf(rl_xdev) ? 
reg_base - 0xA0000000 : reg_base; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (remap_pfn_range(vma, start, (reg_base >> PAGE_SHIFT), size, vma->vm_page_prot)) { @@ -412,7 +419,6 @@ static void _port_ctrl_cb_fini(void) static int _port_ctrl_cb_init(void) { mutex_init(&g_port_ctrl_cbs_lock); - return 0; } @@ -425,6 +431,7 @@ void xsc_port_ctrl_fini(void) _port_ctrl_dev_flush(); _port_ctrl_data_fini(); _port_ctrl_cb_fini(); + xsc_prgrmmbl_cc_ctrl_cb_fini(); } int xsc_port_ctrl_init(void) @@ -444,12 +451,56 @@ int xsc_port_ctrl_init(void) return -1; } + ret = xsc_prgrmmbl_cc_ctrl_cb_init(); + if (ret != 0) { + pr_err("failed to initialize prgrmmbl cc ctrl cb\n"); + _port_ctrl_data_fini(); + return -1; + } return 0; } +static void xsc_prgrmmbl_cc_ctrl_dev_del_wrapper(struct xsc_core_device *dev) +{ + int dev_id = 0; + + if (!xsc_prgrmmble_cc_ctrl_is_supported(dev)) + return; + + if (xsc_prgrmmbl_cc_ctrl_dev_del(dev, g_port_ctrl_class, &dev_id)) + return; + + clear_bit(dev_id, g_bitmap_dev_id); + g_port_ctrl_dev_cnt--; +} + +static int xsc_prgrmmbl_cc_ctrl_dev_add_wrapper(struct xsc_core_device *dev) +{ + int ret = 0; + int dev_id = 0; + + if (!xsc_prgrmmble_cc_ctrl_is_supported(dev)) + return ret; + + if (g_port_ctrl_dev_cnt >= XSC_PORT_CTRL_MAX) { + xsc_core_err(dev, "too many port control devices\n"); + return -ENOMEM; + } + + dev_id = find_first_zero_bit(g_bitmap_dev_id, XSC_PORT_CTRL_MAX); + ret = xsc_prgrmmbl_cc_ctrl_dev_add(dev, g_port_ctrl_class, g_port_ctrl_root_dev + dev_id); + if (!ret) { + g_port_ctrl_dev_cnt++; + set_bit(dev_id, g_bitmap_dev_id); + } + + return ret; +} + void xsc_port_ctrl_remove(struct xsc_core_device *dev) { _port_ctrl_dev_del(dev); + xsc_prgrmmbl_cc_ctrl_dev_del_wrapper(dev); } int xsc_port_ctrl_probe(struct xsc_core_device *dev) @@ -457,8 +508,14 @@ int xsc_port_ctrl_probe(struct xsc_core_device *dev) int ret = 0; ret = _port_ctrl_dev_add(dev); - if (ret != 0) + if (ret != 0) { xsc_core_err(dev, "failed to add new port control device\n"); + return ret; + } + + ret = xsc_prgrmmbl_cc_ctrl_dev_add_wrapper(dev); + if (ret != 0) + xsc_core_err(dev, "failed to add programmable cc control device\n"); return ret; } diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_prgrmmbl_cc_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_prgrmmbl_cc_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..0185f3b3afae934565e4a9e2c13e134433a2fcfd --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_prgrmmbl_cc_ctrl.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/xsc_port_ctrl.h" +#include "common/xsc_prgrmmbl_cc_ctrl.h" +#include "common/res_obj.h" + +#define XSC_PORT_CTRL_NAME_PRE "yunsilicon" +#define XSC_PRGRMMBL_CC_CTRL_NAME "flexcc" +#define XSC_PORT_CTRL_CB_NAME_LEN 15 + +struct xsc_prgrmmbl_cc_ctrl_reg { + struct list_head node; + char name[XSC_PORT_CTRL_CB_NAME_LEN + 1]; + port_prgrmmbl_cc_ctrl_cb cb; + void *data; +}; + +static struct list_head g_prgrmmbl_cc_ctrl_cbs = LIST_HEAD_INIT(g_prgrmmbl_cc_ctrl_cbs); +struct mutex g_prgrmmbl_cc_ctrl_cbs_lock; /* protect programmable cc ctrl node list */ + +static void xsc_release_port_ctrl_file(struct xsc_port_ctrl_file *file) +{ + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + + xsc_close_bdf_file(file->root_bdf); + kfree(file->root_bdf); + spin_lock(&file->bdf_lock); + radix_tree_for_each_slot(slot, &file->bdf_tree, &iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&file->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&file->bdf_lock); +} + +static int _prgrmmble_cc_ctrl_release(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl_file *file = filp->private_data; + + xsc_release_port_ctrl_file(file); + spin_lock(&file->ctrl->file_lock); + list_del(&file->file_node); + spin_unlock(&file->ctrl->file_lock); + kfree(file); + + return 0; +} + +bool xsc_prgrmmble_cc_ctrl_is_supported(struct xsc_core_device *dev) +{ + return xsc_core_is_pf(dev) && xsc_support_hw_feature(dev, XSC_HW_PRGRMMBL_CC_SUPPORT); +} + +static int _prgrmmble_cc_ctrl_open(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl *ctrl = container_of(inode->i_cdev, struct xsc_port_ctrl, cdev); + struct xsc_port_ctrl_file *file; + + file = kzalloc(sizeof(*file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&file->bdf_tree, GFP_ATOMIC); + spin_lock_init(&file->bdf_lock); + file->ctrl = ctrl; + + file->root_bdf = kzalloc(sizeof(*file->root_bdf), GFP_KERNEL); + if (!file->root_bdf) { + kfree(file); + return -ENOMEM; + } + INIT_RADIX_TREE(&file->root_bdf->obj_tree, GFP_ATOMIC); + spin_lock_init(&file->root_bdf->obj_lock); + file->root_bdf->xdev = container_of(ctrl, struct xsc_core_device, prgrmmbl_cc_ctrl); + + spin_lock(&ctrl->file_lock); + list_add_tail(&file->file_node, &ctrl->file_list); + spin_unlock(&ctrl->file_lock); + filp->private_data = file; + + xsc_core_info(file->root_bdf->xdev, + "process %d open programmable cc ctrl file\n", current->pid); + + return 0; +} + +static long _prgrmmbl_cc_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct xsc_prgrmmbl_cc_ctrl_reg *p; + struct xsc_port_ctrl_file *file; + struct xsc_bdf_file *bdf_file; + int err; + + err = TRY_NEXT_CB; + file = filp->private_data; + + bdf_file = file->root_bdf; + if (!bdf_file) { + pr_err("%s: fail to find bdf file\n", __func__); + return -EFAULT; + } + + if (!xsc_prgrmmble_cc_ctrl_is_supported(bdf_file->xdev)) { + xsc_core_err(bdf_file->xdev, "%s: programmable cc is not supported!\n", __func__); + return err; + } + + list_for_each_entry(p, &g_prgrmmbl_cc_ctrl_cbs, node) { + if (p->cb) { + err = p->cb(bdf_file, cmd, arg, p->data); + if (err != TRY_NEXT_CB) + break; + } + } + + return err; +} + +static const struct file_operations g_prgrmmbl_cc_ctrl_fops = { + .owner = THIS_MODULE, + .open = _prgrmmble_cc_ctrl_open, + 
.unlocked_ioctl = _prgrmmbl_cc_ctrl_ioctl, + .compat_ioctl = _prgrmmbl_cc_ctrl_ioctl, + .release = _prgrmmble_cc_ctrl_release, +}; + +int xsc_prgrmmbl_cc_ctrl_dev_add(struct xsc_core_device *dev, + struct class *port_ctrl_class, dev_t dev_id) +{ + struct xsc_port_ctrl *ctrl; + int ret = 0; + + ctrl = &dev->prgrmmbl_cc_ctrl; + ctrl->devid = dev_id; + ctrl->cdev.owner = THIS_MODULE; + INIT_LIST_HEAD(&ctrl->file_list); + spin_lock_init(&ctrl->file_lock); + cdev_init(&ctrl->cdev, &g_prgrmmbl_cc_ctrl_fops); + ret = cdev_add(&ctrl->cdev, ctrl->devid, 1); + if (ret != 0) { + xsc_core_err(dev, "failed to add cdev\n"); + return -ENOMEM; + } + + ctrl->device = device_create(port_ctrl_class, NULL, ctrl->devid, NULL, + "%s!%s_%04x:%02x:%02x.%x", XSC_PORT_CTRL_NAME_PRE, + XSC_PRGRMMBL_CC_CTRL_NAME, pci_domain_nr(dev->pdev->bus), + dev->pdev->bus->number, + PCI_SLOT(dev->pdev->devfn), + PCI_FUNC(dev->pdev->devfn)); + if (IS_ERR(ctrl->device)) { + xsc_core_err(dev, "failed to create programmable cc control device\n"); + cdev_del(&ctrl->cdev); + return -ENOMEM; + } + + return 0; +} + +int xsc_prgrmmbl_cc_ctrl_dev_del(struct xsc_core_device *dev, + struct class *port_ctrl_class, int *dev_id) +{ + struct xsc_port_ctrl *ctrl; + struct xsc_port_ctrl_file *file, *n; + + ctrl = &dev->prgrmmbl_cc_ctrl; + if (!ctrl) + return -EFAULT; + + *dev_id = MINOR(ctrl->devid); + spin_lock(&ctrl->file_lock); + list_for_each_entry_safe(file, n, &ctrl->file_list, file_node) { + xsc_release_port_ctrl_file(file); + list_del(&file->file_node); + kfree(file); + } + spin_unlock(&ctrl->file_lock); + + device_destroy(port_ctrl_class, ctrl->devid); + cdev_del(&ctrl->cdev); + + return 0; +} + +int xsc_prgrmmbl_cc_ctrl_cb_init(void) +{ + mutex_init(&g_prgrmmbl_cc_ctrl_cbs_lock); + return 0; +} + +void xsc_prgrmmbl_cc_ctrl_cb_fini(void) +{ + struct xsc_prgrmmbl_cc_ctrl_reg *p, *n; + + list_for_each_entry_safe(p, n, &g_prgrmmbl_cc_ctrl_cbs, node) { + mutex_lock(&g_prgrmmbl_cc_ctrl_cbs_lock); + list_del(&p->node); + mutex_unlock(&g_prgrmmbl_cc_ctrl_cbs_lock); + kfree(p); + } +} + +static struct xsc_prgrmmbl_cc_ctrl_reg *_prgrmmbl_cc_ctrl_cbs_get(const char *name) +{ + struct xsc_prgrmmbl_cc_ctrl_reg *p, *found; + + found = NULL; + list_for_each_entry(p, &g_prgrmmbl_cc_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + found = p; + break; + } + } + + return found; +} + +int xsc_prgrmmbl_cc_ctrl_cb_reg(const char *name, port_prgrmmbl_cc_ctrl_cb cb, void *data) +{ + struct xsc_prgrmmbl_cc_ctrl_reg *reg_node; + + if (strlen(name) > XSC_PORT_CTRL_CB_NAME_LEN) { + pr_err("the name is too long to register to programmable cc control\n"); + return -1; + } + + reg_node = _prgrmmbl_cc_ctrl_cbs_get(name); + if (reg_node) { + pr_err("failed to register a duplicated node\n"); + return -1; + } + + reg_node = kmalloc(sizeof(*reg_node), GFP_KERNEL); + if (!reg_node) + return -1; + + strscpy(reg_node->name, name, sizeof(reg_node->name)); + reg_node->cb = cb; + reg_node->data = data; + INIT_LIST_HEAD(&reg_node->node); + + mutex_lock(&g_prgrmmbl_cc_ctrl_cbs_lock); + list_add_tail(&reg_node->node, &g_prgrmmbl_cc_ctrl_cbs); + mutex_unlock(&g_prgrmmbl_cc_ctrl_cbs_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_prgrmmbl_cc_ctrl_cb_reg); + +void xsc_prgrmmbl_cc_ctrl_cb_dereg(const char *name) +{ + struct xsc_prgrmmbl_cc_ctrl_reg *p, *n; + + list_for_each_entry_safe(p, n, &g_prgrmmbl_cc_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + mutex_lock(&g_prgrmmbl_cc_ctrl_cbs_lock); + list_del(&p->node); + 
mutex_unlock(&g_prgrmmbl_cc_ctrl_cbs_lock); + kfree(p); + break; + } + } +} +EXPORT_SYMBOL(xsc_prgrmmbl_cc_ctrl_cb_dereg);
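For reviewers, a minimal consumer sketch of the new programmable CC ("flexcc") callback interface introduced above. The callback prototype is only inferred from the call site p->cb(bdf_file, cmd, arg, p->data) in _prgrmmbl_cc_ctrl_ioctl, and the demo_* names do not exist in this patch; treat this as an illustration of the intended registration flow, not as code from the series.

/* Hypothetical example (not part of the patch). Assumes port_prgrmmbl_cc_ctrl_cb
 * matches the call p->cb(bdf_file, cmd, arg, p->data) seen in
 * _prgrmmbl_cc_ctrl_ioctl.
 */
#include <linux/module.h>
#include "common/xsc_core.h"
#include "common/xsc_port_ctrl.h"
#include "common/xsc_prgrmmbl_cc_ctrl.h"

static int demo_flexcc_ioctl(struct xsc_bdf_file *file, unsigned int cmd,
			     unsigned long arg, void *data)
{
	/* Handle only the commands this module owns; returning TRY_NEXT_CB
	 * lets the next registered callback inspect the ioctl.
	 */
	return TRY_NEXT_CB;
}

static int __init demo_flexcc_init(void)
{
	/* The name must fit within XSC_PORT_CTRL_CB_NAME_LEN characters. */
	return xsc_prgrmmbl_cc_ctrl_cb_reg("demo_flexcc", demo_flexcc_ioctl, NULL);
}

static void __exit demo_flexcc_exit(void)
{
	xsc_prgrmmbl_cc_ctrl_cb_dereg("demo_flexcc");
}

module_init(demo_flexcc_init);
module_exit(demo_flexcc_exit);
MODULE_LICENSE("GPL");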